The perceptron is an artificial neural network invented in 1957 by Frank Rosenblatt while he worked at the Cornell Aeronautical Laboratory. It is one of the most fundamental models in machine learning and is often described as the stepping stone into the field.
The perceptron is a simple abstraction of a biological neuron and arguably the simplest form of feedforward neural network: a binary linear classification model. Its input is an instance's feature vector; its output is the instance's class, taking the value +1 or -1. By today's standards the perceptron's generalization ability is usually limited, but it is one of the oldest classification methods, the prototype of neural networks, and the foundation of the support vector machine, so studying it thoroughly is a great help when going on to learn SVMs and neural networks.
I. The perceptron model
The perceptron model is the function

f(x) = \mathrm{sign}(w \cdot x + b)

where x is the feature vector, w is the weight vector, b is the bias, and sign(·) returns +1 for positive arguments and -1 otherwise.

Geometric interpretation of the perceptron: the linear equation w \cdot x + b = 0 defines a separating hyperplane in feature space, with w as its normal vector and b as its intercept; instances on one side of the hyperplane are classified as +1 and those on the other side as -1.
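As a minimal sketch of this decision rule (the weights and sample points below are made-up values for illustration, not taken from the implementations later in this post):

import numpy as np

# Decision rule f(x) = sign(w.x + b) for a fixed, hand-picked hyperplane.
w = np.array([2.0, -1.0])  # normal vector of the hyperplane w.x + b = 0
b = 1.0                    # intercept

def predict(x):
    # sign(w.x + b), mapping 0 to the negative class like the code below does
    return 1 if np.dot(w, x) + b > 0 else -1

print(predict(np.array([3.0, 2.0])))  # w.x + b = 5  -> +1
print(predict(np.array([0.0, 5.0])))  # w.x + b = -4 -> -1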
II. The perceptron algorithm
1. Primal form
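The primal algorithm is stochastic gradient descent on the perceptron loss, which sums the functional margins of the misclassified points M:

L(w, b) = -\sum_{x_i \in M} y_i (w \cdot x_i + b)

Picking one misclassified sample (x_i, y_i) at a time, i.e. one with y_i (w \cdot x_i + b) \le 0, gives the update rule implemented by the update method in the code below:

w \leftarrow w + \eta y_i x_i, \qquad b \leftarrow b + \eta y_i

Each update moves the hyperplane toward the misclassified point; for linearly separable data, Novikoff's theorem guarantees convergence after a finite number of updates.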
from random import randint
import numpy as np
import matplotlib.pyplot as plt


class TrainDataLoader:
    def __init__(self):
        pass

    def GenerateRandomData(self, count, gradient, offset):
        # Sample count points along y = gradient*x + offset, with integer noise on x2
        x1 = np.linspace(1, 5, count)
        x2 = gradient*x1 + np.random.randint(-10, 10, *x1.shape) + offset
        dataset = []
        y = []
        for i in range(*x1.shape):
            dataset.append([x1[i], x2[i]])
            real_value = gradient*x1[i] + offset
            if real_value > x2[i]:
                y.append(-1)
            else:
                y.append(1)
        return x1, x2, np.mat(y), np.mat(dataset)


class SimplePerceptron:
    def __init__(self, train_data=[], real_result=[], eta=1):
        self.w = np.zeros([1, train_data.shape[1]])
        self.b = 0
        self.eta = eta
        self.train_data = train_data
        self.real_result = real_result

    def nomalize(self, x):
        if x > 0:
            return 1
        else:
            return -1

    def model(self, x):
        # Matrix dot product yields the single value w.x + b
        y = np.dot(x, self.w.T) + self.b
        # Use the sign to normalize the result to +1/-1
        predict_v = self.nomalize(y)
        return predict_v, y

    def update(self, x, y):
        # w = w + eta*y_i*x_i
        self.w = self.w + self.eta*y*x
        # b = b + eta*y_i
        self.b = self.b + self.eta*y

    def loss(self, fx, y):  # fixed typo: 'slef' -> 'self'
        # Functional margin y_i*(w.x_i + b); positive means correctly classified
        return float(fx)*float(y)

    def train(self, count):
        update_count = 0
        while count > 0:
            count = count - 1
            if len(self.train_data) <= 0:
                print("exception exit")
                break
            # Randomly select one training sample
            index = randint(0, len(self.train_data) - 1)
            x = self.train_data[index]
            y = self.real_result.T[index]
            # wx + b
            predict_v, linear_y_v = self.model(x)
            # y_i*(wx+b) > 0 means the classification is correct; otherwise update
            if self.loss(y, linear_y_v) > 0:
                continue
            update_count = update_count + 1
            self.update(x, y)
        print("update count: ", update_count)

    def verify(self, verify_data, verify_result):
        size = len(verify_data)
        failed_count = 0
        if size <= 0:
            return
        for i in range(size):
            x = verify_data[i]
            y = verify_result.T[i]
            if self.loss(y, self.model(x)[1]) > 0:
                continue
            failed_count = failed_count + 1
        success_rate = (1.0 - (float(failed_count)/size))*100
        print("Success Rate: ", success_rate, "%")
        print("All input: ", size, " failed_count: ", failed_count)

    def predict(self, predict_data):
        size = len(predict_data)
        result = []
        if size <= 0:
            return result
        for i in range(size):
            x = predict_data[i]  # fixed: referenced undefined 'verify_data'
            result.append(self.model(x)[0])
        return result


if __name__ == "__main__":
    # Init some parameters
    gradient = 2
    offset = 10
    point_num = 1000
    train_num = 50000
    loader = TrainDataLoader()
    x, y, result, train_data = loader.GenerateRandomData(point_num, gradient, offset)
    x_t, y_t, test_real_result, test_data = loader.GenerateRandomData(100, gradient, offset)
    # First training
    perceptron = SimplePerceptron(train_data, result)
    perceptron.train(train_num)
    perceptron.verify(test_data, test_real_result)
    print("T1: w:", perceptron.w, " b:", perceptron.b)
    # Draw the figure
    # 1. draw the (x, y) points
    plt.plot(x, y, "*", color='gray')
    plt.plot(x_t, y_t, "+")
    # 2. draw the true line y = gradient*x + offset
    plt.plot(x, gradient*x + offset, color="red")
    # 3. draw the learned line w_1*x_1 + w_2*x_2 + b = 0
    plt.plot(x, -(float(perceptron.w.T[0])*x + float(perceptron.b))/float(perceptron.w.T[1]), color='green')
    plt.show()

2. Dual form
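In the dual form, w is not stored directly during training; instead each sample keeps a counter \alpha_i recording how much it has contributed through updates, so that

w = \sum_{i=1}^{N} \alpha_i y_i x_i

and on a misclassified sample i the updates become \alpha_i \leftarrow \alpha_i + \eta and b \leftarrow b + \eta y_i. Because samples then enter the decision function only through inner products, the Gram matrix G_{ij} = x_i \cdot x_j can be precomputed once (the self.gram attribute below) and reused for every check of y_i (w \cdot x_i + b).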
from random import randint
import numpy as np
import matplotlib.pyplot as plt


class TrainDataLoader:
    def __init__(self):
        pass

    def GenerateRandomData(self, count, gradient, offset):
        # Sample count points along y = gradient*x + offset, with integer noise on x2
        x1 = np.linspace(1, 5, count)
        x2 = gradient*x1 + np.random.randint(-10, 10, *x1.shape) + offset
        dataset = []
        y = []
        for i in range(*x1.shape):
            dataset.append([x1[i], x2[i]])
            real_value = gradient*x1[i] + offset
            if real_value > x2[i]:
                y.append(-1)
            else:
                y.append(1)
        return x1, x2, np.mat(y), np.mat(dataset)


class SimplePerceptron:
    def __init__(self, train_data=[], real_result=[], eta=1):
        self.alpha = np.zeros([train_data.shape[0], 1])
        self.w = np.zeros([1, train_data.shape[1]])
        self.b = 0
        self.eta = eta
        self.train_data = train_data
        self.real_result = real_result
        # Precompute the Gram matrix G[i][j] = x_i . x_j once
        self.gram = np.matmul(train_data, train_data.T)

    def nomalize(self, x):
        if x > 0:
            return 1
        else:
            return -1

    def train_model(self, index):
        # Evaluate w.x_index + b with w = sum_i alpha_i*y_i*x_i,
        # using only the precomputed Gram matrix
        temp = 0
        labels = self.real_result.T
        for i in range(len(self.alpha)):
            alpha = self.alpha[i]
            if alpha == 0:
                continue
            gram_value = self.gram[index].T[i]
            temp = temp + alpha*labels[i]*gram_value
        y = temp + self.b
        # Use the sign to normalize the result to +1/-1
        predict_v = self.nomalize(y)
        return predict_v, y

    def verify_model(self, x):
        # After training, w has been recovered, so w.x + b can be used directly
        y = np.dot(x, self.w.T) + self.b
        predict_v = self.nomalize(y)
        return predict_v, y

    def update(self, index, x, y):
        # alpha_i = alpha_i + eta
        self.alpha[index] = self.alpha[index] + self.eta
        # b = b + eta*y_i
        self.b = self.b + self.eta*y

    def loss(self, fx, y):  # fixed typo: 'slef' -> 'self'
        # Functional margin y_i*(w.x_i + b); positive means correctly classified
        return float(fx)*float(y)

    def train(self, count):
        update_count = 0
        train_data_num = self.train_data.shape[0]
        print("train_data:", self.train_data)
        print("Gram:", self.gram)
        while count > 0:
            count = count - 1
            if train_data_num <= 0:
                print("exception exit")
                break
            # Randomly select one training sample
            index = randint(0, train_data_num - 1)
            x = self.train_data[index]
            y = self.real_result.T[index]
            # w = \sum_{i=1}^{N} alpha_i*y_i*x_i, evaluated via the Gram matrix
            predict_v, linear_y_v = self.train_model(index)
            # y_i*(wx+b) > 0 means the classification is correct; otherwise update
            if self.loss(y, linear_y_v) > 0:
                continue
            update_count = update_count + 1
            self.update(index, x, y)
        # Recover w from alpha once after training: w = sum_i alpha_i*y_i*x_i
        # (fixed: the original recomputed w inside the loop without zeroing it first)
        self.w = np.zeros_like(self.w)
        for i in range(len(self.alpha)):
            x = self.train_data[i]
            y = self.real_result.T[i]
            self.w = self.w + float(self.alpha[i])*x*float(y)
        print("update count: ", update_count)

    def verify(self, verify_data, verify_result):
        size = len(verify_data)
        failed_count = 0
        if size <= 0:
            return
        for i in range(size):  # fixed: range(size-1) skipped the last sample
            x = verify_data[i]
            y = verify_result.T[i]
            if self.loss(y, self.verify_model(x)[1]) > 0:
                continue
            failed_count = failed_count + 1
        success_rate = (1.0 - (float(failed_count)/size))*100
        print("Success Rate: ", success_rate, "%")
        print("All input: ", size, " failed_count: ", failed_count)

    def predict(self, predict_data):
        size = len(predict_data)
        result = []
        if size <= 0:
            return result
        for i in range(size):
            x = predict_data[i]  # fixed: referenced undefined 'verify_data'
            result.append(self.verify_model(x)[0])  # fixed: this class has no 'model'
        return result


if __name__ == "__main__":
    # Init some parameters
    gradient = 2
    offset = 10
    point_num = 1000
    train_num = 1000
    loader = TrainDataLoader()
    x, y, result, train_data = loader.GenerateRandomData(point_num, gradient, offset)
    x_t, y_t, test_real_result, test_data = loader.GenerateRandomData(100, gradient, offset)
    # train_data = np.mat([[3, 3], [4, 3], [1, 1]])
    # First training
    perceptron = SimplePerceptron(train_data, result)
    perceptron.train(train_num)
    perceptron.verify(test_data, test_real_result)
    print("T1: w:", perceptron.w, " b:", perceptron.b)
    # Draw the figure
    # 1. draw the (x, y) points
    plt.plot(x, y, "*", color='gray')
    plt.plot(x_t, y_t, "+")
    # 2. draw the true line y = gradient*x + offset
    plt.plot(x, gradient*x + offset, color="red")
    # 3. draw the learned line w_1*x_1 + w_2*x_2 + b = 0
    plt.plot(x, -(float(perceptron.w.T[0])*x + float(perceptron.b))/float(perceptron.w.T[1]), color='green')
    plt.show()
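As a sanity check that is not part of the original implementations above (it assumes scikit-learn is installed), the same kind of toy data can be fed to sklearn.linear_model.Perceptron and the learned line compared against the hand-written versions:

import numpy as np
from sklearn.linear_model import Perceptron

# Regenerate toy data with the same labeling rule as GenerateRandomData
rng = np.random.default_rng(0)
x1 = np.linspace(1, 5, 200)
x2 = 2 * x1 + 10 + rng.integers(-10, 10, x1.shape)
X = np.column_stack([x1, x2])
y = np.where(x2 >= 2 * x1 + 10, 1, -1)

clf = Perceptron(max_iter=1000, tol=1e-3).fit(X, y)
print("accuracy:", clf.score(X, y))
print("w:", clf.coef_, " b:", clf.intercept_)

The dual form pays off when the number of samples is small relative to the feature dimension, or when the inner products in the Gram matrix are replaced by a kernel function; the latter is exactly the route that leads from the perceptron to support vector machines.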