Deep Learning Experiments


Experiment 1: Building a fully connected neural network with NumPy

import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, datasets, optimizers

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# Prepare the data: load MNIST and scale pixel values to [0, 1]
def mnist_dataset():
    (x, y), (x_test, y_test) = datasets.mnist.load_data()
    x = x / 255.0
    x_test = x_test / 255.0
    return (x, y), (x_test, y_test)
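If you want to confirm what the loader returns before wiring up the model, the arrays have the standard MNIST shapes: 60000 training and 10000 test images of 28x28 pixels, with integer class labels. A quick check (optional, not part of the original experiment):

(x, y), (x_test, y_test) = mnist_dataset()
print(x.shape, y.shape)            # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape)  # (10000, 28, 28) (10000,)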
# Define the matrix-multiplication layer
class Matmul():
    def __init__(self):
        self.mem = {}

    def forward(self, x, W):
        h = np.matmul(x, W)
        self.mem = {"x": x, "W": W}
        return h

    def backward(self, grad_y):
        x = self.mem["x"]
        W = self.mem["W"]
        grad_x = np.matmul(grad_y, W.T)
        grad_W = np.matmul(x.T, grad_y)
        return grad_x, grad_W
# Define the ReLU layer
class Relu():
    def __init__(self):
        self.mem = {}

    def forward(self, x):
        self.mem["x"] = x
        return np.where(x > 0, x, np.zeros_like(x))

    def backward(self, grad_y):
        x = self.mem["x"]
        return (x > 0).astype(np.float32) * grad_y
# Define the Softmax layer
class Softmax():
    def __init__(self):
        self.mem = {}
        self.epsilon = 1e-12

    def forward(self, x):
        x_exp = np.exp(x)
        denominator = np.sum(x_exp, axis=1, keepdims=True)
        out = x_exp / (denominator + self.epsilon)
        self.mem["out"] = out
        self.mem["x_exp"] = x_exp
        return out

    def backward(self, grad_y):
        s = self.mem["out"]
        # Per-row Jacobian-vector product: grad_x = grad_y * s - (grad_y . s) * s
        sisj = np.matmul(np.expand_dims(s, axis=2), np.expand_dims(s, axis=1))
        g_y_exp = np.expand_dims(grad_y, axis=1)
        tmp = np.matmul(g_y_exp, sisj)
        tmp = np.squeeze(tmp, axis=1)
        softmax_grad = -tmp + grad_y * s
        return softmax_grad
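The forward pass above exponentiates the raw logits directly, which can overflow for large activations. A common, numerically stable variant (a sketch, not part of the original experiment) subtracts the per-row maximum before exponentiating; the result is mathematically identical:

def stable_softmax(x, epsilon=1e-12):
    # Subtracting the row-wise max leaves softmax unchanged but avoids overflow in exp
    x_shifted = x - np.max(x, axis=1, keepdims=True)
    x_exp = np.exp(x_shifted)
    return x_exp / (np.sum(x_exp, axis=1, keepdims=True) + epsilon)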
# Define the cross-entropy loss
class Cross_entropy():
    def __init__(self):
        self.epsilon = 1e-12
        self.mem = {}

    def forward(self, x, labels):
        log_prob = np.log(x + self.epsilon)
        out = np.mean(np.sum(-log_prob * labels, axis=1))
        self.mem["x"] = x
        return out

    def backward(self, labels):
        x = self.mem["x"]
        return -1 / (x + self.epsilon) * labels
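A quick finite-difference comparison can confirm that the chained Softmax and Cross_entropy backward passes match the numerical gradient. This is a sanity-check sketch, not part of the original experiment; note that Cross_entropy.backward omits the 1/N factor introduced by the mean in forward, so the analytic gradient is divided by the batch size here:

np.random.seed(0)
logits = np.random.normal(size=[4, 10])
labels = np.eye(10)[np.random.randint(0, 10, size=4)]

softmax, ce = Softmax(), Cross_entropy()
ce.forward(softmax.forward(logits), labels)
# Divide by the batch size because backward() returns the gradient of the per-sample sum
analytic = softmax.backward(ce.backward(labels)) / logits.shape[0]

eps = 1e-6
numeric = np.zeros_like(logits)
for i in range(logits.shape[0]):
    for j in range(logits.shape[1]):
        plus, minus = logits.copy(), logits.copy()
        plus[i, j] += eps
        minus[i, j] -= eps
        numeric[i, j] = (ce.forward(softmax.forward(plus), labels)
                         - ce.forward(softmax.forward(minus), labels)) / (2 * eps)

print(np.max(np.abs(analytic - numeric)))  # should be close to zero (~1e-8 or smaller)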
# Build the model
class myModel():
    def __init__(self):
        self.W1 = np.random.normal(size=[28*28+1, 100])
        self.W2 = np.random.normal(size=[100, 10])
        self.mul_h1 = Matmul()
        self.relu = Relu()
        self.mul_h2 = Matmul()
        self.softmax = Softmax()
        self.cross_en = Cross_entropy()

    def forward(self, x, labels):
        x = x.reshape(-1, 28*28)
        bias = np.ones(shape=[x.shape[0], 1])
        x = np.concatenate([x, bias], axis=1)
        self.h1 = self.mul_h1.forward(x, self.W1)
        self.h1_relu = self.relu.forward(self.h1)
        self.h2 = self.mul_h2.forward(self.h1_relu, self.W2)
        self.h2_soft = self.softmax.forward(self.h2)
        self.loss = self.cross_en.forward(self.h2_soft, labels)

    def backward(self, labels):
        self.loss_grad = self.cross_en.backward(labels)
        self.h2_soft_grad = self.softmax.backward(self.loss_grad)
        self.h2_grad, self.W2_grad = self.mul_h2.backward(self.h2_soft_grad)
        self.h1_relu_grad = self.relu.backward(self.h2_grad)
        self.h1_grad, self.W1_grad = self.mul_h1.backward(self.h1_relu_grad)
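For reference, the shapes flowing through the forward pass with batch size N: the flattened input plus the appended bias column is N x 785, W1 maps it to an N x 100 hidden layer, and W2 maps that to N x 10 class probabilities. A small hypothetical check (illustrative only, not part of the original code):

demo_model = myModel()
dummy_images = np.random.rand(8, 28, 28)                      # a fake batch of 8 "images"
dummy_labels = np.eye(10)[np.random.randint(0, 10, size=8)]
demo_model.forward(dummy_images, dummy_labels)
print(demo_model.h1.shape, demo_model.h2_soft.shape)          # expected: (8, 100) (8, 10)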
# Compute classification accuracy
def compute_accuracy(prob, labels):
    predictions = np.argmax(prob, axis=1)
    truth = np.argmax(labels, axis=1)
    return np.mean(predictions == truth)
# Run one training epoch (here, a single full-batch gradient-descent step)
def train_one_step(model, x, y):
    model.forward(x, y)
    model.backward(y)
    model.W1 -= 1e-5 * model.W1_grad
    model.W2 -= 1e-5 * model.W2_grad
    loss = model.loss
    accuracy = compute_accuracy(model.h2_soft, y)
    return loss, accuracy
# Compute loss and accuracy on the test set
def test(model, x, y):
    model.forward(x, y)
    loss = model.loss
    accuracy = compute_accuracy(model.h2_soft, y)
    return loss, accuracy

# Run the actual training: build one-hot labels and train for 50 epochs
train_data, test_data = mnist_dataset()
train_label = np.zeros(shape=[train_data[0].shape[0], 10])
test_label = np.zeros(shape=[test_data[0].shape[0], 10])
train_label[np.arange(train_data[0].shape[0]), np.array(train_data[1])] = 1
test_label[np.arange(test_data[0].shape[0]), np.array(test_data[1])] = 1

model = myModel()
for epoch in range(50):
    loss, accuracy = train_one_step(model, train_data[0], train_label)
    print(f'epoch {epoch} : loss {loss} ; accuracy {accuracy}')

# Evaluate on the test set
loss, accuracy = test(model, test_data[0], test_label)
print(f'test loss {loss} ; accuracy {accuracy}')
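The loop above takes one full-batch gradient step per epoch with a fixed learning rate of 1e-5. A minibatch variant (a sketch reusing the same model class and helpers, not part of the original experiment) typically needs far fewer passes over the data:

# Hypothetical minibatch training loop (illustrative sketch)
mb_model = myModel()
batch_size = 256
num_samples = train_data[0].shape[0]
for epoch in range(5):
    order = np.random.permutation(num_samples)
    for start in range(0, num_samples, batch_size):
        idx = order[start:start + batch_size]
        loss, accuracy = train_one_step(mb_model, train_data[0][idx], train_label[idx])
    print(f'epoch {epoch} : loss {loss} ; accuracy {accuracy}')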

Experiment 2: A CNN with PyTorch

pip install torch==1.12.1+cu102 torchvision==0.13.1+cu102 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu102
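The cu102 wheels target CUDA 10.2; without a compatible GPU the same code still runs on CPU. A quick way to confirm the installation (an optional check, not part of the experiment):

import torch, torchvision
print(torch.__version__, torchvision.__version__)  # expected: 1.12.1+cu102 0.13.1+cu102
print(torch.cuda.is_available())                   # True only if a suitable GPU and driver are present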

import torch
from torch import nn
import torchvision
from torchvision import datasets, transforms
from tqdm import tqdm

# Hyperparameters
BATCH_SIZE = 100
EPOCHS = 10
LEARNING_RATE = 1e-4
KEEP_PROB_RATE = 0.7

# Select the device to use
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Data transformation
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5], std=[0.5])
])

# Download and load the dataset
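ToTensor scales pixels to [0, 1]; Normalize with mean 0.5 and std 0.5 then maps each pixel p to (p - 0.5) / 0.5, so the network sees inputs in [-1, 1]. A tiny illustration (not part of the original script):

sample = torch.tensor([0.0, 0.5, 1.0])   # pixel values as produced by ToTensor
print((sample - 0.5) / 0.5)              # tensor([-1.,  0.,  1.])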
path = './data/'
train_data = datasets.MNIST(path, train=True, transform=transform, download=True)
test_data = datasets.MNIST(path, train=False, transform=transform)

# Create the DataLoaders
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=BATCH_SIZE)
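Each batch yielded by train_loader is an image tensor of shape [100, 1, 28, 28] plus a label tensor of shape [100]. A quick sanity check (optional, not part of the original script):

images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([100, 1, 28, 28]) torch.Size([100])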
# Define the CNN model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=7, padding=3, stride=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Flatten(),
            nn.Linear(in_features=7*7*64, out_features=1024),
            nn.ReLU(),
            nn.Dropout(1 - KEEP_PROB_RATE),
            nn.Linear(in_features=1024, out_features=10),
            nn.Softmax(dim=1)
        )

    def forward(self, input):
        output = self.model(input)
        return output

net = Net()
net.to(device)
print(net)

# Define the loss function and optimizer
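The two 2x2 max-pooling layers halve the 28x28 input twice (28 -> 14 -> 7), which is why the first Linear layer expects 7*7*64 features. Note that nn.CrossEntropyLoss (used below) applies log-softmax internally and expects raw logits, so the trailing nn.Softmax is kept here only to mirror the original architecture; dropping it is the more common setup. An optional check of the parameter count and output shape (not part of the original script):

total_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f"trainable parameters: {total_params}")
with torch.no_grad():
    probe = torch.zeros(1, 1, 28, 28).to(device)
    print(net(probe).shape)  # torch.Size([1, 10])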
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(params=net.parameters(), lr=LEARNING_RATE)

# Training and testing loop
history = {'Test Loss': [], 'Test Accuracy': []}

for epoch in range(1, EPOCHS + 1):
    process_bar = tqdm(train_loader, unit='step')
    net.train(True)
    for step, (train_imgs, labels) in enumerate(process_bar):
        train_imgs = train_imgs.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = net(train_imgs)
        loss = loss_fn(outputs, labels)

        # Backward pass and optimization
        net.zero_grad()
        loss.backward()
        optimizer.step()

        # Compute accuracy
        predictions = torch.argmax(outputs, dim=1)
        accuracy = torch.sum(predictions == labels) / labels.shape[0]

        # Update progress bar
        process_bar.set_description(f"[{epoch}/{EPOCHS}] Loss: {loss.item():.4f}, Acc: {accuracy.item():.4f}")

    # Evaluate on the test set
    net.train(False)
    correct = 0
    total_loss = 0
    with torch.no_grad():
        for test_imgs, labels in test_loader:
            test_imgs = test_imgs.to(device)
            labels = labels.to(device)
            outputs = net(test_imgs)
            loss = loss_fn(outputs, labels)
            total_loss += loss
            predictions = torch.argmax(outputs, dim=1)
            correct += torch.sum(predictions == labels)

        test_accuracy = correct / (BATCH_SIZE * len(test_loader))
        test_loss = total_loss / len(test_loader)
        history['Test Loss'].append(test_loss.item())
        history['Test Accuracy'].append(test_accuracy.item())
        process_bar.set_description(
            f"[{epoch}/{EPOCHS}] Loss: {loss.item():.4f}, Acc: {accuracy.item():.4f}, "
            f"Test Loss: {test_loss.item():.4f}, Test Acc: {test_accuracy.item():.4f}")

    process_bar.close()
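The history dict collects per-epoch test metrics but is never used afterwards. A minimal sketch for visualizing it (assuming matplotlib is installed; not part of the original script):

import matplotlib.pyplot as plt

# Plot the recorded test loss and accuracy per epoch
epochs = range(1, EPOCHS + 1)
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs, history['Test Loss'])
plt.xlabel('epoch'); plt.ylabel('test loss')
plt.subplot(1, 2, 2)
plt.plot(epochs, history['Test Accuracy'])
plt.xlabel('epoch'); plt.ylabel('test accuracy')
plt.tight_layout()
plt.show()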

 

