
Matrix Completion with IGMC: Study Notes

Contents

Inductive Graph-based Matrix Completion (IGMC) model

IGMC inference example


Inductive Graph-based Matrix Completion (IGMC) model

Original code:

IGMC/models.py at master · muhanzhang/IGMC · GitHub

GNN inference example

torch_geometric version: torch_geometric-2.5.3
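If the package is not installed yet, the matching release can be pulled in with pip (an assumption about the setup; the original notes only state the version):

pip install torch_geometric==2.5.3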

The original version fails at inference time with an error that edge_type cannot be found; it is fixed here by removing that argument.
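Concretely, the change is confined to the dropout_adj call in GNN.forward; a minimal before/after sketch (reconstructed from the commented-out lines kept in the IGMC example further below):

# original call -- fails because this forward never defines edge_type
# edge_index, edge_type = dropout_adj(
#     edge_index, edge_type, p=self.adj_dropout,
#     force_undirected=self.force_undirected, num_nodes=len(x),
#     training=self.training)

# fixed call -- drop the edge_type argument and ignore the returned edge attributes
edge_index, _ = dropout_adj(
    edge_index, p=self.adj_dropout,
    force_undirected=self.force_undirected, num_nodes=len(x),
    training=self.training)

The full, corrected GNN example follows: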

import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import GCNConv, global_add_pool
from torch_geometric.utils import dropout_adj
from torch_geometric.data import Data, DataLoader
class GNN(torch.nn.Module):
    # a base GNN class, GCN message passing + sum_pooling
    def __init__(self, dataset, gconv=GCNConv, latent_dim=[32, 32, 32, 1],
                 regression=False, adj_dropout=0.2, force_undirected=False):
        super(GNN, self).__init__()
        self.regression = regression
        self.adj_dropout = adj_dropout
        self.force_undirected = force_undirected
        self.convs = torch.nn.ModuleList()
        self.convs.append(gconv(dataset.num_features, latent_dim[0]))
        for i in range(0, len(latent_dim) - 1):
            self.convs.append(gconv(latent_dim[i], latent_dim[i + 1]))
        self.lin1 = Linear(sum(latent_dim), 128)
        if self.regression:
            self.lin2 = Linear(128, 1)
        else:
            self.lin2 = Linear(128, dataset.num_classes)

    def reset_parameters(self):
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        if self.adj_dropout > 0:
            edge_index, _ = dropout_adj(
                edge_index, p=self.adj_dropout,
                force_undirected=self.force_undirected, num_nodes=len(x),
                training=self.training
            )
        concat_states = []
        for conv in self.convs:
            x = torch.tanh(conv(x, edge_index))
            concat_states.append(x)
        concat_states = torch.cat(concat_states, 1)
        x = global_add_pool(concat_states, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        if self.regression:
            return x[:, 0]
        else:
            return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__


# A simple stand-in class that mimics the dataset attributes the model reads
class SimpleDataset:
    num_features = 2
    num_classes = 2


# Build a tiny toy graph
edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]], dtype=torch.long)
x = torch.tensor([[1, 0], [0, 1], [1, 0], [0, 1]], dtype=torch.float)
batch = torch.tensor([0, 0, 1, 1], dtype=torch.long)

# Wrap the graph in a Data object
data = Data(x=x, edge_index=edge_index, batch=batch)

# Build the DataLoader
loader = DataLoader([data], batch_size=2, shuffle=False)
dataset = SimpleDataset()

# Instantiate the model
model = GNN(dataset)

# Inference
model.eval()
for data in loader:
    out = model(data)
    print(out)
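A small usage aside (my own sketch, not part of the original notes): because the hand-built Data object already carries x, edge_index and batch, the model can also be called on it directly, without going through the DataLoader:

# direct inference on the toy graph built above
model.eval()
with torch.no_grad():
    out = model(Data(x=x, edge_index=edge_index, batch=batch))
print(out)  # log-probabilities, one row per graph id in `batch`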

IGMC inference example:


import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Linear, Conv1d
from torch_geometric.nn import GCNConv, RGCNConv, global_sort_pool, global_add_pool
from torch_geometric.utils import dropout_adj
from util_functions import *  # helper utilities from the original muhanzhang/IGMC repository
import pdb
import time
from torch_geometric.data import Data, DataLoader
class GNN(torch.nn.Module):
    # a base GNN class, GCN message passing + sum_pooling
    def __init__(self, dataset, gconv=GCNConv, latent_dim=[32, 32, 32, 1],
                 regression=False, adj_dropout=0.2, force_undirected=False):
        super(GNN, self).__init__()
        self.regression = regression
        self.adj_dropout = adj_dropout
        self.force_undirected = force_undirected
        self.convs = torch.nn.ModuleList()
        self.convs.append(gconv(dataset.num_features, latent_dim[0]))
        for i in range(0, len(latent_dim) - 1):
            self.convs.append(gconv(latent_dim[i], latent_dim[i + 1]))
        self.lin1 = Linear(sum(latent_dim), 128)
        if self.regression:
            self.lin2 = Linear(128, 1)
        else:
            self.lin2 = Linear(128, dataset.num_classes)

    def reset_parameters(self):
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        if self.adj_dropout > 0:
            # Original call (errors out because this forward never defines edge_type):
            # edge_index, edge_type = dropout_adj(
            #     edge_index, edge_type, p=self.adj_dropout,
            #     force_undirected=self.force_undirected, num_nodes=len(x),
            #     training=self.training
            # )
            edge_index, edge_type = dropout_adj(
                edge_index, p=self.adj_dropout,
                force_undirected=self.force_undirected, num_nodes=len(x),
                training=self.training
            )
        concat_states = []
        for conv in self.convs:
            x = torch.tanh(conv(x, edge_index))
            concat_states.append(x)
        concat_states = torch.cat(concat_states, 1)
        x = global_add_pool(concat_states, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        if self.regression:
            return x[:, 0]
        else:
            return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__


class IGMC(GNN):
    # The GNN model of Inductive Graph-based Matrix Completion.
    # Use RGCN convolution + center-nodes readout.
    def __init__(self, dataset, gconv=RGCNConv, latent_dim=[32, 32, 32, 32],
                 num_relations=5, num_bases=2, regression=False, adj_dropout=0.2,
                 force_undirected=False, side_features=False, n_side_features=0,
                 multiply_by=1):
        super(IGMC, self).__init__(
            dataset, GCNConv, latent_dim, regression, adj_dropout, force_undirected
        )
        self.multiply_by = multiply_by
        self.convs = torch.nn.ModuleList()
        self.convs.append(gconv(dataset.num_features, latent_dim[0], num_relations, num_bases))
        for i in range(0, len(latent_dim) - 1):
            self.convs.append(gconv(latent_dim[i], latent_dim[i + 1], num_relations, num_bases))
        self.lin1 = Linear(2 * sum(latent_dim), 128)
        self.side_features = side_features
        if side_features:
            self.lin1 = Linear(2 * sum(latent_dim) + n_side_features, 128)

    def forward(self, data):
        start = time.time()  # timing hook kept from the original code (unused here)
        x, edge_index, edge_type, batch = data.x, data.edge_index, data.edge_type, data.batch
        if self.adj_dropout > 0:
            edge_index, edge_type = dropout_adj(
                edge_index, edge_type, p=self.adj_dropout,
                force_undirected=self.force_undirected, num_nodes=len(x),
                training=self.training
            )
        concat_states = []
        for conv in self.convs:
            x = torch.tanh(conv(x, edge_index, edge_type))
            concat_states.append(x)
        concat_states = torch.cat(concat_states, 1)
        users = data.x[:, 0] == 1
        items = data.x[:, 1] == 1
        x = torch.cat([concat_states[users], concat_states[items]], 1)
        if self.side_features:
            x = torch.cat([x, data.u_feature, data.v_feature], 1)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        if self.regression:
            return x[:, 0] * self.multiply_by
        else:
            return F.log_softmax(x, dim=-1)


# A simple stand-in class that mimics the dataset attributes the model reads
class SimpleDataset:
    num_features = 2
    num_classes = 2


# Build a tiny toy graph
edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]], dtype=torch.long)
edge_type = torch.tensor([0, 1, 2, 3], dtype=torch.long)
x = torch.tensor([[1, 0], [0, 1], [1, 0], [0, 1]], dtype=torch.float)
batch = torch.tensor([0, 0, 1, 1], dtype=torch.long)

# Wrap the graph in a Data object
data = Data(x=x, edge_index=edge_index, edge_type=edge_type, batch=batch)

# Build the DataLoader
loader = DataLoader([data], batch_size=2, shuffle=False)
dataset = SimpleDataset()

# Instantiate the model
model = IGMC(dataset)

# Inference
model.eval()
for data in loader:
    out = model(data)
    print(out)
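One detail worth calling out in IGMC.forward (an explanatory aside based on the code above, not text from the original notes): instead of pooling over all nodes, the center-node readout keeps only the target user and target item of each enclosing subgraph, identified by their one-hot node features. On the toy features above:

users = x[:, 0] == 1  # tensor([True, False, True, False]) -> nodes 0 and 2 act as "user" nodes
items = x[:, 1] == 1  # tensor([False, True, False, True]) -> nodes 1 and 3 act as "item" nodes

Their layer-wise states are concatenated pairwise, which is why IGMC's self.lin1 takes 2 * sum(latent_dim) input features instead of sum(latent_dim) as in the base GNN.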
