当前位置: 首页 > news >正文

软件2_算法功能23

目录

  • 功能2
  • 功能3
  • 功能23

功能2

import xgboost as xgb
import lightgbm as lgbm
import pandas as pd
import numpy as np
import pymysql
import xgboost as xgb
import lightgbm as lgbm
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
from datetime import date, timedelta
import gc
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import pymysql
from sklearn.decomposition import PCA
from flask import Flask, request, session
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from flask_cors import CORS
import psycopg2
from dataclasses import dataclass
import pymysql
import json
from flask import Flask, request, url_for, redirect, render_template, jsonify
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False  # 解决中文乱码问题
import warnings
warnings.filterwarnings("ignore")
import joblib
import datetime
# import mysql.connector
import time
import random
import json
import collections
import random
import matplotlib.pyplot as plt
import os
import copy
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
import pandas as pd
import xgboost as xgb
import psycopg2
import json
import psycopg2
from psycopg2 import Binary
import joblib
import pickle
import lightgbm as lgb
from joblib import dump, load
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sqlalchemy import create_engine
import json
from gevent import pywsgi
import knn
import creat_table
import random_forest
app = Flask(__name__)
CORS(app)
#################################################################################### 1 获取数据
# host = "localhost"
# port = "5432"
# database = "copd"
# user = "postgres"
# password = 111111
#
# PostgreSQL connection settings for the "software1" database.
host = "10.16.48.219"
port = "5432"
database = "software1"
user = "pg"
password = "111111"  # fix: was the bare int 111111; DB drivers expect a string
def connect_pg():
    """Open a psycopg2 connection using the module-level PG settings."""
    return psycopg2.connect(
        database=database, user=user, password=password, host=host, port=port
    )
def connect_mysql():
    """Open a pymysql connection to the 'medical' database (rows as dicts)."""
    return pymysql.connect(
        host='10.16.48.219',
        user='root',
        password='111111',
        database='medical',
        cursorclass=pymysql.cursors.DictCursor,
    )
## connection, table_name
def get_data(connection, table_name):
    """Read the whole table *table_name* into a DataFrame and close the connection.

    NOTE(review): table_name is interpolated into the SQL text, so callers
    must never pass untrusted input here.
    """
    sql = f"select * from \"{table_name}\""
    frame = pd.read_sql(sql, connection)
    connection.close()
    return frame
#################################################################################### 2 数据处理:处理 字段、特征
def per_data(data, disease_code):
    """Drop excluded columns, fill NaNs with 0, and split rows into train/test.

    Rows 0-149 form the training frame, everything after row 149 the test
    frame.  ``disease_code`` is accepted for interface parity but unused here.
    """
    columns = data.columns
    print(columns)
    # Columns excluded from the feature set.
    dropped = ('Case_ID', 'BUN', 'M1_M2', 'TH2', 'TH2', 'IBILI', 'GLO')
    keep = [name for name in columns if name not in dropped]
    features = data[keep].fillna(0)
    print(data)
    print(features)
    # Fixed positional split: first 150 rows train, remainder test.
    return features.iloc[:150], features.iloc[150:]
#################################################################################### 3 模型训练
# xgb
def train_xgb(train, test, model_name):
    """Train an XGBoost binary classifier and store the pickled model in PG.

    Predictions for *test* are computed but kept local; returns 0.
    """
    booster_params = {
        "booster": 'gbtree',
        'objective': 'binary:logistic',
        'eval_metric': 'auc',
        'silent': 0,
        'eta': 0.01,              # learning rate
        'max_depth': 5,
        'min_child_weight': 1,
        'gamma': 0,
        'lambda': 1,
        'colsample_bylevel': 0.7,
        'colsample_bytree': 0.7,
        'subsample': 0.9,
        'scale_pos_weight': 1,
    }
    dtrain = xgb.DMatrix(train.drop(['label'], axis=1), label=train['label'])
    dtest = xgb.DMatrix(test.drop(['label'], axis=1))
    model = xgb.train(booster_params, dtrain, 200, [(dtrain, 'train')])
    predict = pd.DataFrame(model.predict(dtest), columns=['target'])
    # Persist the pickled booster in train_model3 (type 1 = xgboost).
    connection = connect_pg()
    cursor = connection.cursor()
    cursor.execute(
        "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)",
        (1, model_name, 'xgboost_model', 'Gpb', pickle.dumps(model)),
    )
    connection.commit()
    cursor.close()
    connection.close()
    return 0
# gbdt
def train_gbdt(train, test, model_name):
    """Fit a GradientBoosting classifier and store the pickled model in PG."""
    print("%%%%训练_model_gbdt%%%%")
    clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, max_depth=3, random_state=42)
    clf.fit(train.drop(['label'], axis=1), train['label'])
    y_pred = pd.DataFrame(clf.predict(test.drop(['label'], axis=1)), columns=['prob'])
    # Persist the pickled model in train_model3 (type 2 = GBDT).
    connection = connect_pg()
    cursor = connection.cursor()
    cursor.execute(
        "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)",
        (2, model_name, 'gbdt_model', 'Gpb', pickle.dumps(clf)),
    )
    connection.commit()
    cursor.close()
    connection.close()
    return 0
# lr
def train_lr(train, test, model_name):
    """Fit a logistic-regression classifier and store the pickled model in PG."""
    print("%%%%训练_model_lr%%%%")
    clf = LogisticRegression(random_state=42)
    clf.fit(train.drop(['label'], axis=1), train['label'])
    y_pred = pd.DataFrame(clf.predict(test.drop(['label'], axis=1)), columns=['prob'])
    # Persist the pickled model in train_model3 (type 3 = logistic regression).
    connection = connect_pg()
    cursor = connection.cursor()
    cursor.execute(
        "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)",
        (3, model_name, 'lr_model', 'Gpb', pickle.dumps(clf)),
    )
    connection.commit()
    cursor.close()
    connection.close()
    return 0
# svm
def train_svm(train, test, model_name):
    """Fit a linear SVM classifier and store the pickled model in PG."""
    print("%%%%训练_model_svm%%%%")
    clf = LinearSVC(C=1.0, random_state=42)
    clf.fit(train.drop(['label'], axis=1), train['label'])
    y_pred = pd.DataFrame(clf.predict(test.drop(['label'], axis=1)), columns=['prob'])
    # Persist the pickled model in train_model3 (type 4 = SVM).
    connection = connect_pg()
    cursor = connection.cursor()
    cursor.execute(
        "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)",
        (4, model_name, 'svm_model', 'Gpb', pickle.dumps(clf)),
    )
    connection.commit()
    cursor.close()
    connection.close()
    return 0
# rf
def train_rf(train, test, model_name):
    """Fit a random-forest classifier and store the pickled model in PG."""
    print("%%%%训练_model_rf%%%%")
    clf = RandomForestClassifier(n_estimators=100, random_state=42)
    clf.fit(train.drop(['label'], axis=1), train['label'])
    y_pred = pd.DataFrame(clf.predict(test.drop(['label'], axis=1)), columns=['prob'])
    # Persist the pickled model in train_model3 (type 5 = random forest).
    connection = connect_pg()
    cursor = connection.cursor()
    cursor.execute(
        "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)",
        (5, model_name, 'rf_model', 'Gpb', pickle.dumps(clf)),
    )
    connection.commit()
    cursor.close()
    connection.close()
    return 0
# cart
def train_cart(train, test, model_name):
    """Fit a CART decision-tree classifier and store the pickled model in PG."""
    print("%%%%训练_model_cart%%%%")
    clf = DecisionTreeClassifier(criterion='gini', max_depth=5, random_state=42)
    clf.fit(train.drop(['label'], axis=1), train['label'])
    y_pred = pd.DataFrame(clf.predict(test.drop(['label'], axis=1)), columns=['prob'])
    # Persist the pickled model in train_model3 (type 6 = CART).
    connection = connect_pg()
    cursor = connection.cursor()
    cursor.execute(
        "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)",
        (6, model_name, 'cart_model', 'Gpb', pickle.dumps(clf)),
    )
    connection.commit()
    cursor.close()
    connection.close()
    # NOTE(review): the other train_* helpers return 0; this one returns 1 —
    # preserved as-is, but confirm whether callers depend on it.
    return 1
#################################################################################### 接口2:模型训练
# 前端给算法的请求  四个参数(model_name, algorithm_code, table_name, disease_code):
# {
#             "model_name": "慢性非阻塞_cart",
#             "algorithm_code": 6,
#             "table_name": "Diabetes",
#             "disease_code": "慢性阻塞性肺病"
# }
# 算法给前端响应
# {
#     "success": true,
#     "message": "请求成功",
#     "data": [
#         {
#             "id": 1 //训练好的模型,存入数据库,返回数据库中的id
#         }
#     ]
# }
def per_data_pulic():
    """Build fixed train/test frames from the public "merge_copy2" table.

    Labels each row 1 when diagname equals the hard-coded disease name,
    else 0.  Returns the first 200 rows as train and the last 200 as test.
    """
    table_name = "merge_copy2"
    disease_code = "慢性阻塞性肺病"
    connection_pulic = psycopg2.connect(
        database="medical", user="pg", password=111111,
        host="10.16.48.219", port="5432",
    )
    data = get_data(connection_pulic, table_name)
    data.fillna(0, inplace=True)

    def to_float(value):
        # Leave dates untouched; coerce everything else to float when possible.
        if isinstance(value, datetime.date):
            return value
        try:
            return float(value)
        except ValueError:
            return value

    data = data.applymap(to_float)
    # Numeric columns plus 'diagname' (needed below to derive the label);
    # assign() is only used to append 'diagname' to the column list.
    numeric_cols = data.select_dtypes(include=['int', 'float']).assign(diagname="diagname").columns.tolist()
    print(numeric_cols)
    frame = data.loc[:, numeric_cols]
    frame['label'] = 0
    frame['label'] = frame['diagname'].apply(lambda x: 1 if x == disease_code else 0)
    frame['label'] = frame['label'].astype(int)
    frame.drop(columns=['diagname'], inplace=True)
    print(frame)
    return frame.iloc[:200], frame.iloc[-200:]
def train_model_cs(model_name, algorithm_code, table_name, disease_code):
    """Dispatch training to the algorithm selected by *algorithm_code* (1-6).

    Returns the chosen trainer's result, or 0 for an unknown code.
    """
    connection = connect_pg()
    data = get_data(connection, table_name)  # kept only for its side effects
    train, test = per_data_pulic()
    trainers = {
        1: (train_xgb, 'train_test_xgb'),
        2: (train_gbdt, 'train_test_gbdt'),
        3: (train_lr, 'train_test_lr'),
        4: (train_svm, 'train_test_svm'),
        5: (train_rf, 'train_test_rf'),
        6: (train_cart, 'train_test_cart'),
    }
    entry = trainers.get(algorithm_code)
    if entry is None:
        return 0
    trainer_fn, tag = entry
    predict = trainer_fn(train, test, model_name)
    print(predict)
    print(tag)
    return predict
# 训练 (model_name, algorithm_code, table_name, disease_code):
# model_name = "慢性非阻塞_6"
# algorithm_code =  6
# table_name = "Diabetes"
# disease_code = "慢性阻塞性肺病"
# predict = train_model_cs(model_name, algorithm_code, table_name, disease_code)#   http://127.0.0.1:5000/interface2
# 模型训练
@app.route("/interface2", methods=["POST"])
def train_model():
    """POST /interface2 — train a model and store it in the database.

    Expects a JSON body with model_name, algorithm_code, table_name and
    disease_code.  Responds with the string "1" on success, "0" for an
    unknown algorithm_code.
    """
    # SECURITY FIX: the original ran eval() on the raw request body (four
    # times), executing arbitrary expressions from the network.  Parse the
    # body as JSON exactly once instead.
    payload = json.loads(request.data)
    model_name = payload['model_name']
    algorithm_code = payload['algorithm_code']
    table_name = payload['table_name']
    disease_code = payload['disease_code']
    print(model_name, algorithm_code, table_name, disease_code)
    connection = connect_pg()
    data = get_data(connection, table_name)  # kept only for its side effects
    train, test = per_data_pulic()
    trainers = {
        1: (train_xgb, 'train_test_xgb'),
        2: (train_gbdt, 'train_test_gbdt'),
        3: (train_lr, 'train_test_lr'),
        4: (train_svm, 'train_test_svm'),
        5: (train_rf, 'train_test_rf'),
        6: (train_cart, 'train_test_cart'),
    }
    if algorithm_code not in trainers:
        # BUG FIX: the original returned the int 0, which Flask rejects as a
        # response value; return a string instead.
        return "0"
    trainer_fn, tag = trainers[algorithm_code]
    predict = trainer_fn(train, test, model_name)
    print(predict)
    print(tag)
    return "1"
#################################################################################### 接口5:预测id选择
# 前端给算法的请求   点击即可
# {
#             "id": "121",
#             "id": "122",
#             "id": "123",
#             "id": "124"
# }
# 算法给前端响应
# {
#     "success": true,
#     "message": "请求成功",
#     "data": [
#         {
#             "id": "121",
#             "id": "122",
#             "id": "123",
#             "id": "124"   //重新返回id , 这里会有预测结果
#         }
#     ]
# }# pulic_test 数据读取
def per_data_pulic_test(test_id):
    """Fetch feature rows for the given patient ids from "merge_copy2".

    Returns ``(features, features_with_patient_id)``; *features* carries the
    derived label column and only numeric model inputs.
    """
    table_name = "merge_copy2"
    disease_code = "慢性阻塞性肺病"
    connection_pulic = psycopg2.connect(
        database="medical", user="pg", password=111111,
        host="10.16.48.219", port="5432",
    )
    data = get_data(connection_pulic, table_name)
    data.fillna(0, inplace=True)

    def to_float(value):
        # Keep dates; coerce other values to float when possible.
        if isinstance(value, datetime.date):
            return value
        try:
            return float(value)
        except ValueError:
            return value

    data = data.applymap(to_float)
    # Numeric columns plus 'diagname' (for the label) and 'patient_id'
    # (for row selection).
    cols = data.select_dtypes(include=['int', 'float']).assign(diagname="diagname").columns.tolist()
    cols.append('patient_id')
    frame = data.loc[:, cols]
    frame['label'] = 0
    frame['label'] = frame['diagname'].apply(lambda x: 1 if x == disease_code else 0)
    frame['label'] = frame['label'].astype(int)
    # Normalise patient_id to a string (floats become ints first) so that
    # membership testing against test_id works.
    frame['patient_id'] = frame['patient_id'].apply(lambda x: int(x) if isinstance(x, float) else x)
    frame['patient_id'] = frame['patient_id'].astype(str)
    frame = frame.loc[frame['patient_id'].isin(test_id)]
    with_ids = frame.copy()
    frame.drop(columns=['diagname'], inplace=True)
    frame.drop(columns=['patient_id'], inplace=True)
    return frame, with_ids
def test_model_cs(model_name, test_id):
    """Load the stored model *model_name* and predict for *test_id* patients.

    Returns a dict mapping patient_id -> 0/1 (threshold 0.5 on the score).
    NOTE(review): this module-level helper is later shadowed by a route
    handler of the same name.
    """
    test, with_ids = per_data_pulic_test(test_id)
    connection = connect_pg()
    cursor = connection.cursor()
    x_test = test.drop(['label'], axis=1)
    # Fetch the pickled model and its algorithm type from the database.
    cursor.execute("SELECT training_parameters FROM train_model3 WHERE model_name = %s", (model_name,))
    model_str = cursor.fetchone()[0]
    cursor.execute("SELECT type FROM train_model3 WHERE model_name = %s", (model_name,))
    algo_type = cursor.fetchone()[0]
    # SECURITY NOTE: pickle.loads executes arbitrary code if the table is
    # ever writable by untrusted parties.
    model = pickle.loads(model_str)
    if algo_type == 1:
        scores = model.predict(xgb.DMatrix(x_test))  # xgboost needs a DMatrix
    else:
        scores = model.predict(x_test)
    print('------------------------------test----patient_id_copy--------------------------')
    with_ids = with_ids.reset_index(drop=True)
    result_df = pd.concat([with_ids, pd.DataFrame({'predict': scores})], axis=1)
    result_df['predict'] = result_df['predict'].apply(lambda x: 1 if x > 0.5 else 0)
    print(result_df)
    result_df = result_df[['patient_id', 'predict']]
    result_dict = result_df.set_index('patient_id')['predict'].to_dict()
    print(result_dict)
    print('------------------------------test----patient_id_copy--------------------------')
    return result_dict
# 预测 test_model(disease_code, id, test_id):
# model_name = "慢性非阻塞_xgb"
# test_id = ['801080447', '801030382', 'J00356713']
# predict = test_model_cs(model_name, test_id)
# print(predict)
#
# model_name = "慢性非阻塞_xgb"
# test_id = ['801080447', '801030382', 'J00356713']
# 模型预测
@app.route("/interface5", methods=["POST"])
def interface5_predict():
    """POST /interface5 — predict with a stored model for a list of patients.

    JSON body: {"model_name": ..., "test_id": [...]}.  Responds with a JSON
    object mapping patient_id -> 0/1.

    Fixes over the original:
    - SECURITY: request body parsed with json.loads instead of eval();
    - renamed from test_model_cs — the route handler shadowed the
      module-level helper of the same name, which it duplicated line for
      line; it now delegates to that helper.
    """
    payload = json.loads(request.data)
    model_name = payload['model_name']
    # test_id entries are patient_id strings from the merge table,
    # e.g. ['801080447', '801030382', 'J00356713'].
    test_id = payload['test_id']
    return jsonify(test_model_cs(model_name, test_id))


if __name__ == '__main__':
    app.config['JSON_AS_ASCII'] = False
    CORS(app)
    app.run(host='127.0.0.1', port=5000)

功能3

import xgboost as xgb
import lightgbm as lgbm
import pandas as pd
import numpy as np
import pymysql
import xgboost as xgb
import lightgbm as lgbm
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
from datetime import date, timedelta
import gc
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import pymysql
from sklearn.decomposition import PCA
from flask import Flask, request,session
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from flask_cors import CORS
import psycopg2
from dataclasses import dataclass
import pymysql
from flask import Flask, request, url_for, redirect, render_template, jsonify
app = Flask(__name__)
import warnings
warnings.filterwarnings("ignore")
import joblib
import datetime
#import mysql.connector
import time
import random
import json
import collections
import random
import matplotlib.pyplot as plt
import os
import copy
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
import pandas as pd
import xgboost as xgb
import json
import psycopg2
from psycopg2 import Binary
import joblib
import pickle
import lightgbm as lgb
from joblib import dump, load
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sqlalchemy import create_engine
import json
from gevent import pywsgi
import knn
import creat_table
import random_forest
app = Flask(__name__)
CORS(app)
#################################################################################### 1 获取数据
# host = "localhost"
# port = "5432"
# database = "copd"
# user = "postgres"
# password = 111111
#
# PostgreSQL connection settings (default database "medical").
host = "10.16.48.219"
port = "5432"
database = "medical"
user = "pg"
password = "111111"  # fix: was the bare int 111111; DB drivers expect a string
def connect_pg_3():
    """Open a psycopg2 connection to the "medical" database."""
    return psycopg2.connect(
        database="medical", user=user, password=password, host=host, port=port
    )
def connect_pg_software1():
    """Open a psycopg2 connection to the "software1" database."""
    return psycopg2.connect(
        database="software1", user=user, password=password, host=host, port=port
    )
def connect_mysql():
    """Open a pymysql connection to the 'public' database (rows as dicts)."""
    return pymysql.connect(
        host='10.16.48.219',
        user='root',
        password='111111',
        database='public',
        cursorclass=pymysql.cursors.DictCursor,
    )
## connection, table_name
def get_data(connection, table_name):
    """Read the whole table *table_name* into a DataFrame and close the connection.

    NOTE(review): table_name is interpolated into the SQL text, so callers
    must never pass untrusted input here.
    """
    sql = f"select * from \"{table_name}\""
    frame = pd.read_sql(sql, connection)
    connection.close()
    return frame
# 接口 3_5
# 计算 0 1 比例(数据,疾病名称,条件列,范围)
def Interface3_5_hbl(test_name,test_describe,test_conditions,disease,table_name,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right):# 存储 test 信息 test_name,test_describe,test_conditionsconnection = connect_pg_3()data = get_data(connection, table_name)#1 age 条件筛选data['age'] = data['age'].astype(int)data = data.loc[data['age'] > age_left]data = data.loc[data['age'] < age_right]#2 sexname 条件筛选data = data.loc[data['sexname'] == sexname]print(data['sexname'])#3 city 条件筛选data = data.loc[data['city'] == city]print(data['city'])data['diagname_code'] = 0data.loc[data['diagname'] == disease, 'diagname_code'] = 1zba = data.copy()zbb = data.copy()#4 AB试验 统一指标 左右区间# Azba[zb] = zba[zb].astype(float)zba = zba.loc[zba[zb] > zba_left]zba = zba.loc[zba[zb] < zba_right]# Bzbb[zb] = zbb[zb].astype(float)zbb = zbb.loc[zbb[zb] > zbb_left]zbb = zbb.loc[zbb[zb] < zbb_right]# 疾病比例计算print('zba样本数:', len(zba))zba_1 = len(zba[zba['diagname_code'] == 1])zba_0 = len(zba[zba['diagname_code'] == 0])zba_reat = zba_1 / (zba_1 + zba_0)# 疾病比例计算print('zbb样本数:',len(zbb))zbb_1 = len(zbb[zbb['diagname_code'] == 1])zbb_0 = len(zbb[zbb['diagname_code'] == 0])zbb_reat = zbb_1 / (zbb_1 + zbb_0)################################ 连接到 pg 数据库connection = connect_pg_software1()cursor = connection.cursor()# current_time = datetime.datetime.now(# 构造 SQL INSERT 语句query = "INSERT INTO testab_model1 (test_name,test_describe,test_conditions,disease,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right,table_name,zba_reat,zbb_reat) VALUES (%s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s)"cursor.execute(query, (test_name,test_describe,test_conditions,disease,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right,table_name,zba_reat,zbb_reat))connection.commit()# 关闭数据库连接cursor.close()connection.close()return zba_reat,zbb_reat@app.route("/interface3_5", methods=["POST"])
def Interface3_5():
    """POST /interface3_5 — run an A/B prevalence test and store the result.

    Fixes over the original:
    - SECURITY: request body parsed with json.loads instead of repeated
      eval() calls on raw network data;
    - the empty-cohort guard for cohort B tested cohort A's counts and the
      (garbled) fallthrough clobbered zbb_reat with 0 (copy-paste bug);
      cohort B now guards its own counts.
    """
    payload = json.loads(request.data)
    a_ms = payload['a_ms']
    b_ms = payload['b_ms']
    test_name = payload['test_name']
    test_describe = payload['test_describe']
    test_conditions = payload['test_conditions']
    disease = payload['disease']
    table_name = payload['table_name']
    age_left = float(payload['age_left'])
    age_right = float(payload['age_right'])
    sexname = payload['sexname']
    city = payload['city']
    zb = payload['zb']
    zba_left = float(payload['zba_left'])
    zba_right = float(payload['zba_right'])
    zbb_left = float(payload['zbb_left'])
    zbb_right = float(payload['zbb_right'])
    connection = connect_pg_3()
    data = get_data(connection, table_name)
    # 1: age window (exclusive bounds)
    data['age'] = data['age'].astype(int)
    data = data.loc[data['age'] > age_left]
    data = data.loc[data['age'] < age_right]
    # 2: sex filter
    data = data.loc[data['sexname'] == sexname]
    print(data['sexname'])
    # 3: city filter
    data = data.loc[data['city'] == city]
    print(data['city'])
    # Binary disease indicator.
    data['diagname_code'] = 0
    data.loc[data['diagname'] == disease, 'diagname_code'] = 1
    zba = data.copy()
    zbb = data.copy()
    # 4: A/B cohorts — same indicator, separate (exclusive) ranges.
    zba[zb] = zba[zb].astype(float)
    zba = zba.loc[zba[zb] > zba_left]
    zba = zba.loc[zba[zb] < zba_right]
    zbb[zb] = zbb[zb].astype(float)
    zbb = zbb.loc[zbb[zb] > zbb_left]
    zbb = zbb.loc[zbb[zb] < zbb_right]
    # Prevalence of cohort A (0 when the cohort is empty).
    print('zba样本数:', len(zba))
    zba_1 = len(zba[zba['diagname_code'] == 1])
    zba_0 = len(zba[zba['diagname_code'] == 0])
    zba_reat = zba_1 / (zba_1 + zba_0) if (zba_1 + zba_0) else 0
    # Prevalence of cohort B (0 when the cohort is empty).
    strlen = len(zbb)
    print('zbb样本数:', strlen)
    zbb_1 = len(zbb[zbb['diagname_code'] == 1])
    zbb_0 = len(zbb[zbb['diagname_code'] == 0])
    zbb_reat = zbb_1 / (zbb_1 + zbb_0) if (zbb_1 + zbb_0) else 0
    # Persist the run in PG.
    connection = connect_pg_software1()
    cursor = connection.cursor()
    query = "INSERT INTO testab_model1 (test_name,test_describe,test_conditions,disease,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right,table_name,zba_reat,zbb_reat) VALUES (%s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s)"
    cursor.execute(query, (test_name, test_describe, test_conditions, disease, age_left, age_right, sexname, city, zb, zba_left, zba_right, zbb_left, zbb_right, table_name, zba_reat, zbb_reat))
    connection.commit()
    cursor.close()
    connection.close()
    data = {
        "strlen": strlen,
        "test_name": test_name,
        "test_describe": test_describe,
        "disease": disease,
        "test_conditions": test_conditions,
        "table_name": table_name,
        "a_ms": a_ms,
        "b_ms": b_ms,
        "zba_reat": zba_reat,
        "zbb_reat": zbb_reat,
    }
    return jsonify(data)
# 特殊标签 AB验证
# 特殊标签 AB验证 — demo driver for Interface3_5_hbl.
# Fix: this demo previously ran at import time, opening DB connections and
# writing a test row as a side effect of merely importing the module.  It is
# now guarded by __main__ together with the dev-server startup.
if __name__ == '__main__':
    connection = connect_pg_3()
    data = get_data(connection, "merge")
    # Experiment title / description / scenario.
    test_name = '高血压是否影响慢性阻塞性肺病3'
    test_describe = '判断高血压是否影响慢性阻塞性肺病3'
    test_conditions = '临床试验'
    # Disease and source table.
    disease = '慢性阻塞性肺病'
    table_name = 'merge'
    # 1: age window.
    age_left = 60
    age_right = 80
    # 2: sex, 3: city.
    sexname = '男性'
    city = '绵阳市'
    # 4: shared indicator with A/B ranges.
    zb = 'LYMPH_per'
    zba_left = 6
    zba_right = 80
    zbb_left = 6
    zbb_right = 80
    zba_reat, zbb_reat = Interface3_5_hbl(test_name, test_describe, test_conditions, disease, table_name, age_left, age_right, sexname, city, zb, zba_left, zba_right, zbb_left, zbb_right)
    print("zba_reat患病率为:", zba_reat * 100, "%")
    print("zbb_reat患病率为:", zbb_reat * 100, "%")
    app.config['JSON_AS_ASCII'] = False
    CORS(app)
    app.run(host='127.0.0.1', port=5000)

功能23

import xgboost as xgb
import lightgbm as lgbm
import pandas as pd
import numpy as np
import pymysql
import xgboost as xgb
import lightgbm as lgbm
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import StratifiedKFold
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
from datetime import date, timedelta
import gc
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import pymysql
from sklearn.decomposition import PCA
from flask import Flask, request, session
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from flask_cors import CORS
import psycopg2
from dataclasses import dataclass
import pymysql
import json
from flask import Flask, request, url_for, redirect, render_template, jsonify
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False  # 解决中文乱码问题
import warnings
warnings.filterwarnings("ignore")
import joblib
import datetime
# import mysql.connector
import time
import random
import json
import collections
import random
import matplotlib.pyplot as plt
import os
import copy
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
import pandas as pd
import xgboost as xgb
import psycopg2
import json
import psycopg2
from psycopg2 import Binary
import joblib
import pickle
import lightgbm as lgb
from joblib import dump, load
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sqlalchemy import create_engine
import json
from gevent import pywsgi
import knn
import creat_table
import random_forest
app = Flask(__name__)
CORS(app)
#################################################################################### 1 获取数据
# host = "localhost"
# port = "5432"
# database = "copd"
# user = "postgres"
# password = 111111
#
# PostgreSQL connection settings for the "software1" database.
host = "10.16.48.219"
port = "5432"
database = "software1"
user = "pg"
password = "111111"  # fix: was the bare int 111111; DB drivers expect a string
def connect_pg():
    """Open a psycopg2 connection using the module-level PG settings."""
    return psycopg2.connect(
        database=database, user=user, password=password, host=host, port=port
    )
def connect_mysql():
    """Open a pymysql connection to the 'medical' database (rows as dicts)."""
    return pymysql.connect(
        host='10.16.48.219',
        user='root',
        password='111111',
        database='medical',
        cursorclass=pymysql.cursors.DictCursor,
    )
## connection, table_name
def get_data(connection, table_name):
    """Read the whole table *table_name* into a DataFrame and close the connection.

    NOTE(review): table_name is interpolated into the SQL text, so callers
    must never pass untrusted input here.
    """
    sql = f"select * from \"{table_name}\""
    frame = pd.read_sql(sql, connection)
    connection.close()
    return frame
#################################################################################### 2 数据处理:处理 字段、特征
def per_data(data, disease_code):
    """Drop excluded columns, fill NaNs with 0, and split rows into train/test.

    Rows 0-149 form the training frame, everything after row 149 the test
    frame.  ``disease_code`` is accepted for interface parity but unused here.
    """
    columns = data.columns
    print(columns)
    # Columns excluded from the feature set.
    dropped = ('Case_ID', 'BUN', 'M1_M2', 'TH2', 'TH2', 'IBILI', 'GLO')
    keep = [name for name in columns if name not in dropped]
    features = data[keep].fillna(0)
    print(data)
    print(features)
    # Fixed positional split: first 150 rows train, remainder test.
    return features.iloc[:150], features.iloc[150:]
#################################################################################### 3 模型训练
# xgb
def train_xgb(train, test, model_name):
    """Train an XGBoost binary classifier and persist it to PostgreSQL.

    Args:
        train: DataFrame of features plus a binary 'label' column.
        test: DataFrame with the same layout; scored once, result discarded.
        model_name: key under which the pickled booster is stored in the
            ``train_model3`` table (type code 1).

    Returns:
        0 on completion (the predictions themselves are not returned).
    """
    booster_params = {
        "booster": 'gbtree',
        'objective': 'binary:logistic',
        'eval_metric': 'auc',
        'silent': 0,
        'eta': 0.01,               # learning rate, 0.01~0.2
        'max_depth': 5,            # 3~10
        'min_child_weight': 1,
        'gamma': 0,
        'lambda': 1,
        'colsample_bylevel': 0.7,
        'colsample_bytree': 0.7,   # 0.5~1
        'subsample': 0.9,          # 0.5~1
        'scale_pos_weight': 1,
    }
    train_matrix = xgb.DMatrix(train.drop(['label'], axis=1), label=train['label'])
    test_matrix = xgb.DMatrix(test.drop(['label'], axis=1))
    booster = xgb.train(booster_params, train_matrix, 200, [(train_matrix, 'train')])

    # Scored but never returned; kept only for parity with the original flow.
    _ = pd.DataFrame(booster.predict(test_matrix), columns=['target'])

    # Persist the pickled booster; type code 1 marks xgboost rows.
    pg_conn = connect_pg()
    pg_cursor = pg_conn.cursor()
    insert_sql = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    pg_cursor.execute(insert_sql, (1, model_name, 'xgboost_model', 'Gpb', pickle.dumps(booster)))
    pg_conn.commit()
    pg_cursor.close()
    pg_conn.close()
    return 0
# gbdt
def train_gbdt(train, test, model_name):
    """Train a gradient-boosting classifier and persist it (type code 2).

    Args:
        train: DataFrame of features plus a binary 'label' column.
        test: same layout; scored once, result discarded.
        model_name: row key in the ``train_model3`` table.

    Returns:
        0 on completion.
    """
    print("%%%%训练_model_gbdt%%%%")
    estimator = GradientBoostingClassifier(
        n_estimators=100, learning_rate=0.1, max_depth=3, random_state=42
    )
    estimator.fit(train.drop(['label'], axis=1), train['label'])

    # Scored but never returned; kept only for parity with the original flow.
    _ = pd.DataFrame(estimator.predict(test.drop(['label'], axis=1)), columns=['prob'])

    # Persist the pickled estimator.
    pg_conn = connect_pg()
    pg_cursor = pg_conn.cursor()
    insert_sql = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    pg_cursor.execute(insert_sql, (2, model_name, 'gbdt_model', 'Gpb', pickle.dumps(estimator)))
    pg_conn.commit()
    pg_cursor.close()
    pg_conn.close()
    return 0
# lr
def train_lr(train, test, model_name):
    """Train a logistic-regression classifier and persist it (type code 3).

    Args:
        train: DataFrame of features plus a binary 'label' column.
        test: same layout; scored once, result discarded.
        model_name: row key in the ``train_model3`` table.

    Returns:
        0 on completion.
    """
    print("%%%%训练_model_lr%%%%")
    estimator = LogisticRegression(random_state=42)
    estimator.fit(train.drop(['label'], axis=1), train['label'])

    # Scored but never returned; kept only for parity with the original flow.
    _ = pd.DataFrame(estimator.predict(test.drop(['label'], axis=1)), columns=['prob'])

    # Persist the pickled estimator.
    pg_conn = connect_pg()
    pg_cursor = pg_conn.cursor()
    insert_sql = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    pg_cursor.execute(insert_sql, (3, model_name, 'lr_model', 'Gpb', pickle.dumps(estimator)))
    pg_conn.commit()
    pg_cursor.close()
    pg_conn.close()
    return 0
# svm
def train_svm(train, test, model_name):
    """Train a linear SVM classifier and persist it (type code 4).

    Args:
        train: DataFrame of features plus a binary 'label' column.
        test: same layout; scored once, result discarded.
        model_name: row key in the ``train_model3`` table.

    Returns:
        0 on completion.
    """
    print("%%%%训练_model_svm%%%%")
    estimator = LinearSVC(C=1.0, random_state=42)
    estimator.fit(train.drop(['label'], axis=1), train['label'])

    # Scored but never returned; kept only for parity with the original flow.
    _ = pd.DataFrame(estimator.predict(test.drop(['label'], axis=1)), columns=['prob'])

    # Persist the pickled estimator.
    pg_conn = connect_pg()
    pg_cursor = pg_conn.cursor()
    insert_sql = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    pg_cursor.execute(insert_sql, (4, model_name, 'svm_model', 'Gpb', pickle.dumps(estimator)))
    pg_conn.commit()
    pg_cursor.close()
    pg_conn.close()
    return 0
# rf
def train_rf(train, test, model_name):
    """Train a random-forest classifier and persist it (type code 5).

    Args:
        train: DataFrame of features plus a binary 'label' column.
        test: same layout; scored once, result discarded.
        model_name: row key in the ``train_model3`` table.

    Returns:
        0 on completion.
    """
    print("%%%%训练_model_rf%%%%")
    estimator = RandomForestClassifier(n_estimators=100, random_state=42)
    estimator.fit(train.drop(['label'], axis=1), train['label'])

    # Scored but never returned; kept only for parity with the original flow.
    _ = pd.DataFrame(estimator.predict(test.drop(['label'], axis=1)), columns=['prob'])

    # Persist the pickled estimator.
    pg_conn = connect_pg()
    pg_cursor = pg_conn.cursor()
    insert_sql = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    pg_cursor.execute(insert_sql, (5, model_name, 'rf_model', 'Gpb', pickle.dumps(estimator)))
    pg_conn.commit()
    pg_cursor.close()
    pg_conn.close()
    return 0
# cart
def train_cart(train, test, model_name):
    """Train a CART decision-tree classifier and persist it (type code 6).

    Args:
        train: DataFrame of features plus a binary 'label' column.
        test: same layout; scored once, result discarded.
        model_name: row key in the ``train_model3`` table.

    Returns:
        1 on completion.  NOTE(review): the other train_* helpers return 0;
        this asymmetry is preserved here -- confirm whether it is intended.
    """
    print("%%%%训练_model_cart%%%%")
    estimator = DecisionTreeClassifier(criterion='gini', max_depth=5, random_state=42)
    estimator.fit(train.drop(['label'], axis=1), train['label'])

    # Scored but never returned; kept only for parity with the original flow.
    _ = pd.DataFrame(estimator.predict(test.drop(['label'], axis=1)), columns=['prob'])

    # Persist the pickled estimator.
    pg_conn = connect_pg()
    pg_cursor = pg_conn.cursor()
    insert_sql = "INSERT INTO train_model3 (type, model_name, model_description, trainer, training_parameters) VALUES (%s, %s, %s, %s, %s)"
    pg_cursor.execute(insert_sql, (6, model_name, 'cart_model', 'Gpb', pickle.dumps(estimator)))
    pg_conn.commit()
    pg_cursor.close()
    pg_conn.close()
    return 1
#################################################################################### 接口2:模型训练
# 前端给算法的请求  四个参数(model_name, algorithm_code, table_name, disease_code):
# {
#             "model_name": "慢性非阻塞_cart",
#             "algorithm_code": 6,
#             "table_name": "Diabetes",
#             "disease_code": "慢性阻塞性肺病"
# }
# 算法给前端响应
# {
#     "success": true,
#     "message": "请求成功",
#     "data": [
#         {
#             "id": 1 //训练好的模型,存入数据库,返回数据库中的id
#         }
#     ]
# }
def per_data_pulic():
    """Load the public 'merge_copy2' table and build train/test frames.

    Connects to the shared 'medical' database, keeps only numeric columns
    (plus 'diagname'), derives a binary 'label' from the COPD diagnosis,
    and returns the first 200 rows as train and the last 200 as test.

    Fixes over the original: a dead duplicate ``select_dtypes`` call, unused
    locals (model_name/algorithm_code) and a dead ``label = 0`` initialiser
    were removed; behavior is otherwise unchanged.

    Returns:
        (train, test): DataFrames of numeric features plus 'label'.
    """
    table_name = "merge_copy2"
    disease_code = "慢性阻塞性肺病"
    # NOTE(review): hard-coded credentials; password is an int literal.
    connection_pulic = psycopg2.connect(
        database="medical", user="pg", password=111111,
        host="10.16.48.219", port="5432",
    )
    data = get_data(connection_pulic, table_name)
    data.fillna(0, inplace=True)

    def try_convert_to_float(value):
        # Dates stay dates; everything else is coerced to float when possible
        # so select_dtypes below can find the numeric columns.
        if isinstance(value, datetime.date):
            return value
        try:
            return float(value)
        except ValueError:
            return value

    data = data.applymap(try_convert_to_float)
    # Numeric feature columns plus 'diagname' (assign() just appends the
    # name to the column list).
    numeric_cols = data.select_dtypes(include=['int', 'float']).assign(diagname="diagname").columns.tolist()
    print(numeric_cols)
    numeric_cols_data = data.loc[:, numeric_cols]
    # Binary target: 1 for the requested diagnosis, 0 otherwise.
    numeric_cols_data['label'] = numeric_cols_data['diagname'].apply(lambda x: 1 if x == disease_code else 0)
    numeric_cols_data['label'] = numeric_cols_data['label'].astype(int)
    numeric_cols_data.drop(columns=['diagname'], inplace=True)
    print(numeric_cols_data)
    # First/last 200 rows; NOTE: these overlap when the table has < 400 rows.
    train = numeric_cols_data.iloc[:200]
    test = numeric_cols_data.iloc[-200:]
    return train, test
def train_model_cs(model_name, algorithm_code, table_name, disease_code):
    """Train and persist the model selected by ``algorithm_code`` (1-6).

    Args:
        model_name: storage key for the trained model.
        algorithm_code: 1=xgb, 2=gbdt, 3=lr, 4=svm, 5=rf, 6=cart.
        table_name: table fetched for parity with the HTTP route; the data
            actually used comes from per_data_pulic().
        disease_code: unused here (kept for interface compatibility).

    Returns:
        The selected trainer's return value, or 0 for an unknown code.
    """
    connection = connect_pg()
    data = get_data(connection, table_name)  # fetched but not used below
    train, test = per_data_pulic()
    dispatch = {
        1: (train_xgb, 'train_test_xgb'),
        2: (train_gbdt, 'train_test_gbdt'),
        3: (train_lr, 'train_test_lr'),
        4: (train_svm, 'train_test_svm'),
        5: (train_rf, 'train_test_rf'),
        6: (train_cart, 'train_test_cart'),
    }
    if algorithm_code not in dispatch:
        return 0
    trainer, tag = dispatch[algorithm_code]
    predict = trainer(train, test, model_name)
    print(predict)
    print(tag)
    return predict
# 训练 (model_name, algorithm_code, table_name, disease_code):
# model_name = "慢性非阻塞_6"
# algorithm_code =  6
# table_name = "Diabetes"
# disease_code = "慢性阻塞性肺病"
# predict = train_model_cs(model_name, algorithm_code, table_name, disease_code)#   http://127.0.0.1:5000/interface2
# 模型训练
@app.route("/interface2", methods=["POST"])
def train_model():
    """POST /interface2 -- train a model and persist it in the database.

    Body: {"model_name": ..., "algorithm_code": 1-6, "table_name": ...,
    "disease_code": ...}.  Returns "1" on success, "0" for an unknown
    algorithm code.

    Fixes over the original:
      * eval(request.data) on the raw body allowed arbitrary code execution;
        the body is now parsed with json.loads;
      * the unknown-code branch returned int 0, which Flask cannot
        serialise -- it now returns the string "0";
      * the dead local ``a = request`` was removed.
    """
    payload = json.loads(request.data)
    model_name = payload['model_name']
    algorithm_code = payload['algorithm_code']
    table_name = payload['table_name']
    disease_code = payload['disease_code']
    print(model_name, algorithm_code, table_name, disease_code)
    connection = connect_pg()
    data = get_data(connection, table_name)  # fetched but unused, as before
    train, test = per_data_pulic()
    trainers = {
        1: (train_xgb, 'train_test_xgb'),
        2: (train_gbdt, 'train_test_gbdt'),
        3: (train_lr, 'train_test_lr'),
        4: (train_svm, 'train_test_svm'),
        5: (train_rf, 'train_test_rf'),
        6: (train_cart, 'train_test_cart'),
    }
    if algorithm_code not in trainers:
        return "0"
    trainer, tag = trainers[algorithm_code]
    predict = trainer(train, test, model_name)
    print(predict)
    print(tag)
    return "1"
#################################################################################### Interface 5: prediction-id selection
# 前端给算法的请求   点击即可
# {
#             "id": "121",
#             "id": "122",
#             "id": "123",
#             "id": "124"
# }
# 算法给前端响应
# {
#     "success": true,
#     "message": "请求成功",
#     "data": [
#         {
#             "id": "121",
#             "id": "122",
#             "id": "123",
#             "id": "124"   //重新返回id , 这里会有预测结果
#         }
#     ]
# }# pulic_test 数据读取
def per_data_pulic_test(test_id):
    """Load 'merge_copy2' and return the rows matching the given patient ids.

    Args:
        test_id: iterable of patient-id strings used to filter the table.

    Returns:
        (features, patient_copy): ``features`` is the numeric frame plus
        'label' (with patient_id/diagname dropped) ready for scoring;
        ``patient_copy`` keeps 'patient_id' so the caller can join the
        predictions back to patients.

    Fix over the original: a dead ``label = 0`` initialiser (immediately
    overwritten) was removed; behavior is otherwise unchanged.
    """
    table_name = "merge_copy2"
    disease_code = "慢性阻塞性肺病"
    # NOTE(review): hard-coded credentials; password is an int literal.
    connection_pulic = psycopg2.connect(
        database="medical", user="pg", password=111111,
        host="10.16.48.219", port="5432",
    )
    data = get_data(connection_pulic, table_name)
    data.fillna(0, inplace=True)

    def try_convert_to_float(value):
        # Dates stay dates; everything else is coerced to float when possible.
        if isinstance(value, datetime.date):
            return value
        try:
            return float(value)
        except ValueError:
            return value

    data = data.applymap(try_convert_to_float)
    numeric_cols = data.select_dtypes(include=['int', 'float']).assign(diagname="diagname").columns.tolist()
    numeric_cols.append('patient_id')  # keep patient_id for the id filter below
    numeric_cols_data = data.loc[:, numeric_cols]
    # Binary target: 1 for the requested diagnosis, 0 otherwise.
    numeric_cols_data['label'] = numeric_cols_data['diagname'].apply(lambda x: 1 if x == disease_code else 0)
    numeric_cols_data['label'] = numeric_cols_data['label'].astype(int)
    # Normalise patient_id to str (float ids become ints first) so isin()
    # matches the string ids supplied by the caller.
    numeric_cols_data['patient_id'] = numeric_cols_data['patient_id'].apply(lambda x: int(x) if isinstance(x, float) else x)
    numeric_cols_data['patient_id'] = numeric_cols_data['patient_id'].astype(str)
    numeric_cols_data = numeric_cols_data.loc[numeric_cols_data['patient_id'].isin(test_id)]
    patient_id_copy = numeric_cols_data.copy()
    numeric_cols_data.drop(columns=['diagname'], inplace=True)
    numeric_cols_data.drop(columns=['patient_id'], inplace=True)
    return numeric_cols_data, patient_id_copy
def test_model_cs(model_name, test_id):
    """Score the stored model ``model_name`` on the patients in ``test_id``.

    NOTE(review): this helper is shadowed by the /interface5 route handler
    of the same name defined further down the file -- confirm which one is
    meant to survive.

    Args:
        model_name: key of the stored model row in ``train_model3``.
        test_id: iterable of patient-id strings.

    Returns:
        dict mapping patient_id -> 0/1 prediction (probabilities are
        binarised at 0.5).

    Fix over the original: the pickled model and its type code are fetched
    with one SELECT instead of two separate queries, so both values are
    guaranteed to come from the same row.
    """
    test, patient_id_copy = per_data_pulic_test(test_id)
    connection = connect_pg()
    cursor = connection.cursor()
    x_test = test.drop(['label'], axis=1)
    query = "SELECT training_parameters, type FROM train_model3 WHERE model_name = %s"
    cursor.execute(query, (model_name,))
    model_str, model_type = cursor.fetchone()
    # SECURITY: pickle.loads executes arbitrary code -- only load models
    # from a trusted database.
    loaded_model = pickle.loads(model_str)
    if model_type == 1:
        # Type 1 rows hold a raw xgboost Booster, which needs a DMatrix.
        predictions = loaded_model.predict(xgb.DMatrix(x_test))
    else:
        predictions = loaded_model.predict(x_test)
    patient_id_copy = patient_id_copy.reset_index(drop=True)
    result_df = pd.concat([patient_id_copy, pd.DataFrame({'predict': predictions})], axis=1)
    # Binarise probabilities at 0.5.
    result_df['predict'] = result_df['predict'].apply(lambda x: 1 if x > 0.5 else 0)
    result_df = result_df[['patient_id', 'predict']]
    result_dict = result_df.set_index('patient_id')['predict'].to_dict()
    print(result_dict)
    return result_dict
# 预测 test_model(disease_code, id, test_id):
# model_name = "慢性非阻塞_xgb"
# test_id = ['801080447', '801030382', 'J00356713']
# predict = test_model_cs(model_name, test_id)
# print(predict)
#
# model_name = "慢性非阻塞_xgb"
# test_id = ['801080447', '801030382', 'J00356713']
# 模型预测
@app.route("/interface5", methods=["POST"])
def test_model_cs():
    """POST /interface5 -- score a stored model on the requested patient ids.

    Body: {"model_name": ..., "test_id": [patient_id, ...]} where the ids
    are patient_id strings from the merge table.  Returns a JSON object
    mapping patient_id -> 0/1 prediction.

    NOTE(review): this route handler shadows the module-level helper of the
    same name defined earlier in the file.

    Fixes over the original:
      * eval(request.data) on the raw body allowed arbitrary code execution;
        the body is now parsed with json.loads;
      * the pickled model and its type code are fetched with one SELECT
        instead of two, so both values come from the same row.
    """
    payload = json.loads(request.data)
    model_name = payload['model_name']
    test_id = payload['test_id']
    test, patient_id_copy = per_data_pulic_test(test_id)
    connection = connect_pg()
    cursor = connection.cursor()
    x_test = test.drop(['label'], axis=1)
    query = "SELECT training_parameters, type FROM train_model3 WHERE model_name = %s"
    cursor.execute(query, (model_name,))
    model_str, model_type = cursor.fetchone()
    # SECURITY: pickle.loads executes arbitrary code -- the model store must
    # be trusted.
    loaded_model = pickle.loads(model_str)
    if model_type == 1:
        # Type 1 rows hold a raw xgboost Booster, which needs a DMatrix.
        predictions = loaded_model.predict(xgb.DMatrix(x_test))
    else:
        predictions = loaded_model.predict(x_test)
    patient_id_copy = patient_id_copy.reset_index(drop=True)
    result_df = pd.concat([patient_id_copy, pd.DataFrame({'predict': predictions})], axis=1)
    # Binarise probabilities at 0.5.
    result_df['predict'] = result_df['predict'].apply(lambda x: 1 if x > 0.5 else 0)
    result_df = result_df[['patient_id', 'predict']]
    result_dict = result_df.set_index('patient_id')['predict'].to_dict()
    print(result_dict)
    return result_dict


def connect_pg_3():
    """Open a psycopg2 connection to the shared 'medical' database."""
    pg_connection = psycopg2.connect(database="medical", user=user, password=password, host=host, port=port)
    return pg_connection
def connect_pg_software1():
    """Open a psycopg2 connection to the 'software1' database."""
    return psycopg2.connect(
        database="software1",
        user=user,
        password=password,
        host=host,
        port=port,
    )
def connect_mysql():
    """Open a PyMySQL connection to the 'public' database (dict-row cursor).

    NOTE(review): this re-definition shadows the earlier connect_mysql
    (database='medical') defined above -- confirm which target is intended.
    """
    return pymysql.connect(
        host='10.16.48.219',
        user='root',
        password='111111',
        database='public',
        cursorclass=pymysql.cursors.DictCursor,
    )
## connection, table_name
def get_data(connection, table_name):
    """Read an entire table into a DataFrame and close the connection.

    NOTE(review): this re-definition shadows the identical get_data defined
    earlier in the file; the same injection guard is applied here so both
    behave consistently.

    Args:
        connection: an open DB-API connection (psycopg2 or pymysql).
        table_name: name of the table to dump; must be a plain identifier.

    Returns:
        pandas.DataFrame holding every row of the table.

    Raises:
        ValueError: if ``table_name`` is not a valid identifier (guards the
            f-string interpolation against SQL injection).
    """
    if not str(table_name).isidentifier():
        raise ValueError(f"invalid table name: {table_name!r}")
    query = f"select * from \"{table_name}\""
    data = pd.read_sql(query, connection)
    connection.close()
    return data
# 接口 3_5
# 计算 0 1 比例(数据,疾病名称,条件列,范围)
def Interface3_5_hbl(test_name,test_describe,test_conditions,disease,table_name,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right):# 存储 test 信息 test_name,test_describe,test_conditionsconnection = connect_pg_3()data = get_data(connection, table_name)#1 age 条件筛选data['age'] = data['age'].astype(int)data = data.loc[data['age'] > age_left]data = data.loc[data['age'] < age_right]#2 sexname 条件筛选data = data.loc[data['sexname'] == sexname]print(data['sexname'])#3 city 条件筛选data = data.loc[data['city'] == city]print(data['city'])data['diagname_code'] = 0data.loc[data['diagname'] == disease, 'diagname_code'] = 1zba = data.copy()zbb = data.copy()#4 AB试验 统一指标 左右区间# Azba[zb] = zba[zb].astype(float)zba = zba.loc[zba[zb] > zba_left]zba = zba.loc[zba[zb] < zba_right]# Bzbb[zb] = zbb[zb].astype(float)zbb = zbb.loc[zbb[zb] > zbb_left]zbb = zbb.loc[zbb[zb] < zbb_right]# 疾病比例计算print('zba样本数:', len(zba))zba_1 = len(zba[zba['diagname_code'] == 1])zba_0 = len(zba[zba['diagname_code'] == 0])zba_reat = zba_1 / (zba_1 + zba_0)# 疾病比例计算print('zbb样本数:',len(zbb))zbb_1 = len(zbb[zbb['diagname_code'] == 1])zbb_0 = len(zbb[zbb['diagname_code'] == 0])zbb_reat = zbb_1 / (zbb_1 + zbb_0)################################ 连接到 pg 数据库connection = connect_pg_software1()cursor = connection.cursor()# current_time = datetime.datetime.now(# 构造 SQL INSERT 语句query = "INSERT INTO testab_model1 (test_name,test_describe,test_conditions,disease,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right,table_name,zba_reat,zbb_reat) VALUES (%s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s)"cursor.execute(query, (test_name,test_describe,test_conditions,disease,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right,table_name,zba_reat,zbb_reat))connection.commit()# 关闭数据库连接cursor.close()connection.close()return zba_reat,zbb_reat@app.route("/interface3_5", methods=["POST"])
def Interface3_5():
    """POST /interface3_5 -- run the A/B prevalence experiment from JSON.

    Body carries the experiment metadata (test_name/describe/conditions,
    a_ms/b_ms), the disease and table names, and the age/sex/city/indicator
    filter windows.  The result is stored in ``testab_model1`` and returned
    as JSON.

    Fixes over the original:
      * eval(request.data) was called once per field on the raw body,
        allowing arbitrary code execution -- the body is parsed once with
        json.loads;
      * the cohort-B guard mistakenly checked cohort A's counts and assigned
        zba_reat, and a stray ``zbb_reat = 0`` then clobbered the computed
        rate (leaving zbb_reat undefined on some paths) -- cohort B is now
        guarded and computed correctly.
    """
    payload = json.loads(request.data)
    a_ms = payload['a_ms']
    b_ms = payload['b_ms']
    test_name = payload['test_name']
    test_describe = payload['test_describe']
    test_conditions = payload['test_conditions']
    disease = payload['disease']
    table_name = payload['table_name']
    age_left = payload['age_left']
    age_right = payload['age_right']
    sexname = payload['sexname']
    city = payload['city']
    zb = payload['zb']
    zba_left = payload['zba_left']
    zba_right = payload['zba_right']
    zbb_left = payload['zbb_left']
    zbb_right = payload['zbb_right']

    connection = connect_pg_3()
    data = get_data(connection, table_name)
    # 1) age window (exclusive bounds)
    age_left = float(age_left)
    age_right = float(age_right)
    data['age'] = data['age'].astype(int)
    data = data.loc[data['age'] > age_left]
    data = data.loc[data['age'] < age_right]
    # 2) sex filter
    data = data.loc[data['sexname'] == sexname]
    print(data['sexname'])
    # 3) city filter
    data = data.loc[data['city'] == city]
    print(data['city'])
    # Binary diagnosis flag.
    data['diagname_code'] = 0
    data.loc[data['diagname'] == disease, 'diagname_code'] = 1
    zba = data.copy()
    zbb = data.copy()
    # 4) cohort A/B windows on the shared indicator column
    zba_left = float(zba_left)
    zba_right = float(zba_right)
    zba[zb] = zba[zb].astype(float)
    zba = zba.loc[zba[zb] > zba_left]
    zba = zba.loc[zba[zb] < zba_right]
    zbb_left = float(zbb_left)
    zbb_right = float(zbb_right)
    zbb[zb] = zbb[zb].astype(float)
    zbb = zbb.loc[zbb[zb] > zbb_left]
    zbb = zbb.loc[zbb[zb] < zbb_right]
    # Prevalence in cohort A (guarded against an empty cohort).
    print('zba样本数:', len(zba))
    zba_1 = len(zba[zba['diagname_code'] == 1])
    zba_0 = len(zba[zba['diagname_code'] == 0])
    zba_reat = zba_1 / (zba_1 + zba_0) if (zba_1 + zba_0) else 0
    # Prevalence in cohort B (guarded on cohort B's own counts).
    strlen = len(zbb)
    print('zbb样本数:', len(zbb))
    zbb_1 = len(zbb[zbb['diagname_code'] == 1])
    zbb_0 = len(zbb[zbb['diagname_code'] == 0])
    zbb_reat = zbb_1 / (zbb_1 + zbb_0) if (zbb_1 + zbb_0) else 0
    # Persist the experiment record.
    connection = connect_pg_software1()
    cursor = connection.cursor()
    query = "INSERT INTO testab_model1 (test_name,test_describe,test_conditions,disease,age_left,age_right,sexname,city,zb,zba_left,zba_right,zbb_left,zbb_right,table_name,zba_reat,zbb_reat) VALUES (%s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s, %s, %s, %s,%s, %s)"
    cursor.execute(query, (test_name, test_describe, test_conditions, disease, age_left, age_right, sexname, city, zb, zba_left, zba_right, zbb_left, zbb_right, table_name, zba_reat, zbb_reat))
    connection.commit()
    cursor.close()
    connection.close()
    data = {
        "strlen": strlen,
        "test_name": test_name,
        "test_describe": test_describe,
        "disease": disease,
        "test_conditions": test_conditions,
        "table_name": table_name,
        "a_ms": a_ms,
        "b_ms": b_ms,
        "zba_reat": zba_reat,
        "zbb_reat": zbb_reat,
    }
    return jsonify(data)
# (data, disease name, condition column, value ranges)
# 特殊标签 AB验证
# ---------------------------------------------------------------------------
# Module-level demo: special-label A/B validation.
# NOTE(review): everything below runs at import time, before the Flask app is
# started, so the module cannot be imported without a reachable database --
# confirm this demo call is intentional; normally it would live behind the
# __main__ guard.
# ---------------------------------------------------------------------------
connection = connect_pg_3()
data = get_data(connection, "merge")

# Inputs for Interface3_5_hbl(test_name, test_describe, test_conditions,
# disease, table_name, age_left, age_right, sexname, city, zb,
# zba_left, zba_right, zbb_left, zbb_right):

# experiment title
test_name = '高血压是否影响慢性阻塞性肺病3'
# experiment description
test_describe = '判断高血压是否影响慢性阻塞性肺病3'
# experiment scenario
test_conditions = '临床试验'
# disease under study (interface parameter)
disease = '慢性阻塞性肺病'
# source table (interface parameter)
table_name = 'merge'
# 1) age window
age_left = 60
age_right = 80
# 2) sex
sexname = '男性'
# 3) city
city = '绵阳市'
# 4) shared A/B indicator column and its value windows
zb = 'LYMPH_per'
zba_left = 6
zba_right = 80
zbb_left = 6
zbb_right = 80

zba_reat, zbb_reat = Interface3_5_hbl(
    test_name, test_describe, test_conditions, disease, table_name,
    age_left, age_right, sexname, city, zb,
    zba_left, zba_right, zbb_left, zbb_right,
)
print("zba_reat患病率为:", zba_reat * 100, "%")
print("zbb_reat患病率为:", zbb_reat * 100, "%")

# Show the sample sizes above; start the dev server when run directly.
if __name__ == '__main__':
    app.config['JSON_AS_ASCII'] = False
    CORS(app)
    app.run(host='127.0.0.1', port=5000)

相关文章:

  • 数据库(28)——联合查询
  • Web前端Hack:深入探索、挑战与防范
  • 【C++】深入理解decltype和decltype(auto)
  • MyBatisPlus插件生成代码
  • Web前端 CodeView:深度解析与实用指南
  • .net后端程序发布到nignx上,通过nginx访问
  • 【React】json-server
  • 【第13章】SpringBoot实战篇之项目部署
  • 医疗器械网络安全风险管理的基本步骤
  • 多关键字排序
  • C++STL(四)priority_queue的详细用法及仿函数实现
  • 什么是pump?pump跟单机器人是什么?
  • Windows Docker手动迁移镜像
  • 深入理解交叉熵损失CrossEntropyLoss - 信息论(交叉熵)
  • JVM学习-监控工具(三)
  • 【Leetcode】101. 对称二叉树
  • 【407天】跃迁之路——程序员高效学习方法论探索系列(实验阶段164-2018.03.19)...
  • 【跃迁之路】【735天】程序员高效学习方法论探索系列(实验阶段492-2019.2.25)...
  • angular组件开发
  • canvas 五子棋游戏
  • Date型的使用
  • ES6 ...操作符
  • JavaScript中的对象个人分享
  • js
  • Js基础知识(四) - js运行原理与机制
  • LeetCode541. Reverse String II -- 按步长反转字符串
  • niucms就是以城市为分割单位,在上面 小区/乡村/同城论坛+58+团购
  • python3 使用 asyncio 代替线程
  • Python进阶细节
  • quasar-framework cnodejs社区
  • React中的“虫洞”——Context
  • text-decoration与color属性
  • uva 10370 Above Average
  • Vue UI框架库开发介绍
  • vue-cli3搭建项目
  • 关于 Linux 进程的 UID、EUID、GID 和 EGID
  • 欢迎参加第二届中国游戏开发者大会
  • 解析 Webpack中import、require、按需加载的执行过程
  • 如何进阶一名有竞争力的程序员?
  • 使用 @font-face
  • 文本多行溢出显示...之最后一行不到行尾的解决
  • 消息队列系列二(IOT中消息队列的应用)
  • k8s使用glusterfs实现动态持久化存储
  • Nginx实现动静分离
  • 蚂蚁金服CTO程立:真正的技术革命才刚刚开始
  • 组复制官方翻译九、Group Replication Technical Details
  • # 计算机视觉入门
  • #1015 : KMP算法
  • #if 1...#endif
  • #pragam once 和 #ifndef 预编译头
  • #免费 苹果M系芯片Macbook电脑MacOS使用Bash脚本写入(读写)NTFS硬盘教程
  • $(selector).each()和$.each()的区别
  • (9)STL算法之逆转旋转
  • (day6) 319. 灯泡开关
  • (solr系列:一)使用tomcat部署solr服务