hjp157688 committed
Commit 5918d46 · verified · 1 parent: 02dcc2f

Update pages/对话式文本检测工具.py

Files changed (1)
  1. pages/对话式文本检测工具.py +208 -208
pages/对话式文本检测工具.py CHANGED
@@ -1,209 +1,209 @@
- import torch
- import streamlit as st
- import subprocess
- import os
- import torch.nn as nn
- import torch.nn.functional as F
- from transformers import BertTokenizer, BertModel
- import appbuilder
- # Load the pretrained encoder
- pretrained = BertModel.from_pretrained('hfl/chinese-macbert-base')
- # Move it to the GPU when one is available
- device = 'cuda' if torch.cuda.is_available() else 'cpu'
- pretrained.to(device)
- # The encoder stays frozen, so no gradients are needed
- for param in pretrained.parameters():
-     param.requires_grad_(False)
-
- # Multi-head attention
- class MultiHeadAttention(nn.Module):
-     def __init__(self, hidden_size, num_heads):
-         super(MultiHeadAttention, self).__init__()
-         # The hidden size must be divisible by the number of heads
-         assert hidden_size % num_heads == 0
-         self.hidden_size = hidden_size
-         self.num_heads = num_heads
-         self.head_dim = hidden_size // num_heads  # dimension of each head
-         # Linear projections for queries, keys and values
-         self.linear_q = nn.Linear(hidden_size, hidden_size)
-         self.linear_k = nn.Linear(hidden_size, hidden_size)
-         self.linear_v = nn.Linear(hidden_size, hidden_size)
-         self.linear_out = nn.Linear(hidden_size, hidden_size)  # output projection that merges the heads
-
-     def forward(self, x):
-         batch_size, seq_len, _ = x.size()
-         # Project the input and split it into heads
-         q = self.linear_q(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
-         k = self.linear_k(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
-         v = self.linear_v(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
-         # Attention scores
-         scores = torch.matmul(q, k.transpose(-2, -1)) / torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float))
-         attn_weights = F.softmax(scores, dim=-1)  # attention weights
-         # Weighted sum of the values
-         context = torch.matmul(attn_weights, v).transpose(1, 2).contiguous().view(batch_size, seq_len, self.hidden_size)
-         out = self.linear_out(context)  # merge the heads
-         return out
-
- class Model(nn.Module):
-     def __init__(self):
-         super(Model, self).__init__()
-         self.fc1 = nn.Linear(768, 512)  # first fully connected layer
-         self.fc2 = nn.Linear(512, 256)  # second fully connected layer
-         self.fc3 = nn.Linear(256, 2)  # third fully connected layer
-         self.dropout = nn.Dropout(p=0.5)
-         self.bn1 = nn.BatchNorm1d(512)
-         self.bn2 = nn.BatchNorm1d(256)
-         self.activation = nn.ReLU()
-         self.multihead_attention = MultiHeadAttention(hidden_size=768, num_heads=8)  # multi-head attention module
-
-     def forward(self, input_ids, attention_mask, token_type_ids):
-         out = pretrained(input_ids=input_ids,
-                          attention_mask=attention_mask,
-                          token_type_ids=token_type_ids).last_hidden_state
-
-         # Apply multi-head attention
-         out = self.multihead_attention(out)
-         out = out[:, 0]  # take the [CLS] token output
-
-         out = self.activation(self.bn1(self.fc1(out)))
-         out = self.dropout(out)
-         out = self.activation(self.bn2(self.fc2(out)))
-         out = self.dropout(out)
-         out = self.fc3(out)
-         out = out.softmax(dim=1)
-         return out
-
-
- def load_models_and_predict(text, device):
-     # Load the fine-tuned classifiers (forward slashes keep the paths portable)
-     MacBERT_base_CDialBias = torch.load('models/MacBERT-base-CDialBias.pth', map_location=device)
-     MacBERT_base_CDialBias.to(device)
-     MacBERT_base_COLD = torch.load('models/MacBERT-base-COLD.pth', map_location=device)
-     MacBERT_base_COLD.to(device)
-
-     # API key and app ID
-     os.environ['APPBUILDER_TOKEN'] = "bce-v3/ALTAK-n2XgeA6FS3Q5E7Jab6UwE/850b44ebec64c4cad705986ab0b5e3df4b05d407"
-     app_id = "df881861-9fa6-40b6-b3bd-26df5f5d4b9a"
-
-     # Initialise the agent client
-     your_agent = appbuilder.AppBuilderClient(app_id)
-
-     # Create a conversation id
-     conversation_id = your_agent.create_conversation()
-
-     # Load the vocabulary and tokenizer
-     tokenizer = BertTokenizer.from_pretrained('hfl/chinese-macbert-base')
-
-     # Encode the input text
-     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
-
-     # Move the inputs to the same device as the models
-     inputs = {k: v.to(device) for k, v in inputs.items()}
-
-     # Switch the models to evaluation mode
-     MacBERT_base_CDialBias.eval()
-     MacBERT_base_COLD.eval()
-
-     # Ask the Qianfan agent for its label
-     msg = your_agent.run(conversation_id, text)
-     answer = msg.content.answer
-
-     # Run both classifiers
-     with torch.no_grad():
-         out1 = MacBERT_base_CDialBias(**inputs)
-         out2 = MacBERT_base_COLD(**inputs)
-
-     out1 = torch.argmax(out1, dim=1).item()
-     out2 = torch.argmax(out2, dim=1).item()
-     out3 = int(answer[0])  # the agent answers "0" or "1"; compare as an int
-
-     # Combine the three labels
-     if out3 == 1:
-         if out1 == out2 == 1:
-             result = "这句话具有攻击性和社会偏见"
-         elif out1 == 0 and out2 == 1:
-             result = "这句话具有攻击性,但无社会偏见"
-         elif out1 == 1 and out2 == 0:
-             result = "这句话不具有攻击性,但具有社会偏见"
-         else:
-             result = "这句话具有攻击性"
-     elif out3 == 0:
-         if out1 == out2 == 0:
-             result = "这句话不具有攻击性和社会偏见"
-         elif out1 == 0 and out2 == 1:
-             result = "这句话具有攻击性,但无社会偏见"
-         elif out1 == 1 and out2 == 0:
-             result = "这句话不具有攻击性,但具有社会偏见"
-         else:
-             result = "这句话不具有攻击性"
-     return result
-
- def run_command(command):
-     try:
-         subprocess.run(command, shell=True, check=True)
-     except subprocess.CalledProcessError as e:
-         print(f"Error executing command: {e}")
-
- # Build the page
- st.title("☁礼貌用语检测器")
-
- with st.sidebar:
-     # Initialise the session state
-     if 'logged_in' not in st.session_state:
-         st.session_state.logged_in = False
-
-     # Username and password inputs
-     username = st.sidebar.text_input('用户名')
-     password = st.sidebar.text_input('密码', type='password')
-
-     # Login button
-     if st.sidebar.button('登录'):
-         # Validation logic goes here, e.g. checking the username and password
-         if username == 'admin' and password == '12345':
-             st.session_state.logged_in = True
-             st.sidebar.success('登录成功!')
-         else:
-             st.error('用户名或密码错误,请重试。')
-             st.stop()
-
-     # Clear the conversation
-     clear = st.button("清除")
-     if clear:
-         st.session_state.clear()
-
- st.divider()
-
- # Conversation history
- if "memory" not in st.session_state:
-     st.session_state['memory'] = []
-     st.session_state['message'] = [{"role": "ai",
-                                     "content": "你好!我是“礼貌用语检测器”。在这里,我能够帮助你检测中文语言中的攻击性和社会偏见内容,维护一个文明、和谐的交流环境。请告诉我你的需求,我会尽力提供帮助。"}]
-
- for message in st.session_state['message']:
-     st.chat_message(message["role"]).write(message["content"])
-
- # Input box
- text = st.chat_input()
-
- # Main flow
- if text and st.session_state.logged_in:
-     # Save the question to message and memory
-     st.session_state["message"].append({"role": "human", "content": text})
-     st.session_state["memory"].append(text)
-     st.chat_message("human").write(text)
-     # Get the answer
-     with st.spinner("AI正在思考中,请稍等..."):
-         result = load_models_and_predict(text, device)
-
-     # Save the answer to message and memory
-     st.session_state["message"].append({"role": "ai", "content": result})
-     st.session_state["memory"].append(result)
-     st.chat_message("ai").write(result)
-
- elif text and not st.session_state.logged_in:
-     st.error('请先登录!')
 
+ import torch
+ import streamlit as st
+ import subprocess
+ import os
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import BertTokenizer, BertModel
+ import appbuilder
+ # Load the pretrained encoder
+ pretrained = BertModel.from_pretrained('hfl/chinese-macbert-base')
+ # Move it to the GPU when one is available
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ pretrained.to(device)
+ # The encoder stays frozen, so no gradients are needed
+ for param in pretrained.parameters():
+     param.requires_grad_(False)
+
+ # Multi-head attention
+ class MultiHeadAttention(nn.Module):
+     def __init__(self, hidden_size, num_heads):
+         super(MultiHeadAttention, self).__init__()
+         # The hidden size must be divisible by the number of heads
+         assert hidden_size % num_heads == 0
+         self.hidden_size = hidden_size
+         self.num_heads = num_heads
+         self.head_dim = hidden_size // num_heads  # dimension of each head
+         # Linear projections for queries, keys and values
+         self.linear_q = nn.Linear(hidden_size, hidden_size)
+         self.linear_k = nn.Linear(hidden_size, hidden_size)
+         self.linear_v = nn.Linear(hidden_size, hidden_size)
+         self.linear_out = nn.Linear(hidden_size, hidden_size)  # output projection that merges the heads
+
+     def forward(self, x):
+         batch_size, seq_len, _ = x.size()
+         # Project the input and split it into heads
+         q = self.linear_q(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
+         k = self.linear_k(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
+         v = self.linear_v(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
+         # Attention scores
+         scores = torch.matmul(q, k.transpose(-2, -1)) / torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float))
+         attn_weights = F.softmax(scores, dim=-1)  # attention weights
+         # Weighted sum of the values
+         context = torch.matmul(attn_weights, v).transpose(1, 2).contiguous().view(batch_size, seq_len, self.hidden_size)
+         out = self.linear_out(context)  # merge the heads
+         return out
+
+ class Model(nn.Module):
+     def __init__(self):
+         super(Model, self).__init__()
+         self.fc1 = nn.Linear(768, 512)  # first fully connected layer
+         self.fc2 = nn.Linear(512, 256)  # second fully connected layer
+         self.fc3 = nn.Linear(256, 2)  # third fully connected layer
+         self.dropout = nn.Dropout(p=0.5)
+         self.bn1 = nn.BatchNorm1d(512)
+         self.bn2 = nn.BatchNorm1d(256)
+         self.activation = nn.ReLU()
+         self.multihead_attention = MultiHeadAttention(hidden_size=768, num_heads=8)  # multi-head attention module
+
+     def forward(self, input_ids, attention_mask, token_type_ids):
+         out = pretrained(input_ids=input_ids,
+                          attention_mask=attention_mask,
+                          token_type_ids=token_type_ids).last_hidden_state
+
+         # Apply multi-head attention
+         out = self.multihead_attention(out)
+         out = out[:, 0]  # take the [CLS] token output
+
+         out = self.activation(self.bn1(self.fc1(out)))
+         out = self.dropout(out)
+         out = self.activation(self.bn2(self.fc2(out)))
+         out = self.dropout(out)
+         out = self.fc3(out)
+         out = out.softmax(dim=1)
+         return out
+
+
+ def load_models_and_predict(text, device):
+     # Load the fine-tuned classifiers (forward slashes keep the paths portable)
+     MacBERT_base_CDialBias = torch.load('hjp157688/ROCAI/models/MacBERT-base-CDialBias.pth', map_location=device)
+     MacBERT_base_CDialBias.to(device)
+     MacBERT_base_COLD = torch.load('hjp157688/ROCAI/models/MacBERT-base-COLD.pth', map_location=device)
+     MacBERT_base_COLD.to(device)
+
+     # API key and app ID
+     os.environ['APPBUILDER_TOKEN'] = "bce-v3/ALTAK-n2XgeA6FS3Q5E7Jab6UwE/850b44ebec64c4cad705986ab0b5e3df4b05d407"
+     app_id = "df881861-9fa6-40b6-b3bd-26df5f5d4b9a"
+
+     # Initialise the agent client
+     your_agent = appbuilder.AppBuilderClient(app_id)
+
+     # Create a conversation id
+     conversation_id = your_agent.create_conversation()
+
+     # Load the vocabulary and tokenizer
+     tokenizer = BertTokenizer.from_pretrained('hfl/chinese-macbert-base')
+
+     # Encode the input text
+     inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
+
+     # Move the inputs to the same device as the models
+     inputs = {k: v.to(device) for k, v in inputs.items()}
+
+     # Switch the models to evaluation mode
+     MacBERT_base_CDialBias.eval()
+     MacBERT_base_COLD.eval()
+
+     # Ask the Qianfan agent for its label
+     msg = your_agent.run(conversation_id, text)
+     answer = msg.content.answer
+
+     # Run both classifiers
+     with torch.no_grad():
+         out1 = MacBERT_base_CDialBias(**inputs)
+         out2 = MacBERT_base_COLD(**inputs)
+
+     out1 = torch.argmax(out1, dim=1).item()
+     out2 = torch.argmax(out2, dim=1).item()
+     out3 = int(answer[0])  # the agent answers "0" or "1"; compare as an int
+
+     # Combine the three labels
+     if out3 == 1:
+         if out1 == out2 == 1:
+             result = "这句话具有攻击性和社会偏见"
+         elif out1 == 0 and out2 == 1:
+             result = "这句话具有攻击性,但无社会偏见"
+         elif out1 == 1 and out2 == 0:
+             result = "这句话不具有攻击性,但具有社会偏见"
+         else:
+             result = "这句话具有攻击性"
+     elif out3 == 0:
+         if out1 == out2 == 0:
+             result = "这句话不具有攻击性和社会偏见"
+         elif out1 == 0 and out2 == 1:
+             result = "这句话具有攻击性,但无社会偏见"
+         elif out1 == 1 and out2 == 0:
+             result = "这句话不具有攻击性,但具有社会偏见"
+         else:
+             result = "这句话不具有攻击性"
+     return result
+
+ def run_command(command):
+     try:
+         subprocess.run(command, shell=True, check=True)
+     except subprocess.CalledProcessError as e:
+         print(f"Error executing command: {e}")
+
+ # Build the page
+ st.title("☁礼貌用语检测器")
+
+ with st.sidebar:
+     # Initialise the session state
+     if 'logged_in' not in st.session_state:
+         st.session_state.logged_in = False
+
+     # Username and password inputs
+     username = st.sidebar.text_input('用户名')
+     password = st.sidebar.text_input('密码', type='password')
+
+     # Login button
+     if st.sidebar.button('登录'):
+         # Validation logic goes here, e.g. checking the username and password
+         if username == 'admin' and password == '12345':
+             st.session_state.logged_in = True
+             st.sidebar.success('登录成功!')
+         else:
+             st.error('用户名或密码错误,请重试。')
+             st.stop()
+
+     # Clear the conversation
+     clear = st.button("清除")
+     if clear:
+         st.session_state.clear()
+
+ st.divider()
+
+ # Conversation history
+ if "memory" not in st.session_state:
+     st.session_state['memory'] = []
+     st.session_state['message'] = [{"role": "ai",
+                                     "content": "你好!我是“礼貌用语检测器”。在这里,我能够帮助你检测中文语言中的攻击性和社会偏见内容,维护一个文明、和谐的交流环境。请告诉我你的需求,我会尽力提供帮助。"}]
+
+ for message in st.session_state['message']:
+     st.chat_message(message["role"]).write(message["content"])
+
+ # Input box
+ text = st.chat_input()
+
+ # Main flow
+ if text and st.session_state.logged_in:
+     # Save the question to message and memory
+     st.session_state["message"].append({"role": "human", "content": text})
+     st.session_state["memory"].append(text)
+     st.chat_message("human").write(text)
+     # Get the answer
+     with st.spinner("AI正在思考中,请稍等..."):
+         result = load_models_and_predict(text, device)
+
+     # Save the answer to message and memory
+     st.session_state["message"].append({"role": "ai", "content": result})
+     st.session_state["memory"].append(result)
+     st.chat_message("ai").write(result)
+
+ elif text and not st.session_state.logged_in:
+     st.error('请先登录!')
  st.stop()
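
The only functional change in this commit is the checkpoint path prefix, and the original strings used Windows-style backslashes, which break on Linux hosts such as Hugging Face Spaces. Below is a minimal sketch of a more robust way to build such paths, assuming the hjp157688/ROCAI/models layout implied by the new path; the MODEL_DIR constant and load_checkpoint helper are illustrative names, not part of the app.

from pathlib import Path

import torch

# Resolve checkpoints relative to this script, so the app works no matter
# which directory Streamlit is launched from. The ROCAI/models layout below
# mirrors the path introduced in this commit and is an assumption.
MODEL_DIR = Path(__file__).resolve().parent / "hjp157688" / "ROCAI" / "models"

def load_checkpoint(name: str, device: str) -> torch.nn.Module:
    """Load a pickled model checkpoint and move it to the target device."""
    # weights_only=False is needed on newer PyTorch releases to unpickle a
    # full nn.Module; only use it for checkpoints you trust.
    model = torch.load(MODEL_DIR / name, map_location=device, weights_only=False)
    model.to(device)
    model.eval()
    return model

# Usage inside load_models_and_predict():
# MacBERT_base_CDialBias = load_checkpoint("MacBERT-base-CDialBias.pth", device)

Passing map_location also keeps a GPU-saved checkpoint loadable on a CPU-only host, which the bare torch.load calls in the committed file would otherwise fail on.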