
How to deploy a bert4keras-based random novel generation model with Gradio

Posted: 2022-10-27 11:14:12


The full deployment script is below. It builds a RoFormer-V2 UniLM model with bert4keras, restores the fine-tuned weights, and wraps the seq2seq decoder in a Gradio text interface. Because the model runs under a TF 1.x-style graph and session, both are captured once at load time and re-bound inside the request handler, so Gradio's worker threads can all call the same model.

from __future__ import print_function
import os
os.environ['TF_KERAS'] = '1'  # bert4keras must use tf.keras here
import glob
import numpy as np
from bert4keras.backend import keras, K
from keras.backend import set_session
from bert4keras.layers import Loss
from bert4keras.models import build_transformer_model
from bert4keras.tokenizers import Tokenizer, load_vocab
from bert4keras.optimizers import Adam
from bert4keras.snippets import sequence_padding, open
from bert4keras.snippets import DataGenerator, AutoRegressiveDecoder  # training-time imports kept from the original script
from keras.models import Model
import tensorflow as tf

# Capture the default graph and session (TF 1.x style) so they can be
# re-bound later inside Gradio's worker threads
graph = tf.get_default_graph()
sess = tf.Session(graph=graph)

# Basic parameters
maxlen = 256 + 128
batch_size = 64
steps_per_epoch = 1000
epochs = 10000

# RoFormer-V2 configuration (a larger variant,
# chinese_roformer-v2-char_L-24_H-1024_A-16, also exists)
config_path = '/root/autodl-tmp/chinese_roformer-v2-char_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '/root/autodl-tmp/chinese_roformer-v2-char_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '/root/autodl-tmp/chinese_roformer-v2-char_L-12_H-768_A-12/vocab.txt'

# Training corpus: autodl-nas/all_txt.txt (not needed at inference time)

# Build the tokenizer; the commented-out block shows how to load a
# simplified vocabulary instead
# token_dict, keep_tokens = load_vocab(
#     dict_path=dict_path,
#     simplified=True,
#     startswith=['[PAD]', '[UNK]', '[CLS]', '[SEP]'],
# )
# tokenizer = Tokenizer(token_dict, do_lower_case=True)
tokenizer = Tokenizer(dict_path, do_lower_case=True)


class CrossEntropy(Loss):
    """Cross-entropy loss that masks out the input segment."""
    def compute_loss(self, inputs, mask=None):
        y_true, y_mask, y_pred = inputs
        y_true = y_true[:, 1:]   # target token_ids
        y_mask = y_mask[:, 1:]   # segment_ids, which mark exactly the part to predict
        y_pred = y_pred[:, :-1]  # predictions, shifted by one position
        loss = K.sparse_categorical_crossentropy(y_true, y_pred)
        loss = K.sum(loss * y_mask) / K.sum(y_mask)
        return loss


class AutoTitle(AutoRegressiveDecoder):
    """seq2seq decoder"""
    def __init__(self, start_id, end_id, maxlen, model):
        super().__init__(start_id, end_id, maxlen)
        self.model = model
        self.models = {}

    @AutoRegressiveDecoder.wraps(default_rtype='probas')
    def predict(self, inputs, output_ids, states):
        token_ids, segment_ids = inputs
        token_ids = np.concatenate([token_ids, output_ids], 1)
        segment_ids = np.concatenate([segment_ids, np.ones_like(output_ids)], 1)
        return self.last_token(self.model).predict([token_ids, segment_ids])

    def generate(self, text, topk=1):
        max_c_len = maxlen - self.maxlen
        token_ids, segment_ids = tokenizer.encode(text, maxlen=max_c_len)
        output_ids = self.beam_search([token_ids, segment_ids], topk=topk)  # beam search
        return tokenizer.decode(output_ids)


# Register the session before building the model
# strategy = tf.distribute.MirroredStrategy()  # single-machine multi-GPU strategy
# with strategy.scope():                       # apply the strategy
set_session(sess)

bert = build_transformer_model(
    config_path,
    checkpoint_path=None,
    model='roformer_v2',
    # with_mlm='linear',
    application='unilm',
    return_keras_model=False,
)
model = bert.model  # this is the actual Keras model
output = CrossEntropy(2)(model.inputs + model.outputs)
model = Model(model.inputs, output)
# model.compile(optimizer=Adam(1e-5))
# model.summary()

bert.load_weights_from_checkpoint(checkpoint_path)  # pretrained weights must be loaded last
model.load_weights('/root/autodl-tmp/best_model.weights')

autotitle = AutoTitle(
    start_id=None,
    end_id=tokenizer._token_end_id,
    maxlen=128 + 64,
    model=model,
)
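Note that generate decodes with beam search, which is deterministic: the same prompt always yields the same continuation, so the two test prints below will be identical. For genuinely random generation, as the title promises, bert4keras's AutoRegressiveDecoder also provides a random_sample method. A minimal sketch, assuming its usual (inputs, n, topk) signature; the helper name random_generate and its defaults are illustrative, not part of the original script:

def random_generate(text, n=2, topk=5):
    # Hypothetical helper: sample n continuations instead of running
    # beam search, so repeated calls (and the n outputs) differ
    max_c_len = maxlen - autotitle.maxlen
    token_ids, segment_ids = tokenizer.encode(text, maxlen=max_c_len)
    results = autotitle.random_sample([token_ids, segment_ids], n, topk=topk)
    return [tokenizer.decode(ids) for ids in results]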
def model_predict(text):
    # Re-bind the captured graph and session on every TensorFlow
    # request, i.e. in each of Gradio's worker threads
    global sess
    global graph
    with graph.as_default():
        set_session(sess)
        summary_data = autotitle.generate(text)
    # K.clear_session()
    return summary_data


import gradio as gr

# Sanity check before serving
for i in range(2):
    print(model_predict("电荷交换"))

demo = gr.Interface(fn=model_predict, inputs="text", outputs="text")
gr.close_all()
demo.launch(server_port=6006)
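Once demo.launch(server_port=6006) is up, the interface can also be exercised over HTTP rather than through the browser UI. A minimal smoke test, assuming a Gradio 3.x server, where the JSON endpoint is /run/predict (2.x releases exposed /api/predict instead):

import requests

resp = requests.post(
    "http://localhost:6006/run/predict",  # use /api/predict on gradio 2.x
    json={"data": ["电荷交换"]},  # same prompt as the sanity check above
)
print(resp.json()["data"][0])  # the generated continuation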
