# XGLM-Inference / app.py
# Hugging Face Space by abdullah2021 (commit 3b382ac) — multilingual text
# generation demo built on facebook/xglm-4.5B with a Gradio front end.
import gradio as gr
from transformers import pipeline
# Hugging Face Hub tag of the multilingual XGLM checkpoint to serve.
hf_tag = "facebook/xglm-4.5B"
# Build the text-generation pipeline once at startup; this downloads and
# loads the ~4.5B-parameter model, so it is deliberately module-level.
xglm_model = pipeline(task='text-generation', model=hf_tag)
def query_xglm(prompt: str, print_response=True, min_rl=40, max_rl=260):
    """Generate a continuation of *prompt* with the XGLM pipeline.

    Parameters
    ----------
    prompt : str
        Text to continue.
    print_response : bool
        Unused; kept for backward compatibility with existing callers.
    min_rl, max_rl : int
        Requested minimum / maximum amount of generated material on top of
        the prompt length.  NOTE(review): the pipeline's ``min_length`` /
        ``max_length`` are measured in *tokens*, while ``len(prompt)``
        counts characters, so these bounds are only approximate — confirm
        the intended behavior.

    Returns
    -------
    str
        The generated text of the first candidate (includes the prompt).
    """
    result = xglm_model(
        prompt,
        do_sample=True,                     # stochastic sampling, not greedy
        min_length=min_rl + len(prompt),
        max_length=max_rl + len(prompt),
        top_k=40,                           # sample from the 40 most likely tokens
        temperature=0.6,                    # mildly conservative randomness
        clean_up_tokenization_spaces=True,
    )
    # The pipeline returns a list of candidate dicts; take the first one.
    return result[0]['generated_text']
# Multilingual example prompts shown below the input box.  The original code
# referenced an undefined name `samples`, which raised NameError at import
# time; defining it here fixes the crash.
samples = [
    ["Once upon a time"],
    ["Il était une fois"],
    ["Érase una vez"],
]

# Wire the generation function into a simple text-in / text-out web UI.
face = gr.Interface(
    fn=query_xglm,
    inputs=['text'],
    outputs=['text'],
    title='MULTILINGUAL TEXT GENERATION',
    examples=samples,
)
face.launch()