import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig, BitsAndBytesConfig
import torch

model_id = "truongghieu/deci-finetuned_Prj2"

# Check if a GPU is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 4-bit quantization settings, only used when a GPU is available
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype="float16",
    bnb_4bit_use_double_quant=True,
)

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

# Load the 4-bit quantized model on GPU; fall back to the full-precision checkpoint on CPU
if torch.cuda.is_available():
    model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True, quantization_config=bnb_config)
else:
    model = AutoModelForCausalLM.from_pretrained("truongghieu/deci-finetuned", trust_remote_code=True)

generation_config = GenerationConfig(
    penalty_alpha=0.6,
    do_sample=True,
    top_k=3,
    temperature=0.5,
    repetition_penalty=1.2,
    max_new_tokens=50,
    pad_token_id=tokenizer.eos_token_id,
)

# Take a text input, wrap it in the prompt template, and generate a response
def generate_text(text):
    input_text = f'###Human: "{text}"'
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)
    output_ids = model.generate(input_ids, generation_config=generation_config)
    output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return output_text

iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
iface.launch()
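
Once the interface is launched, it can also be queried programmatically. The sketch below uses gradio_client against Gradio's default local URL and default "/predict" endpoint for a single-input Interface; both values are assumptions based on Gradio's defaults, not settings confirmed by this Space.

# Minimal sketch: calling the running Gradio app from Python.
# The URL and the "/predict" api_name are assumed Gradio defaults.
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")  # default local Gradio address (assumed)
reply = client.predict("Hello, how are you?", api_name="/predict")
print(reply)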