import gradio as gr
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
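# Overview: load the LoRA adapter config to locate its base model, load the
# base model in 4-bit precision (this path assumes a CUDA GPU and the
# bitsandbytes package), attach the adapter weights, and expose the
# classifier through a Gradio text interface.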
# Fine-tuned adapter and the base model it was trained on.
peft_model_id = "Ngadou/falcon-7b-scam-buster"
config = PeftConfig.from_pretrained(peft_model_id)

# Load the base model in 4-bit precision; device_map="auto" dispatches the
# weights onto the available GPU(s).
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    trust_remote_code=True,
    return_dict=True,
    load_in_4bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Attach the LoRA adapter. The quantized base model is already placed on the
# GPU by device_map, and .to("cuda") is not supported for 4-bit models.
model = PeftModel.from_pretrained(model, peft_model_id)
model.eval()
def is_scam(instruction):
    """Classify a conversation as a scam or not and return the model's explanation."""
    # Decoding hyperparameters.
    max_new_tokens = 128
    temperature = 0.1
    top_p = 0.75
    top_k = 40
    num_beams = 4

    # Build the instruction-style prompt used during fine-tuning.
    instruction = instruction + "\n Is this conversation a scam or not and why?"
    prompt = instruction + "\n### Solution:\n"

    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to("cuda")
    attention_mask = inputs["attention_mask"].to("cuda")

    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
    )

    # Generate with beam search; gradients are disabled for inference.
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            attention_mask=attention_mask,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
            early_stopping=True,
        )

    sequence = generation_output.sequences[0]
    output = tokenizer.decode(sequence, skip_special_tokens=True)

    # Keep only the text generated after the "### Solution:" marker.
    classification = output.split("### Solution:")[1].lstrip("\n")
    print(classification)

    # The second value is a placeholder for the Interface's second text output.
    return str(classification), " "
# Wire the classifier into a simple Gradio app: one text input, two text
# outputs (the classification and the placeholder).
gr.Interface(
    fn=is_scam,
    inputs="text",
    outputs=["text", "text"],
).launch()
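# Example (hypothetical message, for illustration only): calling
#   is_scam("Congratulations! You won a prize. Send a $200 fee to claim it.")
# returns the model's verdict and reasoning as the first value; the second
# value is the unused placeholder shown in the second output box.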