| """ | |
import gradio as gr
from transformers import pipeline
# Three-way sentiment model; it returns labels LABEL_0/LABEL_1/LABEL_2
# (negative/neutral/positive).
sentiment_pipeline = pipeline("sentiment-analysis", model="cardiffnlp/twitter-roberta-base-sentiment")
# texts = ["Hugging Face? Weird, but memorable.", "I am desperate."]
# results = sentiment_pipeline(texts)
# for text, result in zip(texts, results):
#     print(f"Text: {text}")
#     print(f"Sentiment: {result['label']}, Score: {result['score']:.4f}\n")
def predict_sentiment(text):
    result = sentiment_pipeline(text)
    return result[0]["label"], result[0]["score"]

iface = gr.Interface(fn=predict_sentiment, inputs="text", outputs=["label", "number"])
if __name__ == "__main__":
    iface.launch()
"""
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Run on GPU when available, otherwise fall back to CPU.
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained("gpt2")
# GPT-2 has no pad token, so reuse the EOS token id for padding to silence
# the generation warning.
model = AutoModelForCausalLM.from_pretrained("gpt2", pad_token_id=tokenizer.eos_token_id).to(torch_device)
model_inputs = tokenizer("An explanation of Linear Regression: ", return_tensors="pt").to(torch_device)

# Nucleus (top-p) sampling: top_k=0 disables top-k filtering, so each token is
# drawn from the smallest set whose cumulative probability exceeds 0.92, after
# sharpening the distribution with temperature 0.6.
output = model.generate(**model_inputs, max_new_tokens=50, do_sample=True, top_p=0.92, top_k=0, temperature=0.6)
print(tokenizer.decode(output[0], skip_special_tokens=True))
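
# Sampling is stochastic, so each run prints different text. A minimal sketch
# for reproducible runs (the seed value 42 is illustrative, not from the
# original script): seed PyTorch's RNG before calling generate(), e.g.
# torch.manual_seed(42)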