namantjeaswi committed
Commit 44a2309 · 1 Parent(s): d75dbab

initial commit

Files changed (3)
  1. .gitignore +2 -0
  2. app.py +77 -0
  3. requirements.txt +0 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ /venv
+ .env
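
The ignored .env file presumably holds the local copy of the token that app.py's commented-out load_dotenv()/os.environ path reads; a hypothetical example (the variable name comes from app.py, the value is a placeholder):

# .env — kept out of the repo by the .gitignore entry above
HF_API_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx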
app.py ADDED
@@ -0,0 +1,77 @@
+ import streamlit as st
+
+ import os
+ import logging, sys
+ from dotenv import load_dotenv
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ from huggingface_hub import login
+
+ # load_dotenv()  # uncomment for local runs that read the token from .env
+
+ # HF_TOKEN = os.environ.get("HF_API_TOKEN")
+ HF_TOKEN = st.secrets["HF_API_TOKEN"]
+ login(token=HF_TOKEN)
+
+ # Set up logging; basicConfig alone is enough, and adding a second
+ # StreamHandler on stdout would print every log line twice
+ logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+
+ # Load the tokenizer and model
+ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
+ model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
+
+ # tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
+ # model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
+
+ # Define the directory to save the model
+ # save_directory = "models"
+
+ # Save the tokenizer and model to the specified directory (run once)
+ # model.save_pretrained(save_directory)
+ # tokenizer.save_pretrained(save_directory)
+
+ # Load the tokenizer and model from the saved directory; load_in_8bit
+ # applies to the model only (tokenizers take no such argument)
+ # tokenizer = AutoTokenizer.from_pretrained(save_directory, local_files_only=True)
+ # model = AutoModelForCausalLM.from_pretrained(save_directory, local_files_only=True, load_in_8bit=True)
+
+ pipe = pipeline("text-generation",
+                 model=model,  # "mistralai/Mistral-7B-v0.1"
+                 tokenizer=tokenizer,
+                 )
+
+ # Smoke test: generate once at startup and log the result
+ result = pipe("Tell me about transformers.", max_length=50, truncation=True)
+ print(result)
+
+ # Using mistralai/Mistral-7B-Instruct-v0.2
+ # save_directory = 'Mistral-7B-Instruct-v0.2'
+
+ # tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
+ # model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
+
+ # tokenizer = AutoTokenizer.from_pretrained(save_directory, local_files_only=True)
+ # model = AutoModelForCausalLM.from_pretrained(save_directory, local_files_only=True, load_in_8bit=True)
+
+ # Rebuilds the same pipeline; it only differs if the Instruct
+ # checkpoint from the commented block above is loaded instead
+ pipe = pipeline("text-generation",
+                 model=model,  # 'Mistral-7B-Instruct-v0.2'
+                 tokenizer=tokenizer,
+                 )
+
+ question = st.text_input("Enter your question", "Tell me about transformers.")
+
+ # Generate text from the question and show it in the app
+ result = pipe(question, max_length=50, truncation=True)
+ print(result)
+ st.write(result)
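
One caveat with app.py as committed: Streamlit reruns the whole script on every widget interaction, so the module-level from_pretrained calls reload the 7B checkpoint each time. A minimal sketch of one way around this (not part of the commit), assuming a recent Streamlit with st.cache_resource:

import streamlit as st
from transformers import pipeline

@st.cache_resource  # load once per process and reuse across reruns
def load_pipe(model_id: str = "mistralai/Mistral-7B-v0.1"):
    # Assumes the Space has enough memory for a 7B checkpoint
    return pipeline("text-generation", model=model_id)

pipe = load_pipe()
question = st.text_input("Enter your question", "Tell me about transformers.")
if question:
    result = pipe(question, max_new_tokens=50, truncation=True)
    st.write(result[0]["generated_text"])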
requirements.txt ADDED
Binary file (2.16 kB).
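
A requirements.txt that diffs as binary is usually an encoding issue; a common cause is running `pip freeze > requirements.txt` in PowerShell, which writes UTF-16. The pinned versions cannot be recovered from the blob, but a plausible plain-text equivalent, inferred only from app.py's imports, would be:

streamlit
transformers
torch            # backend needed by AutoModelForCausalLM
huggingface_hub
python-dotenv

Re-saving the file as UTF-8 would make future changes reviewable as a normal text diff.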