namantjeaswi committed
Commit d01ece4 · 1 Parent(s): 55b66da

checking write key

Files changed (1)
  1. app.py +2 -28
app.py CHANGED
@@ -24,23 +24,6 @@ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
 model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
 
 
-#tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")
-#model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
-
-
-# Define the directory to save the model
-#save_directory = "models"
-
-# Save the tokenizer and model to the specified directory
-#Run once
-#model.save_pretrained(save_directory)
-#tokenizer.save_pretrained(save_directory)
-
-# Load the tokenizer and model from the saved directory
-#tokenizer = AutoTokenizer.from_pretrained(save_directory, local_files_only=True,load_in_8bit=True,)
-#model = AutoModelForCausalLM.from_pretrained(save_directory, local_files_only=True,load_in_8bit=True)
-
-
 
 
 pipe = pipeline("text-generation",
@@ -52,17 +35,6 @@ pipe = pipeline("text-generation",
 result = pipe("tell me about transformer.", max_length=50, truncation=True)
 print(result)
 
-#Using mistralai/Mistral-7B-Instruct-v0.2
-
-#save_directory = 'Mistral-7B-Instruct-v0.2'
-
-#tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
-#model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
-
-
-#tokenizer = AutoTokenizer.from_pretrained(save_directory, local_files_only=True,load_in_8bit=True,)
-#model = AutoModelForCausalLM.from_pretrained(save_directory, local_files_only=True,load_in_8bit=True)
-
 
 pipe = pipeline("text-generation",
                 model=model, #'Mistral-7B-Instruct-v0.2'
@@ -70,6 +42,8 @@ pipe = pipeline("text-generation",
 )
 
 
+
+
 question =st.text_input("enter your question","tell me about transformer.")
 
 # Generate text using the pipeline
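The comments removed in the first hunk sketched a save-once, load-locally workflow, but they passed `load_in_8bit` to `AutoTokenizer.from_pretrained`; 8-bit quantization is a model-loading option (via `bitsandbytes`), not a tokenizer one. A minimal sketch of that workflow, assuming `bitsandbytes` is installed and keeping the hypothetical `save_directory` from the removed comments:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

save_directory = "models"  # hypothetical path, taken from the removed comments

# Run once: download the weights and write them to local disk
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
model.save_pretrained(save_directory)
tokenizer.save_pretrained(save_directory)

# Later runs: reload from disk; quantization applies to the model only
model = AutoModelForCausalLM.from_pretrained(
    save_directory,
    local_files_only=True,
    device_map="auto",  # bitsandbytes 8-bit loading expects a GPU placement
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
tokenizer = AutoTokenizer.from_pretrained(save_directory, local_files_only=True)
```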
 
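After this commit, app.py still constructs the text-generation pipeline twice, and the surviving inline comment names 'Mistral-7B-Instruct-v0.2' even though the loaded checkpoint is mistralai/Mistral-7B-v0.1. The diff cuts off at the "# Generate text using the pipeline" comment, so the display step below is an assumption; a minimal sketch of how the Streamlit input would feed a single pipeline:

```python
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")

# One pipeline is enough; pass the tokenizer explicitly alongside the model
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

question = st.text_input("enter your question", "tell me about transformer.")

# Generate text using the pipeline and render it in the app
# (st.write is an assumption; the lines after the diff cutoff are not shown)
result = pipe(question, max_length=50, truncation=True)
st.write(result[0]["generated_text"])
```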