Chris4K commited on
Commit
ccebec7
·
verified ·
1 Parent(s): f20e07e

Rename spp to app.py

Browse files
Files changed (2) hide show
  1. app.py +68 -0
  2. spp +0 -0
app.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import os
3
+ from transformers import pipeline
4
+
5
+
6
+ from transformers import Tool
7
+ # Import other necessary libraries if needed
8
+
9
class TextGenerationTool(Tool):
    """Hugging Face agent Tool that generates text from a prompt.

    Wraps the "microsoft/Orca-2-13b" text-generation pipeline. The
    pipeline is created lazily on first use and cached on the class so
    the (very large) model is not re-downloaded and re-initialised on
    every single call, which the original implementation did.
    """

    name = "text_generator"
    description = (
        "This is a tool for text generation. It takes a prompt as input and returns the generated text."
    )

    inputs = ["text"]
    outputs = ["text"]

    # Cached pipeline shared by all instances; built lazily in __call__
    # because construction needs the HF_token environment variable.
    _generator = None

    def __call__(self, prompt: str) -> str:
        """Generate and return text for ``prompt``.

        Parameters:
            prompt: the text prompt fed to the language model.

        Returns:
            The generated text for the single returned sequence.

        Raises:
            KeyError: if the ``HF_token`` environment variable is unset
                (name is case-sensitive — TODO confirm against deployment).
        """
        if TextGenerationTool._generator is None:
            token = os.environ['HF_token']
            TextGenerationTool._generator = pipeline(
                model="microsoft/Orca-2-13b", token=token
            )

        # num_return_sequences=1 -> the result list holds exactly one entry.
        results = TextGenerationTool._generator(
            prompt, max_length=500, num_return_sequences=1, temperature=0.7
        )

        # Kept from the original for debugging visibility.
        print(results)

        # The original returned the raw pipeline output (a list of dicts),
        # contradicting outputs=["text"]; unwrap to the generated string.
        return results[0]["generated_text"]
spp DELETED
File without changes