Update main.py
main.py CHANGED
@@ -4,45 +4,24 @@ from pydantic import BaseModel # Import BaseModel from pydantic for data validation
 from huggingface_hub import InferenceClient # Import InferenceClient from huggingface_hub
 import uvicorn # Import uvicorn for running the FastAPI application
 
-app = FastAPI() # Create a FastAPI instance
+app = FastAPI(HF_API_TOKEN) # Create a FastAPI instance
 
 # Define the primary and fallback models
 primary = "mistralai/Mixtral-8x7B-Instruct-v0.1"
-fallbacks = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/…
-
-# Load Hugging Face API Key
-HF_API_KEY = os.getenv("HF_API_TOKEN") # Get API key from environment variables
-
-# If no env variable is set, manually define it (not recommended)
-if not HF_API_KEY:
-    HF_API_KEY = "your_huggingface_api_key"
+fallbacks = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mistral-7B-Instruct-v0.1"]
 
 # Define the data model for the request body
 class Item(BaseModel):
-    input: str = None
-    system_prompt: str = …
-    history: list = [] # Stores previous messages
-    templates: list = [
-        {"input": "I love you", "output": "I'm happy to be your friend, but love isn’t something that can be forced!"},
-        {"input": "Will you be my girlfriend?", "output": "Sorry, but I already have someone special in my heart!"},
-        {"input": "Tell me about yourself", "output": "I'm Sebari-chan! I love making friends, asking questions, and learning new things."},
-        {"input": "Who do you love?", "output": "That’s a secret! But I care about all my friends. 😊"},
-    ]
-    temperature: float = 0.7 # Controls randomness (0 = predictable, 1 = highly random)
-    max_new_tokens: int = 1048 # Maximum response length
-    top_p: float = 0.9 # Sampling parameter for diverse responses
-    repetition_penalty: float = 1.1 # Prevents repetition
-    key: str = None # API key if needed
+    input: str = None
+    system_prompt: str = None
+    system_output: str = None
+    history: list = None
+    templates: list = None
+    temperature: float = 0.0
+    max_new_tokens: int = 1048
+    top_p: float = 0.15
+    repetition_penalty: float = 1.0
+    key: str = None
 
 # Function to generate the response JSON
 def generate_response_json(item, output, tokens, model_name):
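One reviewer-style note on this hunk: as committed, app = FastAPI(HF_API_TOKEN) will fail at import time, since FastAPI's constructor takes keyword-only arguments and HF_API_TOKEN is never defined once the HF_API_KEY block is removed. A minimal sketch of the conventional wiring, where the token goes to the inference client instead (the env variable name follows the removed code):

import os
from fastapi import FastAPI
from huggingface_hub import InferenceClient

app = FastAPI()  # FastAPI() takes keyword-only arguments; no token belongs here

# Read the token once from the environment and hand it to the inference client.
HF_API_KEY = os.getenv("HF_API_TOKEN")
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1", token=HF_API_KEY)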
@@ -115,7 +94,7 @@ async def generate_text(item: Item = None):
     )
 
     tokens = 0
-    client = InferenceClient(primary…
+    client = InferenceClient(primary)
     stream = client.text_generation(input_, **generate_kwargs, stream=True, details=True, return_full_text=True)
     output = ""
     for response in stream:
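For reference, the streaming call in this hunk follows huggingface_hub's text_generation streaming API; a minimal self-contained sketch (the model id is the primary defined above, the prompt and token are placeholders):

from huggingface_hub import InferenceClient

# Standalone streaming sketch; the token value is a placeholder.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1", token="hf_xxx")
output = ""
for response in client.text_generation(
    "Hello!", max_new_tokens=64, stream=True, details=True
):
    # With stream=True and details=True, each chunk carries one generated token.
    output += response.token.text
print(output)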
@@ -132,7 +111,7 @@ async def generate_text(item: Item = None):
 
     for model in fallbacks:
         try:
-            client = InferenceClient(model…
+            client = InferenceClient(model)
             stream = client.text_generation(input_, **generate_kwargs, stream=True, details=True, return_full_text=True)
             output = ""
             for response in stream:
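This hunk is the fallback half of a primary-then-fallbacks retry loop. Isolated from the endpoint, the pattern looks roughly like the sketch below, which assumes any exception should advance to the next model:

from huggingface_hub import InferenceClient

def generate_with_fallback(prompt, models, **generate_kwargs):
    # Try each model in order and return the first successful generation;
    # keep the last error so the caller can report it if everything fails.
    last_error = None
    for model in models:
        try:
            client = InferenceClient(model)
            return client.text_generation(prompt, **generate_kwargs)
        except Exception as exc:
            last_error = exc
    raise RuntimeError(f"all models failed: {last_error}")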
@@ -146,10 +125,9 @@ async def generate_text(item: Item = None):
 
     raise HTTPException(status_code=500, detail=error)
 
-
-
-
-    return {"status": "Sebari-chan is online!"}
+    if "KEY" in os.environ:
+        if item.key != os.environ["KEY"]:
+            raise HTTPException(status_code=401, detail="Valid key is required.")
 
 if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    uvicorn.run(app, host="0.0.0.0", port=8000)
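To exercise the new key check end to end, a client-side smoke test might look like the following; the base URL and route are assumptions, since the route path is not visible in this diff:

import os
import requests

# Hypothetical smoke test: the Space must be running with KEY set for the
# 401 branch to be reachable; the route path here is assumed.
resp = requests.post(
    "http://localhost:8000/",
    json={"input": "Hello!", "key": os.environ.get("KEY")},
)
print(resp.status_code, resp.json())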