Update main.py
Browse files
main.py
CHANGED
|
@@ -1,9 +1,4 @@
|
|
| 1 |
-
|
| 2 |
-
from diffusers import StableDiffusionPipeline
|
| 3 |
-
import torch
|
| 4 |
-
from fastapi import FastAPI, Response
|
| 5 |
-
from fastapi.middleware.cors import CORSMiddleware
|
| 6 |
-
from auth_token import auth_token
|
| 7 |
|
| 8 |
app = FastAPI()
|
| 9 |
|
|
@@ -30,14 +25,16 @@ pipe.safety_checker = dummy
|
|
| 30 |
def hello():
    """Return the service's fixed greeting string."""
    greeting = "Hello, I'm Artist"
    return greeting
|
| 32 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
|
| 34 |
@app.get("/gen/{prompt}")
def generate_image(prompt: str):
    """Acknowledge a generation request for *prompt*.

    NOTE(review): this handler is a stub — *prompt* is accepted but not
    used; it only reports completion to the caller.
    """
    return "Gen done"
|
|
|
|
| 1 |
+
import asyncio

from fastapi import FastAPI

import Linlada
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
app = FastAPI()
|
| 4 |
|
|
|
|
| 25 |
def hello():
    """Return the fixed greeting this service identifies itself with."""
    return "{}, I'm {}".format("Hello", "Artist")
|
| 27 |
|
| 28 |
+
async def generate(prompt):
    """Request a completion for *prompt* from the Linlada backend.

    Sends *prompt* as a single user message (model='gpt-4',
    stream=False) and returns the first item yielded by the completion
    result, or None when the backend yields nothing.
    """
    result = await Linlada._create_completion(
        model='gpt-4',
        messages=[{"role": "user", "content": prompt}],  # fixed: was `promp` (NameError)
        stream=False,
    )  # alterative model setting
    # Fixed: the original iterated an undefined name `response`;
    # the awaited value is bound to `result`.
    for message in result:
        return message
    return None
|
| 33 |
|
| 34 |
@app.get("/gen/{prompt}")
def generate_image(prompt: str):
    """Synchronous endpoint that bridges to the async `generate` helper.

    FastAPI runs `def` (non-async) path handlers in a worker thread, so
    creating a private event loop here is safe.

    Returns whatever `generate(prompt)` produces.
    """
    # asyncio.run() creates the loop, runs the coroutine, and always
    # closes the loop (including async-generator shutdown) — unlike the
    # previous manual new_event_loop/run_until_complete/close sequence,
    # which leaked the loop whenever generate() raised.
    return asyncio.run(generate(prompt))
|
|
|
|
|
|
|
|
|