tranduy2611 commited on
Commit
4e216fb
·
1 Parent(s): 10baa19
Files changed (5) hide show
  1. TextGen/ConfigEnv.py +0 -19
  2. TextGen/__init__.py +0 -5
  3. TextGen/router.py +0 -53
  4. app.py +59 -1
  5. requirements.txt +6 -6
TextGen/ConfigEnv.py DELETED
@@ -1,19 +0,0 @@
1
- """Config class for handling env variables.
2
- """
3
- from functools import lru_cache
4
- from pydantic import BaseSettings
5
-
6
- class Settings(BaseSettings):
7
- APP_ID: str
8
- USER_ID: str
9
- MODEL_ID: str
10
- CLARIFAI_PAT: str
11
- MODEL_VERSION_ID: str
12
-
13
- class Config:
14
- env_file = '.env'
15
-
16
- @lru_cache()
17
- def get_settings():
18
- return Settings()
19
- config = get_settings()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
TextGen/__init__.py DELETED
@@ -1,5 +0,0 @@
1
- from fastapi import FastAPI
2
-
3
- app = FastAPI(title="Deploying FastAPI Apps on Huggingface")
4
-
5
- from TextGen import router
 
 
 
 
 
 
TextGen/router.py DELETED
@@ -1,53 +0,0 @@
1
- from pydantic import BaseModel
2
-
3
- from .ConfigEnv import config
4
- from fastapi.middleware.cors import CORSMiddleware
5
-
6
- from langchain.llms import Clarifai
7
- from langchain.chains import LLMChain
8
- from langchain.prompts import PromptTemplate
9
-
10
- from TextGen import app
11
-
12
- class Generate(BaseModel):
13
- text:str
14
-
15
- def generate_text(prompt: str):
16
- if prompt == "":
17
- return {"detail": "Please provide a prompt."}
18
- else:
19
- prompt = PromptTemplate(template=prompt, input_variables=['Prompt'])
20
-
21
- llm = Clarifai(
22
- pat = config.CLARIFAI_PAT,
23
- user_id = config.USER_ID,
24
- app_id = config.APP_ID,
25
- model_id = config.MODEL_ID,
26
- model_version_id=config.MODEL_VERSION_ID,
27
- )
28
-
29
- llmchain = LLMChain(
30
- prompt=prompt,
31
- llm=llm
32
- )
33
-
34
- llm_response = llmchain.run({"Prompt": prompt})
35
- return Generate(text=llm_response)
36
-
37
-
38
-
39
- app.add_middleware(
40
- CORSMiddleware,
41
- allow_origins=["*"],
42
- allow_credentials=True,
43
- allow_methods=["*"],
44
- allow_headers=["*"],
45
- )
46
-
47
- @app.get("/", tags=["Home"])
48
- def api_home():
49
- return {'detail': 'Welcome to FastAPI TextGen Tutorial!'}
50
-
51
- @app.post("/api/generate", summary="Generate text from prompt", tags=["Generate"], response_model=Generate)
52
- def inference(input_prompt: str):
53
- return generate_text(prompt=input_prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py CHANGED
@@ -1 +1,59 @@
1
- from TextGen import app
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from fastapi import FastAPI, File, UploadFile, Form
from fastapi.responses import FileResponse
from diffusers import AutoPipelineForImage2Image
from PIL import Image
import torch
import os

# Initialize FastAPI app
app = FastAPI()

# Load the image-to-image diffusion pipeline once at import time so requests
# don't pay the model-load cost.
model_id = "kandinsky-community/kandinsky-2-2-decoder"
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = AutoPipelineForImage2Image.from_pretrained(
    model_id,
    torch_dtype=torch.float32, use_safetensors=True
).to(device)

# Directory where generated images are written before being served.
os.makedirs("output_images", exist_ok=True)

@app.get("/")
async def root():
    """Landing / health-check endpoint."""
    return {"message": "Welcome to the Image-to-Image API!"}

@app.post("/image-to-image/")
async def image_to_image(
    prompt: str = Form(...),
    stren: float = Form(...),
    negative_prompt: str = Form(...),
    image: UploadFile = File(...)
):
    """
    Perform image-to-image transformation using a given prompt and input image.

    Args:
    - prompt (str): Text prompt describing the desired transformation.
    - stren (float): Diffusion strength; higher values deviate further
      from the input image (passed through to the pipeline's `strength`).
    - negative_prompt (str): Concepts to steer the generation away from.
    - image (UploadFile): Input image file.

    Returns:
    - FileResponse: The transformed image as a PNG, resized back to the
      input image's original dimensions.
    """
    try:
        # Open and preprocess the input image.
        input_image = Image.open(image.file).convert("RGB")
        original_size = input_image.size  # remember size to restore later

        # Generate the output image using the pipeline.
        generated_image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=input_image,
            strength=stren,
        ).images[0]
        resized_image = generated_image.resize(original_size, Image.LANCZOS)

        # Sanitize the client-supplied filename — it is untrusted and may
        # contain path separators ("../") — and force a .png extension so
        # the saved format matches the media type declared below (PIL picks
        # the output format from the file extension).
        safe_name = os.path.basename(image.filename or "upload")
        stem, _ext = os.path.splitext(safe_name)
        output_path = os.path.join("output_images", f"generated_{stem}.png")
        resized_image.save(output_path, format="PNG")

        # Return the generated image as a response.
        return FileResponse(output_path, media_type="image/png")

    except Exception as e:
        # Best-effort error reporting; kept as a plain dict (HTTP 200) to
        # preserve the original response shape for existing callers.
        return {"error": str(e)}
requirements.txt CHANGED
@@ -1,7 +1,7 @@
1
- fastapi==0.99.1
2
  uvicorn
3
- requests
4
- pydantic==1.10.12
5
- langchain
6
- clarifai
7
- Pillow
 
1
+ fastapi
2
  uvicorn
3
+ pillow
4
+ transformers
5
+ diffusers
6
+ torch
7
+ python-multipart