|
|
import gradio as gr |
|
|
import openai |
|
|
import requests |
|
|
import os |
|
|
import fileinput |
|
|
from dotenv import load_dotenv |
|
|
import io |
|
|
from PIL import Image |
|
|
from stability_sdk import client |
|
|
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation |
|
|
|
|
|
# ---- UI text shown in the Gradio interface (user-facing, Japanese) ----
title = "Haruki Murakami generator"
inputs_label = "どんな感じの文章を書いて欲しいか打ってください"
outputs_label = "村上春樹風の文章が出てきます"
visual_outputs_label = "言っている内容のイメージ"
description = """
- ※入出力の文字数は最大1000文字程度までを目安に入力してください。解答に120秒くらいかかります.エラーが出た場合はログを開いてエラーメッセージを送ってくれるとochyAIが喜びます
"""

article = """
"""

# ---- API credentials and clients ----
# Load secrets from a local .env file so no key ever lives in source control.
load_dotenv()

# SECURITY FIX: both API keys were previously committed as string literals in
# this file (they must be considered leaked and rotated). The Stability call
# additionally passed the raw secret to os.getenv() as if it were a variable
# *name*, which always evaluated to None. Read both from the environment.
openai.api_key = os.getenv("OPENAI_API_KEY")

os.environ['STABILITY_HOST'] = 'grpc.stability.ai:443'

# gRPC client for Stability's image-generation service.
stability_api = client.StabilityInference(
    key=os.getenv("STABILITY_KEY"),
    verbose=True,
)

# Chat model used by OpenAI.chat_completion below.
MODEL = "gpt-4"
|
|
|
|
def get_filetext(filename, cache={}):
    """Read *filename* and return its contents, memoizing across calls.

    The mutable default ``cache`` is intentional: it persists between calls,
    so each file is read from disk at most once per process.

    Args:
        filename: Path of the text file to read.
        cache: Internal memoization dict — do not pass explicitly.

    Returns:
        The file's full text.

    Raises:
        ValueError: If the file does not exist (on first, uncached access).
    """
    if filename in cache:
        return cache[filename]
    if not os.path.exists(filename):
        # Bug fix: the f-string previously contained no placeholder, so the
        # error never said which file was missing.
        raise ValueError(f"ファイル '{filename}' が見つかりませんでした")
    # Explicit encoding: the prompt files contain Japanese UTF-8 text.
    with open(filename, "r", encoding="utf-8") as f:
        text = f.read()
    cache[filename] = text
    return text
|
|
|
|
|
class OpenAI:
    """Helper around the OpenAI chat-completions HTTP API plus Stability
    image generation for the Murakami generator."""

    @classmethod
    def chat_completion(cls, prompt, start_with=""):
        """Generate Murakami-style text for *prompt* and an illustrative image.

        Sends the system constraints (haruki.md), the output template
        (template.md), and the user prompt to the chat-completions endpoint,
        then feeds the "Prompt for Visual Expression" section of the reply to
        the Stability API.

        Args:
            prompt: Full user prompt (already templated by the caller).
            start_with: Optional assistant-message prefix to steer the reply.

        Returns:
            ``[content, img]`` where ``content`` is the generated text and
            ``img`` is a PIL image, or ``None`` when no image was produced
            (e.g. filtered as NSFW).
        """
        constraints = get_filetext(filename="haruki.md")
        template = get_filetext(filename="template.md")

        data = {
            # Use the module-level constant instead of a duplicate literal.
            "model": MODEL,
            "messages": [
                {"role": "system", "content": constraints},
                {"role": "system", "content": template},
                {"role": "assistant", "content": "Sure!"},
                {"role": "user", "content": prompt},
                {"role": "assistant", "content": start_with},
            ],
        }

        response = requests.post(
            "https://api.openai.com/v1/chat/completions",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {openai.api_key}",
            },
            json=data,
            # Bug fix: no timeout previously — a stalled connection hung the
            # app forever. The UI advertises ~120 s responses.
            timeout=180,
        )

        result = response.json()
        print(result)

        content = result["choices"][0]["message"]["content"].strip()

        # Bug fix: the bare split(...)[1] raised IndexError whenever the
        # model omitted the marker; fall back to the whole reply instead.
        marker = "### Prompt for Visual Expression\n\n"
        parts = content.split(marker)
        visualize_prompt = parts[1] if len(parts) > 1 else content

        answers = stability_api.generate(
            prompt=visualize_prompt,
        )

        img = None
        for resp in answers:
            for artifact in resp.artifacts:
                if artifact.finish_reason == generation.FILTER:
                    print("NSFW")
                if artifact.type == generation.ARTIFACT_IMAGE:
                    img = Image.open(io.BytesIO(artifact.binary))
                    return [content, img]
        # Bug fix: previously fell off the end and returned None (a single
        # value) when no image artifact arrived, breaking the two-output
        # Gradio contract. Always return the [text, image] pair.
        return [content, None]
|
|
|
|
|
class NajiminoAI:
    """Builds the user-facing prompt and delegates generation to OpenAI."""

    @classmethod
    def generate_emo_prompt(cls, user_message):
        """Return the full prompt: the user's message followed by an
        instruction to fill in the template from template.md."""
        template = get_filetext(filename="template.md")
        return f"""
{user_message}
---
上記を元に、下記テンプレートを埋めてください。
---
{template}
"""

    @classmethod
    def generate_emo(cls, user_message):
        """Generate the Murakami-style [text, image] pair for *user_message*."""
        emo_prompt = cls.generate_emo_prompt(user_message)
        return OpenAI.chat_completion(prompt=emo_prompt, start_with="")
|
|
|
|
|
def main():
    """Build and launch the Gradio interface.

    The single textbox input feeds NajiminoAI.generate_emo, which returns a
    [text, image] pair matching the two outputs declared below.
    """
    iface = gr.Interface(
        fn=NajiminoAI.generate_emo,
        inputs=gr.Textbox(label=inputs_label),
        # Bug fix: the text output was mislabeled with inputs_label;
        # outputs_label is defined at the top of the file for this purpose
        # but was never used.
        outputs=[
            gr.Textbox(label=outputs_label),
            gr.Image(label=visual_outputs_label),
        ],
        title=title,
        description=description,
        article=article,
        allow_flagging='never',
    )

    iface.launch()
|
|
|
|
|
if __name__ == '__main__':
    # Launch the Gradio app only when run as a script, not on import.
    main()