Merged remote and local repositories
Browse files- .gitattributes +3 -0
- README.md +3 -0
- app.py +0 -8
- requirements.txt +0 -14
.gitattributes
CHANGED
|
@@ -33,4 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
| 36 |
data-source/resume.pdf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
<<<<<<< HEAD
|
| 37 |
data-source/resume.pdf filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
=======
|
| 39 |
+
>>>>>>> 931e0e35b8b7cb0d24798a7daff7c3ad9e4b1a83
|
README.md
CHANGED
|
@@ -58,5 +58,8 @@ Create a Dataset Repo
|
|
| 58 |
Login Huggingface on Terminal and Push all code to Space Repo
|
| 59 |
* Login huggingface on terminal: `huggingface-cli login`
|
| 60 |
* git remote add hf https://huggingface.co/spaces/O-G-O/AboutMe
|
|
|
|
|
|
|
|
|
|
| 61 |
* git push hf main
|
| 62 |
|
|
|
|
| 58 |
Login Huggingface on Terminal and Push all code to Space Repo
|
| 59 |
* Login huggingface on terminal: `huggingface-cli login`
|
| 60 |
* git remote add hf https://huggingface.co/spaces/O-G-O/AboutMe
|
| 61 |
+
* if the codebase was initially pushed to GitHub: git pull hf main --allow-unrelated-histories
|
| 62 |
+
* git add .
|
| 63 |
+
* git commit -m "Merged remote and local histories"
|
| 64 |
* git push hf main
|
| 65 |
|
app.py
CHANGED
|
@@ -570,13 +570,10 @@ if __name__ == "__main__":
|
|
| 570 |
|
| 571 |
done = False
|
| 572 |
while not done:
|
| 573 |
-
<<<<<<< HEAD
|
| 574 |
-
=======
|
| 575 |
# gemini = OpenAI(api_key=self.google_api_key, base_url=self.google_gai_url)
|
| 576 |
# model_name = "gemini-2.0-flash"
|
| 577 |
# response = gemini.chat.completions.create(model=model_name, messages=messages, tools=tools)
|
| 578 |
# finish_reason = response.choices[0].finish_reason
|
| 579 |
-
>>>>>>> 429a64b90d1ab4d844b4ee7653000d7733c180ba
|
| 580 |
openai_client = OpenAI(api_key=self.openai_api_key)
|
| 581 |
|
| 582 |
model_name = "gpt-3.5-turbo"
|
|
@@ -603,8 +600,3 @@ if __name__ == "__main__":
|
|
| 603 |
me = Me()
|
| 604 |
gr.ChatInterface(me.chat, type="messages").launch()
|
| 605 |
|
| 606 |
-
<<<<<<< HEAD
|
| 607 |
-
|
| 608 |
-
|
| 609 |
-
=======
|
| 610 |
-
>>>>>>> 429a64b90d1ab4d844b4ee7653000d7733c180ba
|
|
|
|
| 570 |
|
| 571 |
done = False
|
| 572 |
while not done:
|
|
|
|
|
|
|
| 573 |
# gemini = OpenAI(api_key=self.google_api_key, base_url=self.google_gai_url)
|
| 574 |
# model_name = "gemini-2.0-flash"
|
| 575 |
# response = gemini.chat.completions.create(model=model_name, messages=messages, tools=tools)
|
| 576 |
# finish_reason = response.choices[0].finish_reason
|
|
|
|
| 577 |
openai_client = OpenAI(api_key=self.openai_api_key)
|
| 578 |
|
| 579 |
model_name = "gpt-3.5-turbo"
|
|
|
|
| 600 |
me = Me()
|
| 601 |
gr.ChatInterface(me.chat, type="messages").launch()
|
| 602 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
requirements.txt
CHANGED
|
@@ -1,20 +1,6 @@
|
|
| 1 |
-
<<<<<<< HEAD
|
| 2 |
-
<<<<<<< HEAD
|
| 3 |
-
=======
|
| 4 |
-
>>>>>>> 429a64b90d1ab4d844b4ee7653000d7733c180ba
|
| 5 |
requests
|
| 6 |
python-dotenv
|
| 7 |
gradio
|
| 8 |
pypdf
|
| 9 |
openai
|
| 10 |
-
<<<<<<< HEAD
|
| 11 |
-
=======
|
| 12 |
-
requests
|
| 13 |
-
python-dotenv
|
| 14 |
-
gradio
|
| 15 |
-
pypdf
|
| 16 |
-
openai
|
| 17 |
-
>>>>>>> 959af104128da603934ae77f2ce8eef68321816e
|
| 18 |
-
=======
|
| 19 |
-
>>>>>>> 429a64b90d1ab4d844b4ee7653000d7733c180ba
|
| 20 |
openai-agents
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
requests
|
| 2 |
python-dotenv
|
| 3 |
gradio
|
| 4 |
pypdf
|
| 5 |
openai
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
openai-agents
|