Kim Adams committed on
Commit
15740f2
·
1 Parent(s): 741ce5e

small shifts

Browse files
create_games/data/game_ideas.json CHANGED
@@ -1,7 +1,7 @@
1
  [
2
  {
3
- "Name": "Family Portraits",
4
- "Description": "Each player takes turns drawing a portrait of their family. The other players must guess who is in the portrait. The player with the most correct guesses wins.",
5
- "Rationale": "This game encourages creativity and allows players to learn more about each other's families."
6
  }
7
  ]
 
1
  [
2
  {
3
+ "Name": "Food Frenzy",
4
+ "Description": "Players take turns drawing a food item on a piece of paper. The other player must guess what the food item is. The player who guesses correctly gets a point. The player with the most points at the end of the game wins.",
5
+ "Rationale": "This game is simple and easy to play, and encourages creativity and imagination. It also allows for a lot of fun and friendly competition between the two players."
6
  }
7
  ]
home_view/__pycache__/ui_home.cpython-311.pyc CHANGED
Binary files a/home_view/__pycache__/ui_home.cpython-311.pyc and b/home_view/__pycache__/ui_home.cpython-311.pyc differ
 
home_view/ui_home.py CHANGED
@@ -77,7 +77,7 @@ with gr.Blocks() as ui:
77
  logo=gr.Image(value=constants.BUILDPLAY_LOGO, width=200, height=200, show_download_button=False, container=False)
78
  directions= gr.Markdown(constants.DIRECTIONS, container=False)
79
  with gr.Column():
80
- playersCB = gr.CheckboxGroup ([], type="value", label=constants.PLAYERS, info=constants.PLAYERS_INFO )
81
  createBtn = gr.Button(value=constants.CREATE_GAME, variant="primary")
82
  gameTitle=gr.Markdown(constants.GAME_TITLE, container=False)
83
  desc=gr.TextArea(info=constants.GAME_DESC, interactive=False, show_label=False,
 
77
  logo=gr.Image(value=constants.BUILDPLAY_LOGO, width=200, height=200, show_download_button=False, container=False)
78
  directions= gr.Markdown(constants.DIRECTIONS, container=False)
79
  with gr.Column():
80
+ playersCB = gr.CheckboxGroup ([], label=constants.PLAYERS, info=constants.PLAYERS_INFO )
81
  createBtn = gr.Button(value=constants.CREATE_GAME, variant="primary")
82
  gameTitle=gr.Markdown(constants.GAME_TITLE, container=False)
83
  desc=gr.TextArea(info=constants.GAME_DESC, interactive=False, show_label=False,
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- pip>=21.3
2
  requests==2.31.0
3
  pylance==0.5.10
4
  pandas==2.0.3
@@ -9,8 +9,8 @@ scikit-learn==1.2.2
9
  plotly.express==0.4.1
10
  openai==0.27.8
11
  openapi-schema-pydantic==1.2.4
12
- gradio==3.39.0
13
- gradio_client==0.3.0
14
  GitPython==3.1.31
15
  elevenlabs==0.2.18
16
  python-dotenv==1.0.0
 
1
+ pip>=23.3
2
  requests==2.31.0
3
  pylance==0.5.10
4
  pandas==2.0.3
 
9
  plotly.express==0.4.1
10
  openai==0.27.8
11
  openapi-schema-pydantic==1.2.4
12
+ gradio==3.48.0
13
+ gradio_client==0.6.1
14
  GitPython==3.1.31
15
  elevenlabs==0.2.18
16
  python-dotenv==1.0.0
sketch/__pycache__/sketch.cpython-311.pyc CHANGED
Binary files a/sketch/__pycache__/sketch.cpython-311.pyc and b/sketch/__pycache__/sketch.cpython-311.pyc differ
 
sketch/sketch.py CHANGED
@@ -27,23 +27,34 @@ model.load_state_dict(state_dict, strict=False)
27
  model.eval()
28
 
29
  def Predict(img):
 
 
 
 
 
30
  # Convert to grayscale if the image is RGB
31
- if img is not None and img.any():
32
- if img.shape[-1] == 3:
33
- img = np.mean(img, axis=-1)
34
  # Convert the NumPy array to a PIL image
35
  img_pil = Image.fromarray(img.astype('uint8'))
 
36
  # Resize the image
37
- img_resized = img_pil.resize((28, 28), Image.ANTIALIAS)
 
38
  # Convert the PIL image back to a NumPy array
39
  img_np = np.array(img_resized)
 
40
  # Convert to tensor and normalize
41
  x = torch.tensor(img_np, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.0
 
42
  # Model prediction
43
  with torch.no_grad():
44
  out = model(x)
 
45
  probabilities = torch.nn.functional.softmax(out[0], dim=0)
46
  values, indices = torch.topk(probabilities, 5)
47
  confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}
 
48
  return confidences
49
 
 
27
  model.eval()
28
 
29
def Predict(img):
    """Classify a hand-drawn sketch with the globally loaded model.

    Parameters:
        img: NumPy array from the sketch canvas — H×W grayscale or H×W×3 RGB
             with uint8-range values — or None when the canvas is empty.

    Returns:
        dict mapping the top-5 label names to their softmax confidences,
        or None when the input image is missing or entirely blank.
    """
    # Guard: the UI hands us None (or an all-zero array) for an empty canvas.
    if img is None or not img.any():
        print("img is None or empty")
        return None

    # Collapse RGB to a single grayscale channel by channel-averaging.
    if img.shape[-1] == 3:
        img = np.mean(img, axis=-1)

    # Convert the NumPy array to a PIL image and resize to the 28x28 input
    # the model expects.
    # FIX: Image.ANTIALIAS was removed in Pillow 10.0; Image.LANCZOS is the
    # same filter under its current name.
    img_pil = Image.fromarray(img.astype('uint8'))
    img_resized = img_pil.resize((28, 28), Image.LANCZOS)

    # Back to NumPy, then to a normalized float tensor of shape (1, 1, 28, 28).
    img_np = np.array(img_resized)
    x = torch.tensor(img_np, dtype=torch.float32).unsqueeze(0).unsqueeze(0) / 255.0

    # Inference without gradient tracking.
    with torch.no_grad():
        out = model(x)

    probabilities = torch.nn.functional.softmax(out[0], dim=0)
    values, indices = torch.topk(probabilities, 5)
    # NOTE(review): assumes LABELS is indexable by class id — defined at module
    # top, outside this hunk; confirm it has >= 5 entries.
    confidences = {LABELS[i]: v.item() for i, v in zip(indices, values)}

    return confidences
60