diff --git a/annotations/1.json b/annotations/1.json
index f5b8a4117a4a500cd7b8426a3014baed0a8379a4..fbe019c8315d56b9d9939ef34049c210d62504e0 100644
--- a/annotations/1.json
+++ b/annotations/1.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:58d269453ed555e0a5f293e5ff9ab44642d5bc60c5329d38a14e142c58105f9a
-size 598
+oid sha256:a38f06b2c513e2894cbcb6b608b24dd010b979dfc85bc5d1eead96c90f0c8f66
+size 599
diff --git a/annotations/10.json b/annotations/10.json
index 6dee81d6d893c065eb63be5baa5e3abb3491d06e..2ccba83a5ac99a30720e2ddc86806ff169189d94 100644
--- a/annotations/10.json
+++ b/annotations/10.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:39a24169d8c67ebd91fdb168d7b568b593c62e6179dd11b6ddd5e027ec575ac8
-size 599
+oid sha256:b9b2682e14b1627d8291199313a78bf3bb6d8eaeb0ba8bb3ca92956f28d31e38
+size 603
diff --git a/annotations/11.json b/annotations/11.json
index 0bdfa74680c35fdb42c1aed91ddace769a9e4b5e..9ecbf1d007914126545110cd0d9723443c9e9a4c 100644
--- a/annotations/11.json
+++ b/annotations/11.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:d633bf81fd7e3ab0db33a8ca0fe61a562ecf1baae30cf2272a9585f3c2fcd6e3
+oid sha256:5945b087a33bbe981e22de200d7da62b56402a3a07cba5e173de4679cfc85478
size 599
diff --git a/annotations/12.json b/annotations/12.json
index 08807196083cd73506efa32c4884ce727f9795f8..bae30ecfe0cbe12d9d4b80b437ddb31fdc624876 100644
--- a/annotations/12.json
+++ b/annotations/12.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:2dc4a5b1db9491a5d2ac99ecd3d4493f99a6da029df18c2ba798b78b08e549fd
+oid sha256:3c67327f8b46380d4cd904b54839a1999c778e97a3015544c50278300857406c
size 599
diff --git a/annotations/13.json b/annotations/13.json
index d7a79b20fc8f26a4efce93befca931743dbb6577..3b56b593a36a05036e0289616061eea45d121da0 100644
--- a/annotations/13.json
+++ b/annotations/13.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e79da4895b3681948967cae17eb93c4af6a1f6e35c6569dd9d85bec2f27c57b3
-size 603
+oid sha256:34cecb2c2ad068588d63b5e9b11ce58d8b171fe0d80f48671cafdf8864e69fab
+size 599
diff --git a/annotations/14.json b/annotations/14.json
index d29cb57369c2242749b3200359fe8856a05fcf81..bb4bd0964e6175dbb358f1ad19121b2e62e69b76 100644
--- a/annotations/14.json
+++ b/annotations/14.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:69d8c1b6152a5ad595860dab8a7a038ba2363fe898edf703b94c5430850e37df
-size 604
+oid sha256:2d1d1920a715510beed3dcc9f8283f1c8317c02ae7a501331526408f0baaa34f
+size 600
diff --git a/annotations/15.json b/annotations/15.json
index 7913b8d75012f9cb8165e9f3b876322f35c3b64b..23930e40366fa5600d9a791e146ded46cf22a9b4 100644
--- a/annotations/15.json
+++ b/annotations/15.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:33e2cee2d6dd3f8d24f5e8192e3e9d2b1f75fdb3e5f7005366d95bd0f8c65a16
-size 601
+oid sha256:8efe187df06b6e3bb13f08361db3b2a5af64aa2f27c2cb70816c54638f3e4121
+size 598
diff --git a/annotations/16.json b/annotations/16.json
index e18b940f2dc85eb8325391426aae5a90c23fce24..5efec2328e7089b5b94e7ec53cc2dde7ad49c7ba 100644
--- a/annotations/16.json
+++ b/annotations/16.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:d9ca17d303cdc0c2699babf52ad9871791173d9125361c4ab5f00011c2d5e98e
-size 603
+oid sha256:77c1d454ed0ed0707c03df759181d909792e6a4b4f80df067635f79332bd1d6e
+size 599
diff --git a/annotations/17.json b/annotations/17.json
index 6ea3837a2c0ba7964967f70129a75a5cc0d62bb9..647f26739ab7e5d7ca7d78ee262bb7e2e2021b6e 100644
--- a/annotations/17.json
+++ b/annotations/17.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:2679ff95162447820a529b6bf2de7936a5d935aa0d198a91e7e151497f485fc5
-size 603
+oid sha256:485e34bf8f41c01b3d203794854db509fa3734df14d92715bcab9ba78f3b9887
+size 597
diff --git a/annotations/18.json b/annotations/18.json
index c91027cdcf1f52b58fea7c3d1052d38a80612b50..1e00dfce64eba370e7286143ca0dc252d17f58c1 100644
--- a/annotations/18.json
+++ b/annotations/18.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:44ce2d7df40790c852772554d0724af37dd1a4c539fcf260c92081a969c3fc68
-size 600
+oid sha256:298a76679c25bdba71d35b35d5c9abf2010235f4ced089e7d78ecc46665b3eb9
+size 598
diff --git a/annotations/19.json b/annotations/19.json
index 0c1b310c65b3bf632c7a8d60540eef14d8d5e775..48a93300a9ead5a531e22183c1e5d13f55104216 100644
--- a/annotations/19.json
+++ b/annotations/19.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:1fc784a08c2f4d17f9c2ba9b6215f5c0035d8f50bb41cc24afda9d9d1c28eb0f
-size 603
+oid sha256:fabb44ff012ef2767104fcd66251786db1b1bca1ab4bf0acccb8dba6fd35085b
+size 604
diff --git a/annotations/2.json b/annotations/2.json
index d5c978c5ede723054a3f99a4818c877ab9332d46..355963418fe0b34000471aa37d2afc42beb84a4d 100644
--- a/annotations/2.json
+++ b/annotations/2.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:da3874136481ab904c6d96e814b1ccd3b1ddc310ea15aa1d9364dcb43b211448
-size 598
+oid sha256:74ff12db8cb3c26f6b6eb0bb39c2d601b488bd9ff13e47b541f1f41b7c18f5fb
+size 603
diff --git a/annotations/20.json b/annotations/20.json
index f69e8b9a2abd6a2a6608e64ca458e5176a01906f..dd5e98bca5c8e41ca1e0753e66d47c3d63a11eb7 100644
--- a/annotations/20.json
+++ b/annotations/20.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:cf0bf7a7f37b8b38aa52d3ee17a085d61c456447ab88120e87c16c283de49f53
-size 601
+oid sha256:f94c419ee902508d66d7d946ec726ffca221a748028c498759c33dd98b5e21a3
+size 599
diff --git a/annotations/21.json b/annotations/21.json
index 205c93bb11d75f62eac5221545b1cb7f86c27191..9dd1d43f1c5ad0de31c54f7c1ccddf785fe5c428 100644
--- a/annotations/21.json
+++ b/annotations/21.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:151d551058cb2ead1137513223e1cb146cd1019d6a12efebaab7bb6a04fffddd
-size 604
+oid sha256:11a7b663aceeb4d0003120718219c11c7fd1a4960d03d68ac1b6ae9327817810
+size 599
diff --git a/annotations/22.json b/annotations/22.json
index 8c11c83a2f4387245521cda4ebc78e77824c7c21..5ec0ce86bb0fa47953c46d61d39296d55a9f9a8c 100644
--- a/annotations/22.json
+++ b/annotations/22.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:63979a137eea0ddf6d00a8dfce7b885eda721875d0b5922f287556573909e472
-size 598
+oid sha256:b811fd2ef28bdea6b10084440a59e330f435ffeb3f24b3222c1e5e10e8f1199c
+size 599
diff --git a/annotations/23.json b/annotations/23.json
index 57f766d158fae70166f3fbc3e69e381e3a770587..ac11b8ab5471015efcd71fd40f333d687ffa33e7 100644
--- a/annotations/23.json
+++ b/annotations/23.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:188adbb9b3b166a0f2b63e7517dbfeb6069e9eb82ffd77b6736072ac878cd07e
-size 600
+oid sha256:de070a09c40dd7f9011010b936f3bd98194ef0b09bade41e59fd3c9f5afb0bd5
+size 603
diff --git a/annotations/24.json b/annotations/24.json
index fa2dfbe4ab730302dabbbd621d6a6961c4b1d64d..3738bec44205f4a9413c2b1f1cfa308ca59377be 100644
--- a/annotations/24.json
+++ b/annotations/24.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:68558a4638d80d423221d1d634bad495af4fbd08259017174d891dcf26e3c685
+oid sha256:acfbb5d2adb7e7e1385cbcedd5348e10f9cae1eb0182de4b5acb3205e2748aaa
size 604
diff --git a/annotations/25.json b/annotations/25.json
index ca839ad184ef88f08a47f20c97231b8caa0a0bd0..8e2a356d599b46992dc3346e67a195ea32b85b24 100644
--- a/annotations/25.json
+++ b/annotations/25.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:ecfee633e322957136a3defc6a3819d6690ca1995c08559afa001b7d17d2594d
-size 598
+oid sha256:13c8b60816471a57525dbbc921d7b9f211952ec7eb7c73035226320aea2689f1
+size 601
diff --git a/annotations/26.json b/annotations/26.json
index a9fc59dd3aaacece25118969a84dcfd6f08d38f5..c3904998121b527202f1746c22503a9508b44559 100644
--- a/annotations/26.json
+++ b/annotations/26.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:9f1a6f721bde0eae7a2c429c2f897d1041f821c29ec152aa5d38164a6f1aa773
+oid sha256:89a95c9348889d11e3f62366d320ceb6f3a25f31210b6194e0dd7001fba0fa0f
size 603
diff --git a/annotations/27.json b/annotations/27.json
index 0c38de7e1f76e43cf535c9e5287d4772db890779..ddc8051ae4b3127e61f56e06fe02a6ab18f065c8 100644
--- a/annotations/27.json
+++ b/annotations/27.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:527a0341f9e053606abc7f90d39d12b34e1466291980be2974b2d7dec6f78f3a
-size 604
+oid sha256:7546d72b8219987d553db136b838775c598107ba195e11351ce4987f06a4de6e
+size 603
diff --git a/annotations/28.json b/annotations/28.json
index b922b621421464c15fa4bdd73c77bf6bc7cab927..455e42ffa800d8468495593bf372c3e8d90db636 100644
--- a/annotations/28.json
+++ b/annotations/28.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:3a0ecc4735a4d70050cecd545e96b81728c5eb992bc350875064ea85f4c5b0cc
-size 597
+oid sha256:c6a1b80ca5d0adfa6896b2dcce88a3e096e1d2f67c92d78f6f3e16c06e3dc81d
+size 600
diff --git a/annotations/29.json b/annotations/29.json
index e6af09c5f481ce9a8cde031c57a28fbe6c1434e8..b04cb4eede4976f9dfa07759db865fc15cd31c43 100644
--- a/annotations/29.json
+++ b/annotations/29.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e5033dd30bd01e4265bc580dae418d55f5099488f5dc850b5932423bc179ab49
-size 599
+oid sha256:90ecacf6be600eb4f01b600f384b54dc7a6d6ef887b81fa1531b31ef2003298c
+size 603
diff --git a/annotations/3.json b/annotations/3.json
index 2304b2e2333cb7b008762d3f014ebb6eec07a8a9..2073ca37f2eadd72a1273618bfba576a728c6977 100644
--- a/annotations/3.json
+++ b/annotations/3.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:c21bb39d40481b7a13da31dde7bf6d0e50e60b44000bb1afad3fb4db961eef29
-size 598
+oid sha256:d5df81a7e69c57f30fef4677f0a44c409a6d7cfc5070163c541d5bb8e0bf0e16
+size 603
diff --git a/annotations/30.json b/annotations/30.json
index 5e81c7d001f3a065bbc17e649aa10de9effd2298..2e2db0a09ee348b38a5580a342478ad245da34d3 100644
--- a/annotations/30.json
+++ b/annotations/30.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:cf8d06261505718342611a7a1e8428e62e84bbc29230ac598696ba9174e44412
-size 599
+oid sha256:fc09e8ba3a76b7122b3c5f136c0615746de6a615a6760e02678386e554ca9789
+size 601
diff --git a/annotations/31.json b/annotations/31.json
index 84e72d28f439a16d66643157b328066499ec3e4d..80a9e5987dbb06944ce2bf3492ba775e3de9037b 100644
--- a/annotations/31.json
+++ b/annotations/31.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:a6d8d9add12a1841c2db7ca945d3439477de44ddab6fa637698b700bc30e541a
-size 603
+oid sha256:bea5fd290c25c8b790c5cc5286183fc5b89a6516958adc9473565e75d9f69abc
+size 604
diff --git a/annotations/32.json b/annotations/32.json
index f566a31fe85981e116f19aa25b1eac2bc0b682d4..53cd0bcf79970899ad29448e1c96bb78153800ae 100644
--- a/annotations/32.json
+++ b/annotations/32.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:16643bff93e419d5ed5d97cf40bd982ad00f8c32c60034f6851d0241c1a7a57e
-size 601
+oid sha256:a27a0b6953772146b1beb70d1d13347ed684252b0dd7407add9a1cf411241d6d
+size 598
diff --git a/annotations/33.json b/annotations/33.json
index 16ab2b7614c443e4d3e404cb3a380515bb85565f..725c38546efc29298e56dd1dede50440bf56893a 100644
--- a/annotations/33.json
+++ b/annotations/33.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:0e715b42575c0b3975e235f643d5894ab153869791352424d6d12efda6a6134e
-size 602
+oid sha256:3247c205a22b66febd2fed87e142f76238bd055c8aefe091d8ace9e60d37353e
+size 600
diff --git a/annotations/34.json b/annotations/34.json
index eea0e6cf1fc3a9cd57b657fe0961ed798f515ad8..1a6b50822feb8a06e0287d2a35305c88b1fe58a5 100644
--- a/annotations/34.json
+++ b/annotations/34.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:af3acea266200bf5fe7aa51d8f0e8c3c6e4eb67eb0eb616d71fcd4bdfdc3315a
-size 599
+oid sha256:3e4d0e425cb65ebbe9bd6b646f62e980731b3b58eb2f597d6a9c033ed50b1dcf
+size 604
diff --git a/annotations/35.json b/annotations/35.json
index 200fda3e234855959019d65c8ad4a2aeda54b396..52b95c2fedda0245e4b21f0e7c9af915737924de 100644
--- a/annotations/35.json
+++ b/annotations/35.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e4b005252a3fd75a33bdaa737ce043f42cbecbde62fcb0bb3c25e3c497236039
-size 603
+oid sha256:2d6531434f6bee9b389c99e301d7cbd63352f7420b02303f40b06d42b8fb8f24
+size 598
diff --git a/annotations/36.json b/annotations/36.json
index 0b24f387f9fbab9ec637003b7402f65b1552ac99..0f36bcfb254599ab159adb7b04d7c1cfac56ad20 100644
--- a/annotations/36.json
+++ b/annotations/36.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:0f98b87bf4f7ae0aba45a37ba03815dcc1b22e581c14f3d8cbca6c8df470c2c9
+oid sha256:2d7b57ab0b7498a8e8b9a6e4bcf4fef778dbcc24f37980e411f0bd0c5895b6f3
size 603
diff --git a/annotations/37.json b/annotations/37.json
index eae95f1fda6c780cc141ebb93d63df03fd9d36c3..61610050771da785c6b9bb5c0a41e828244aef3d 100644
--- a/annotations/37.json
+++ b/annotations/37.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:544a58c6478395ccf93365e65d5d06a7dbbf4c4b5f160ac7284e0305afd86de7
+oid sha256:f8eff753d99a77beafc4d658d48798f3b8f805ec29d261acf8bf2d5ccaad8266
size 604
diff --git a/annotations/38.json b/annotations/38.json
index 9f3537f7a9323027328a5272eba38016eaec3912..6e5ea1fb6e7c30a22438b2125fb66ddd65f3e4b4 100644
--- a/annotations/38.json
+++ b/annotations/38.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:d123ebebebe40564ef5a54eabbb525c6accea49a1acce69da276ea02746c9a45
-size 599
+oid sha256:001113f9776744e4363cb0aa05a1b61508fd229c44bca6c79ba449e8646a4ac0
+size 597
diff --git a/annotations/39.json b/annotations/39.json
index 2a8aa8f454b6bf64b98178acac6f27aa6c2ab950..c0588b7375c2faf1c5597e3e9e10ef29bc611ad2 100644
--- a/annotations/39.json
+++ b/annotations/39.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:7e84937db08c3d9dfc693e3d5f2ff59abde67ffe152dd6ac7ff6ca5ac590e5a1
-size 602
+oid sha256:6d014a6f7c864f5f0e5c699eed4fb68bccf2f87cda50f7010e8dafb8df786b4e
+size 599
diff --git a/annotations/4.json b/annotations/4.json
index 9298c305c29f8e57db3318de2605de9a3b8cd224..5a1b0988535c7fb3e5d9d163e9c12f64e3d4e6f1 100644
--- a/annotations/4.json
+++ b/annotations/4.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:fada9d8f64aeac581070bc17c50e4958b747c4ed82db274f1b9ab03b7c2d2a16
-size 599
+oid sha256:f76fbc7b6ab0afebb8dbc2b60569a169df87fba076712cf641c9698de7889fc0
+size 598
diff --git a/annotations/40.json b/annotations/40.json
index 1a4bd393bbc87f9febafb320222f1460cba9fa89..530a1082ecca105aad421bff859f2e152840e08f 100644
--- a/annotations/40.json
+++ b/annotations/40.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:ed11a5d7ecb26ab4472adf5ad1e883ca97eacebb6993274ecc625059864aa506
+oid sha256:c1898f94401693ec68c2be59cca229c4fafeb0a34487182b873d3450245af20e
size 599
diff --git a/annotations/41.json b/annotations/41.json
index 8cbb6bb3be2da695d8ed00300ddd39425d3b6b57..ec12a34a3ef7a0abea2343aa85184f54e1f5f067 100644
--- a/annotations/41.json
+++ b/annotations/41.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:aa3e01838386b3eba88cdd26ddecf11157c63d4de32ca906bb17203d1a0e53f9
-size 599
+oid sha256:76505fa632d03424d471c033efe915fa1739ba6137339391c9efdc140ecfc9d9
+size 603
diff --git a/annotations/42.json b/annotations/42.json
index 89cb99421cb2fa2ae143b8c7a7ca855e42e588c2..20a00a73fdb8b103991b9bd7231bdb53b8a876dc 100644
--- a/annotations/42.json
+++ b/annotations/42.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:12b756b5b068508c6cccc2bd237e4091f4cfb30c550984edce2f0bf53ad52401
-size 604
+oid sha256:ac08904075d8a572c2085847d0bea1d08c8a29c882781ddb96b0b7725dc38be1
+size 601
diff --git a/annotations/43.json b/annotations/43.json
index f67f4ac7da20616abbd7da50c9437cfbddb24335..3dd6fec0764da6ccf485df3d157b11139908976c 100644
--- a/annotations/43.json
+++ b/annotations/43.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:96afa146072eb9e40815537fd7fd172fd87cb3ea11c972779b380918a1d4af94
-size 598
+oid sha256:853c519cc07ab91045859ae6a1aeca4d4c9bafb757c2b4838c66bea5f6e17832
+size 602
diff --git a/annotations/44.json b/annotations/44.json
index 7487dc8de8b162b235fb2048fc3120c5b49fde0e..195705fbdb7f4e90cc5bec9505783de77c3f9e2b 100644
--- a/annotations/44.json
+++ b/annotations/44.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:5488b28c3ffb8cce3a9876e1fb4ad2f175b48abf021578e6a837864facbff9c5
+oid sha256:873b8fba2c07856f1a12b707ffd1a387da314e2d3d553fc4ed30cc363667de50
size 599
diff --git a/annotations/45.json b/annotations/45.json
index dc4364445ed0160ac65b8cb34659d057e99f695c..da6d4f522d57d903c1b088bf0c7ff129743145fa 100644
--- a/annotations/45.json
+++ b/annotations/45.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:230f042f7d8903212564402a6572c6285bad0f61e68439e3119e8eeff60944b0
+oid sha256:37f2bdd41890d2b09697cd4a77f49212b075f321adbd6004921eb155a6db317d
size 603
diff --git a/annotations/46.json b/annotations/46.json
index 90b2baf6ffdaf78d8bd004a2d054516edfe48286..a30e5c6883d5d7e6d481cea79f3a5d9743c70502 100644
--- a/annotations/46.json
+++ b/annotations/46.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:77d1d2eea1a599dd0b12a4fbdfac07986882da3a36579edb035f0a92cd072d3f
-size 604
+oid sha256:1f806ffe2111744774b947bb505cbaaba7e74a7a9ab8847574dc28d72f4a4d82
+size 603
diff --git a/annotations/47.json b/annotations/47.json
index 7d77d79ed5472e510c4f79cd5a7ceba6187442c9..25e653478ede178674475c11326dfa918a044b07 100644
--- a/annotations/47.json
+++ b/annotations/47.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e634ae6ea66770e628dd3b1fa0a72d85ebf6972114942e5ceddc87cce8a609d4
-size 596
+oid sha256:81fbb412e1ea79a90f7aff0decbf198c23661c4fda1a3980d4cf6eecb59b1a60
+size 604
diff --git a/annotations/48.json b/annotations/48.json
index e47db21ce47c8b51a2fb0f32c2942413933eb87d..5eadf624d34c40cfed6516e9d9468696844d0a7b 100644
--- a/annotations/48.json
+++ b/annotations/48.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:0886ed5ebcf67575133b0ec02117a6840e6a2e52f5793e8f462f37c4b855dd92
-size 598
+oid sha256:3e4b73cae21e9e732c43bcf8eed2f066b1acc410cdec93a2d938d57c77834b99
+size 599
diff --git a/annotations/49.json b/annotations/49.json
index f71b73d2b73b27b7725aaac143644fe19d8c737e..63b9ff56291376021234fa0bf1ee0a22f533c718 100644
--- a/annotations/49.json
+++ b/annotations/49.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:c0dea82e553ced66103aae4d34438e385b51db7775fd0c67608740b8f447421c
-size 598
+oid sha256:41c85e6a287c9fd7be56d28258620b8c9f87559a739474a0049553b47dec5be3
+size 602
diff --git a/annotations/5.json b/annotations/5.json
index 2655d7958330a973ea6155f8da500169eceb3292..c0c5c5c2cac5020fd055d71a7dcfe79374995eec 100644
--- a/annotations/5.json
+++ b/annotations/5.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:b66009c1c769858493b8850dc3e1005df9d93d44a5bbd5ada2fb38760af80b14
-size 597
+oid sha256:7876c553652aa45ab34695e4d00480d5f8c7ab1ff040bb88fc37b57e6637fcb5
+size 602
diff --git a/annotations/50.json b/annotations/50.json
index 67af47e7eadc97226236ec2c86431acddb00a60a..1377c94bff62621adbf494556fecdfb4ed0c7c48 100644
--- a/annotations/50.json
+++ b/annotations/50.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:59b84257140f06d316d1866fc46f9c722b605627e91dad1320d10e2b30271905
-size 603
+oid sha256:5aeaecec38b4e49224b80c2575376af2e31e67b6b87d58647c9b33a78b1a3f4d
+size 599
diff --git a/annotations/51.json b/annotations/51.json
index 5216882041b2bc42de322328a5473e5466944c43..427bf06c961b838fc942a29767a001095ce3a13e 100644
--- a/annotations/51.json
+++ b/annotations/51.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:90ad78fc1acaab278a7e5e0eabea285f1f1786ab5082157d5b148f75187fceee
-size 602
+oid sha256:8723d99b04f84f24ed885eb66cfa2302ab56639777c2b8bbdd8b25dea75dc7d5
+size 599
diff --git a/annotations/52.json b/annotations/52.json
index 5b207d62ad34ef60337c41e4a08338deee609ae6..8c1a9acf05d110c32508e3150ee177f6aa434017 100644
--- a/annotations/52.json
+++ b/annotations/52.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:a38da8fdf576341f3cd0a100c5c022e9e1c7769896cb075281dbefec1f8d1758
-size 600
+oid sha256:906e3782df5b38a6c5a0459970988d23fcb14973df5e2f15ecfeea9a67a80d4d
+size 604
diff --git a/annotations/53.json b/annotations/53.json
index fa4ee876e3a365063abbd5dbc0c91d045f4d8699..5ff93555c2d649d105ce5f80b521aab44838d055 100644
--- a/annotations/53.json
+++ b/annotations/53.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:d36a9469a185727791f84cd87f461a192b50dce8f067d0db1dc562ce5d2bb027
-size 601
+oid sha256:dd7c32ba7b9f0daec60367d11f41a958661109a7b8e4270d1ca5f599bd833c87
+size 598
diff --git a/annotations/54.json b/annotations/54.json
index 68a7b777e18fd4728b0c35b6c831198b7c3abc34..a594ec46f2df37454d576c8d88aa219f28630d62 100644
--- a/annotations/54.json
+++ b/annotations/54.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:9e899bffbef9237ed13e60aad1967c84b78f4581ed26ab98f2772384fb31cb5a
-size 598
+oid sha256:06d6597ece8f7edc5ea3d618b1f2028a872a07dc9c25460f9ca1f588e69413c6
+size 599
diff --git a/annotations/55.json b/annotations/55.json
index 4fe953a1c03604ae0e402f4acbb29aefe31dc6a2..98adef4ac29faac3acc78a99205800cad783e753 100644
--- a/annotations/55.json
+++ b/annotations/55.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:2035c6be28e5667f46617d6b45cd79a96102a265a5f529156d00f932f08408f9
-size 602
+oid sha256:ed22c153b40a37cf3eddf7a76e5de6a4a64be8a0037cc6d2649e87cedc360822
+size 603
diff --git a/annotations/56.json b/annotations/56.json
new file mode 100644
index 0000000000000000000000000000000000000000..97b143fe3dfeab5205389cd7838d22daa39671a3
--- /dev/null
+++ b/annotations/56.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d58979a989027703230b00ebad36b4bb36c6852b012c9ed8f947921110a21d48
+size 604
diff --git a/annotations/57.json b/annotations/57.json
new file mode 100644
index 0000000000000000000000000000000000000000..c7d7025f77cc9c8c406d21617afdc047b68560ae
--- /dev/null
+++ b/annotations/57.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e64ecc9865f16c3a8e9a3e8c25914a822ffcbdb8b5599b5e4c2fd20a68ee6d8a
+size 596
diff --git a/annotations/58.json b/annotations/58.json
new file mode 100644
index 0000000000000000000000000000000000000000..20c51889bbcd547bdbe1d9aab1eb52fd452d3c63
--- /dev/null
+++ b/annotations/58.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c1de3e5f185c389046a64620472a746c501c9866fd6ab5a6ec190f78381290a
+size 598
diff --git a/annotations/59.json b/annotations/59.json
new file mode 100644
index 0000000000000000000000000000000000000000..ccb17ca288d90f7833c5ced5a47a5d1b10df0b1e
--- /dev/null
+++ b/annotations/59.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:577afdfa64451cd10060c97e117d4ecfcbda76aac893fe4298d602cb0bc0a930
+size 598
diff --git a/annotations/6.json b/annotations/6.json
index 8cf801cf44fd9a55b7d08c3dfe431f3f7a42f2de..f1a619aa60cc667e0264832a15cac542251e54fb 100644
--- a/annotations/6.json
+++ b/annotations/6.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:44530418dff6a7dfed752b8912600e540cb2647ab3dc77e4003b0cc305a83bb3
-size 598
+oid sha256:1e8b7ceffb563084d97cb75141cfd5ce481db3f0211fa005917fd2e0c5bfa190
+size 599
diff --git a/annotations/60.json b/annotations/60.json
new file mode 100644
index 0000000000000000000000000000000000000000..3fff97a482c85e68cbdc7e167f254bd4f9ff3f4a
--- /dev/null
+++ b/annotations/60.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2bc51ae6aa8f1d97db2dc829b228aed66fa7c21d22fc9c429281bf35f365e69
+size 603
diff --git a/annotations/61.json b/annotations/61.json
new file mode 100644
index 0000000000000000000000000000000000000000..a54716cd71cb9d790d062a925ea21cf89b49ead6
--- /dev/null
+++ b/annotations/61.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ea2e2fdcd4c97fe1fb28b5022af087db5b255e1366360eeff744e48d77185d1
+size 602
diff --git a/annotations/62.json b/annotations/62.json
new file mode 100644
index 0000000000000000000000000000000000000000..467db78ee4285154240e5ed89d751faceb6d24fd
--- /dev/null
+++ b/annotations/62.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a28f51ca21197f8da369a9cf4817409af15a03fcb99465632192cf2d9243a93
+size 600
diff --git a/annotations/63.json b/annotations/63.json
new file mode 100644
index 0000000000000000000000000000000000000000..8ec5f2bb0de55d9d97b821f7d5a35e4ea12482f6
--- /dev/null
+++ b/annotations/63.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98926afd75384a863298aabb7684ca8dcc8ea66cef449863d56d4be185b6ce09
+size 601
diff --git a/annotations/64.json b/annotations/64.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3e1ab360dc9bd5a864f0fb49204b252df8784e3
--- /dev/null
+++ b/annotations/64.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e1092162834e16484a9b26b4ab7547a79b6ca7eeed34c11d07f71dbfa5763ce
+size 598
diff --git a/annotations/65.json b/annotations/65.json
new file mode 100644
index 0000000000000000000000000000000000000000..a67381e8d6c0546d8275bc032cbdf21aaf8fc485
--- /dev/null
+++ b/annotations/65.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7498966710776245dac5aa8533c609190d65697484ca65e300d2d1b549b7634d
+size 602
diff --git a/annotations/7.json b/annotations/7.json
index 697ed2f906da3ce43a8428b8fe96695825bbfb38..8a9d540a5c4f69a8931fc50aa9f7095f7baa443e 100644
--- a/annotations/7.json
+++ b/annotations/7.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:d1a9d76025e1642936e08a92f7a3cf297f5330f9cb0a1cd666f00b7948c13211
-size 596
+oid sha256:20cfd4fde1180c87c09a2cc749bafe21307e643d19e78b9275cc90df86baaabc
+size 603
diff --git a/annotations/8.json b/annotations/8.json
index 26a47e59d8b916d16a04f35d75a6c5dbb45e5a53..1ef95ae4ace4805eee0deee1816816716e5675f7 100644
--- a/annotations/8.json
+++ b/annotations/8.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:6d2d5d68baeae4fc4ae3b9ede2f2163f1f319e875f1f3a76924b294cc434dfe2
-size 597
+oid sha256:3ed45fcf5c8230e6dc23717daa0eabdc63445ce6be0a9e58221ce272d813f704
+size 602
diff --git a/annotations/9.json b/annotations/9.json
index 4315c9a7324fa3327d9ba3ee54571a58eda5b627..eac798fa51ab05443be3bde868dce7b0a4165190 100644
--- a/annotations/9.json
+++ b/annotations/9.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:8c3187f5ea5dca33d7303e0c5804a708643d4df652f0e5970715380d2a5bf33c
-size 603
+oid sha256:ee941d4cc8c62aa85dd248b7bec7685bec7df17cf9dce613500f2548f182aa32
+size 602
diff --git a/audio/56.mp3 b/audio/56.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..73bd073932fdb9455d990f341ce98282d850b363
--- /dev/null
+++ b/audio/56.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7ed9e2ba97d3231457b3e699f67130488af59df2827599cecbaa4f054e1ccf1
+size 1524716
diff --git a/audio/57.mp3 b/audio/57.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..67d92439269479d26ceb87aa36b281d1a75a16c7
--- /dev/null
+++ b/audio/57.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff2b40d06add3f07ca26e609ca0fef0270b9f4e72bbfe33a31bf193bcee7e96b
+size 4384556
diff --git a/audio/58.mp3 b/audio/58.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..73bd073932fdb9455d990f341ce98282d850b363
--- /dev/null
+++ b/audio/58.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7ed9e2ba97d3231457b3e699f67130488af59df2827599cecbaa4f054e1ccf1
+size 1524716
diff --git a/audio/59.mp3 b/audio/59.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..67d92439269479d26ceb87aa36b281d1a75a16c7
--- /dev/null
+++ b/audio/59.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff2b40d06add3f07ca26e609ca0fef0270b9f4e72bbfe33a31bf193bcee7e96b
+size 4384556
diff --git a/audio/60.mp3 b/audio/60.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..73bd073932fdb9455d990f341ce98282d850b363
--- /dev/null
+++ b/audio/60.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7ed9e2ba97d3231457b3e699f67130488af59df2827599cecbaa4f054e1ccf1
+size 1524716
diff --git a/audio/61.mp3 b/audio/61.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..67d92439269479d26ceb87aa36b281d1a75a16c7
--- /dev/null
+++ b/audio/61.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff2b40d06add3f07ca26e609ca0fef0270b9f4e72bbfe33a31bf193bcee7e96b
+size 4384556
diff --git a/audio/62.mp3 b/audio/62.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..73bd073932fdb9455d990f341ce98282d850b363
--- /dev/null
+++ b/audio/62.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7ed9e2ba97d3231457b3e699f67130488af59df2827599cecbaa4f054e1ccf1
+size 1524716
diff --git a/audio/63.mp3 b/audio/63.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..67d92439269479d26ceb87aa36b281d1a75a16c7
--- /dev/null
+++ b/audio/63.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff2b40d06add3f07ca26e609ca0fef0270b9f4e72bbfe33a31bf193bcee7e96b
+size 4384556
diff --git a/audio/64.mp3 b/audio/64.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..73bd073932fdb9455d990f341ce98282d850b363
--- /dev/null
+++ b/audio/64.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7ed9e2ba97d3231457b3e699f67130488af59df2827599cecbaa4f054e1ccf1
+size 1524716
diff --git a/audio/65.mp3 b/audio/65.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..67d92439269479d26ceb87aa36b281d1a75a16c7
--- /dev/null
+++ b/audio/65.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff2b40d06add3f07ca26e609ca0fef0270b9f4e72bbfe33a31bf193bcee7e96b
+size 4384556
diff --git a/transcripts/uncorrected/1.txt b/transcripts/uncorrected/1.txt
index b7896c7f96af437ec44fecaba4cd587b9fd8c785..a6c4aa0dea473932cb03dfed9978e4ce2702e4a1 100644
--- a/transcripts/uncorrected/1.txt
+++ b/transcripts/uncorrected/1.txt
@@ -1 +1 @@
-Okay, so the basic validation of the app is good. It functions according to spec.
I'd like to just remove the emojis and please take a look at the screenshots of the app as it's currently implemented and see if you can think of any design and UI optimizations that would make it even more friendly to use.
For transcribe and optimize, we definitely would like to have a label transcribe and optimize.
Maybe let's have a homer text or an about section where we describe to users the differences between these two functions.
\ No newline at end of file
+For Frigate Plus, what I want to do is as follows. I'm looking into getting a new, getting a server. And I'm conscious that you want empty labeling and identifying labeling. Both of them. So I'm going to curate those or gather those on the cameras.
And now, and if slash when I do the server upgrade, the home server upgrade, and then I would move over to Freigate and then actually start using the train models.
Worst case scenario it's just $50 and I never actually end up using the stuff but I'm hoping that I will at some point.
\ No newline at end of file
diff --git a/transcripts/uncorrected/10.txt b/transcripts/uncorrected/10.txt
index b57417cde1c5303a489404bcd259f827ea2cf7a6..839741289e07ba1b0cbfd1312e40cff52a27de8a 100644
--- a/transcripts/uncorrected/10.txt
+++ b/transcripts/uncorrected/10.txt
@@ -1 +1 @@
-The problem is that we looked at this before and when it reboots the router it's not bringing up the Cloudflare tunnel.
So see it's working now, but just see what can be done to make sure that this, we need to make very certain that this does start automatically on reboot.
\ No newline at end of file
+I'd like to create a voice recording app for Ubuntu Linux. The app should have the following functionalities. It's a voice recorder, and it has the essential voice recording functionalities of record, pause, stop, and restart. The restart scraps the current recording in cache and restarts the recorder from zero.
For the transcription process, I'd like to institute the following workflow. We'll use Google Gemini API and ensure that we're using Gemini 2.5, which supports multimodal input, including audio. The recording captured from the user should be optimized for this purpose of voice capture for speech to text. By which I mean, I would suggest that we record in mono. We capture the recording in a space-efficient format. We're optimizing for creating a voice recording that is not necessarily the greatest and most detailed of audio clarity, but which strikes the best balance between quality and space efficiency for transcription.
The voice recording will get sent to Gemini for transcription with a system prompt that instructs it to transcribe and also clean up the recording by removing filler words, adding sentence structure, and adding spaces. There can be a second button which is called transcribe and optimize, and the transcribe and optimize workflow is the same except that the system prompt is a little bit more instructive and it tells Gemini, in addition to those steps, to remove filler words, add sentence structure, paragraph spacing, and try to optimize the text by adding headings, organizing the thoughts a bit, and removing repetition, so it's a little bit more aggressive.
In both cases, the transcription, when it returns from Gemini API, will populate into a... In fact, Gemini should return two things, a title and a text. The title is a short title for the voice note. The text is short is the text, and so on. The text is formatted in Markdown; it should appear within the Markdown within the text editor. There should also be a clipboard button, and finally, the user should be able to save the note.
When the user chooses to save the note, it will get saved to a predetermined folder which the user selects as where they save voice notes on their operating system. And it's saved there as a Markdown file with the title in machine-friendly format. So if the voice note title has spaces, the saved file name will just replace those with hyphens.
The app would be run repetitively such that if the user wants to record a new note, they start again, and when they do transcribe or transcribe and optimize, it will send and then overwrite the previous transcription. So the user has to click the save button in order to save it, or there can be an option for auto-saving configurable as a user setting.
\ No newline at end of file
diff --git a/transcripts/uncorrected/11.txt b/transcripts/uncorrected/11.txt
index 4b73fda258009e56d8fc1e8ade93312193c751d0..b7896c7f96af437ec44fecaba4cd587b9fd8c785 100644
--- a/transcripts/uncorrected/11.txt
+++ b/transcripts/uncorrected/11.txt
@@ -1 +1 @@
-I recently picked up a Samsung Galaxy 6 smartwatch just to try out the idea basically.
And my only need was really for a dual time display, local and UTC, and the day display.
It was about $100 give or take, so a very basic entry level that would sync with my OnePlus.
If it turns out that I really like it...
The other requirement was a good microphone for voice recordings.
Even if it's not the best and my phone is better, it would be nice to be able to use it for that because I take a lot of voicemails during the day.
If I turn out to really like it, what would you suggest as a good upgrade?
I tend to like more everything that's getting under the hood with technology.
So I wasn't thrilled about buying a Samsung, but it was what was available for the price point approximately.
\ No newline at end of file
+Okay, so the basic validation of the app is good. It functions according to spec.
I'd like to just remove the emojis and please take a look at the screenshots of the app as it's currently implemented and see if you can think of any design and UI optimizations that would make it even more friendly to use.
For transcribe and optimize, we definitely would like to have a label transcribe and optimize.
Maybe let's have a homer text or an about section where we describe to users the differences between these two functions.
\ No newline at end of file
diff --git a/transcripts/uncorrected/12.txt b/transcripts/uncorrected/12.txt
index dd20ba87d4e27f321810d0504c0736c4e154d407..c4062c3c839f500b2242b1b7628a7ef9e4bd26f0 100644
--- a/transcripts/uncorrected/12.txt
+++ b/transcripts/uncorrected/12.txt
@@ -1 +1 @@
-I recently picked up a smartwatch from Samsung Galaxy and I'm curious one thing that would be really helpful that I thought of.
I'm always stressed about losing or potentially losing phone wallet keys.
And for all of these things, Fun Walla Keys, I use Pebble Bee Tracker now.
So I'm wondering if there's any way or app that can do something like geofencing in which if any of the things are...
Maybe you can turn it on and off at certain times but they're in.
If they move out of the zone you get an alert notification if the smartwatch vibrates or whatever.
\ No newline at end of file
+I would like to create a docs folder in this repository.
The docs folder should be separate from the code and it will be the place in which documentation is gathered.
Ask the user if there is any specific functionalities or aspects of the application that the user wishes to document in this folder.
The docs folder should be mentioned and linked in the readme, directing users to it for more extensive documentation than can be found in the readme itself.
\ No newline at end of file
diff --git a/transcripts/uncorrected/13.txt b/transcripts/uncorrected/13.txt
index 1c658e5f3d7436116c6a372301158c4d76aff497..ec565a8b602b1abb235b4c8a5616370d701f5be7 100644
--- a/transcripts/uncorrected/13.txt
+++ b/transcripts/uncorrected/13.txt
@@ -1 +1 @@
-Something that would be very useful would be the following. So I use an app called Voice Notes for Android. And it's a voice recording app. It's called Voice Notes. Now, it has one fatal flaw, in my opinion, which is that it doesn't have Bluetooth support. So when I'm out and about, like now, I literally hold the phone up to my mouth, and it certainly gets me much, much, much better recording quality, but I kind of look a little bit goofy and I feel very self-conscious.
So there's two things I've thought about. One is finding a voice recording app that has more robust Bluetooth support. I think there are two options really that I'm thinking of. The first is finding, as I said, a voice recorder with very robust Bluetooth support and using a Bluetooth microphone to record with. The alternative, because I'm seeing these products come to market increasingly, is to use a wearable Android device, which probably wouldn't be that different, maybe even physically. And I think the more I think about it, the more I think about it, the more I think about it, the more flexibility. Rather than being a Bluetooth accessory, it's running, I guess, Wear OS, and maybe that would give you more flexibility.
I'm trying to think of the pros and cons on which would be better. I veered towards the wearable approach as it seems to be what's where. I don't know where the market is going with this concept, but I'm curious to know what your thoughts are regarding the pros and cons.
\ No newline at end of file
+Please go through the markdown files in this repository to make sure that no emojis have been used.
If you find any emojis, remove them.
If emojis have been used in place of proper icons, then identify an appropriate icon library that could be used to provide the emojis.
Remember that if the icons are well known, such as the icons from major social networks, these should be integrated via a pre-designed library.
Do not attempt to create custom once-off SVGs for any logo that likely already exists in a professional library.
\ No newline at end of file
diff --git a/transcripts/uncorrected/14.txt b/transcripts/uncorrected/14.txt
index 59abde81206328bbd33b6fe792b0dcf161a7d148..97dc205e9d7b77068f580705263f66d3a0ce82b0 100644
--- a/transcripts/uncorrected/14.txt
+++ b/transcripts/uncorrected/14.txt
@@ -1 +1 @@
-So there's a lot of these AI voice pins emerging onto the market which are designed to be wearable devices.
So I record as I'm doing now quite a number of voice notes when I'm out of the house.
I use an Android app called Voice Notes that I really like but it doesn't have support yet for Bluetooth microphones.
At least not support that's reliable.
So I have to hold the phone up to my mouth, which really kind of degrades the experience.
As I started, I want to actually start doing, going on walks expressly for usually the moment I do this when I'm going places.
I just happen to think, but I actually want to start taking walks to jot down some ideas as a healthier way of combining work and getting out and getting some exercise and getting some sunlight.
And for that it would be really nice to not have to, you know, be holding up a phone to your mouth for 30 minutes or an hour or whatever it may be.
So I was thinking about wearable voice recorders but a lot of them from what I've seen are these kind of closed ecosystems in which they sell you can't just buy the hardware.
They'll sell you, they'll do like onboard transcription or they'll sell you like a Cloud Transcription Bundle.
I'm really not a fan of on-device transcription.
I mean I think it works but in my experience it doesn’t make a lot of sense to me just architecturally.
I think why do stuff on device that can be done in the cloud cost effectively?
And you got, you know, you can run vastly more powerful models in the cloud.
You don’t have to worry about quantizing models on a very, very small piece of hardware.
And so I guess what would be great for me, but Android, when you're looking at wearables, Android's like the obvious sync partner.
So you just need to get the voice of the audio data from the recording thing to Android and from there you can push to the cloud and then the rest is back-end speech and text.
So what I'm saying is that I'd love a modular solution that could do this.
A pin that is just hardware, just recording this audio sync, maybe has its own app, or maybe can be used preferably with third-party apps.
And therefore you can kind of build your own voice recording stack around it, and you can use your existing Speech-to-Docs transcription workflow.
And you don't have to subscribe to these very kind of, I forget the word, walled gardens in which the vendor chooses your force into this package that's often very unnecessarily expensive and you're paying mostly for overpriced transcription.
I'd prefer to just get, invest in good hardware!
\ No newline at end of file
+Go through the website and see any place in which icons have been implemented which were custom designed but which could have been implemented more efficiently through using an existing icon library.
Pay particular attention to icons for common uses such as social media icons which exist in many libraries, as well as emojis which may have been used in place of icons.
This approach should not be followed.
If the user uses an existing icon library that you can identify, then replace the custom coded icons with the most appropriate matches.
If the user hasn't yet implemented an icon library, provide some suggestions to the user, focusing on those libraries which will best match the aesthetic which they are following in their designs.
\ No newline at end of file
diff --git a/transcripts/uncorrected/15.txt b/transcripts/uncorrected/15.txt
index 65b441258e93b436681d73b0928dd3ea5da97777..0f9f01aeb1efa9b56a188dbecffed93a32cfd7c5 100644
--- a/transcripts/uncorrected/15.txt
+++ b/transcripts/uncorrected/15.txt
@@ -1 +1 @@
-I picked up a Samsung Galaxy FE watch. I checked compatibility, smartwatch. I think it's in the 7 series if I'm not mistaken. What is it exactly? It's a 40mm smartwatch. Where does it fit in their line up? What's the difference between this and the Watch 7? I just went for this one because it was what was in stock.
And is it shower proof, waterproof? And I know it's a glass display. So I'm wondering how tough is the glass? Or is it tough at all? I just asked because it's a fitness watch. I assume they make them a little bit more ruggedized, but maybe that's not the case. What does it say?
\ No newline at end of file
+This repository contains a collection of slash commands which I use with Claudecode.
I capture some of the slash commands using speech to text.
The slash commands that have been captured with dictation frequently lack elements like punctuation, paragraph spacing, and they may contain occasionally words that were mistranscribed.
Please recurse through the directories and correct slash commands which you can find which were missing these basic textual features but do not limit your fixes to only I don't want to go into those containing these defects but rather consider in your editing any slash commands which need to be rewritten for optimal intelligibility.
\ No newline at end of file
diff --git a/transcripts/uncorrected/16.txt b/transcripts/uncorrected/16.txt
index b51e9cf9eacfa8f539ba2c6270fbbbdcb80adeda..35a25c66c27c2d44f0a64ca785442bcb2b03db07 100644
--- a/transcripts/uncorrected/16.txt
+++ b/transcripts/uncorrected/16.txt
@@ -1 +1 @@
-So there has been this vast development in multimodal AI recently. I signed up for Replicate and FAL AI. And what really strikes me is not only the diversity and number of models out there, but also the large number of permutations in multimodal AI, meaning what input can go to what output. And I think what I find difficult about it at the moment to navigate as a, let's say, creator. I created a few music videos just as kind of fun experiments. Is that there's so many different models. Like just in, let's say, the one series, there is maybe 20 different models to choose from in FAL, but they all do slightly different things not only in terms of the resolution and the parameter and the max duration but also in terms of the modalities, and they don’t really allow you to filter on this at the moment.
So what I mean by that is if we take an image to video model that animates still images to video, one model in one might create video without audio and another might create video with audio. And that's a very significant difference. But there's also a significant difference in do I prompt for the audio? In other words, is it going to be text to audio and render out audio that then gets added to video? Is it reference audio and reference image? So when you begin opening, all these differences really matter because I might want to filter on ideally, let's say I wanted to look at image to video models, which could generate lip sync to audio from a prompt. That might be one use case as well as the video.
In another use case, I might want to create a dialogue video. Let’s say I have a still image of a crowded market in Jerusalem, and I might want to print something like create a video from this image; the background soundtrack is background conversation noise in a bustling marketplace with vendors yelling out sales prices. That's just an example of the kind of background noise and the ambient noise that we have in this market I'm thinking about.
So what I would like to do, I created this repository which I created here. I'm trying to think of a taxonomy for multimodal, really for my own reference, but also as an open source project. Exploring the permutations of multimodal that are possible. So in the preceding example, we might have one definition of a modality might be still image to video without audio. Another modality, and then the description. Another modality might be still image to audio without lip sync. Another modality might be still image to video with lip sync.
But then you might have some sub modalities being still image to video with lip sync with reference image, that a reference to image. Another sub modality there might be still image to video with reference character reference in video. Another might be still image to video with audio with character reference through a LoRa (L-O-R-A). And I reckon that if we really enumerated the modalities we might get to hundreds if not thousands of different ones. For example, in FAL, just to talk about the long tail, there's music to music, which is music in painting. There's audio in painting, well, yeah, audio in painting, which I'm thinking aloud here is, I guess, distinguished music in painting is a subset of audio in painting, that it's melodic.
So that's the objective. I think that the JSON is the obvious format in which to attempt to denote these. And what I'd like you to do as the task definition is try to do this basically. Try to enumerate, list out a hierarchy, some kind of taxonomy representation that makes sense. We could try to create a baseline and then explore various ways of mapping out the hierarchy, manipulating the JSON so that we look at different ways of organizing it. So I think it would be useful to have like a first entry JSON in which we, and later maybe I, as new modalities come to, and we can maybe have very interesting labels might be their point of maturity, example workflows, use cases, etc. There's an awful lot that could be explored within these parameters.
\ No newline at end of file
+This repository contains a folder of screenshots.
The intended use of the screenshots is that they will be integrated into the README or other documentation to demonstrate the UI of the app.
It's important therefore that the screenshots have descriptive file names.
Please rename the screenshots for this purpose and integrate them into the README in the most appropriate section.
\ No newline at end of file
diff --git a/transcripts/uncorrected/17.txt b/transcripts/uncorrected/17.txt
index 7691086737e7862b23604ec7c3b5a56071521899..c3e6aec46313e6c703697e4fcc48f050db3015c1 100644
--- a/transcripts/uncorrected/17.txt
+++ b/transcripts/uncorrected/17.txt
@@ -1 +1 @@
-Look at the Facer's, I'm really surprised for no one's made a Hebrew date watch on the Facer creator, but it's probably the developer studio from Samsung is the way to go for that. And I want to edit, like the one that I have slightly, I can't find the perfect one, people put too much on them. I'm looking at the face I got from Facer now and they've added temperature, sunrise, sunset, neither of which work, I guess the integrations don't work, but who wants that on their watch? These are all like anti-simplicity. I just want... It's almost perfect, but they added these stupid unnecessary features.
Maybe on the Facer creator marketplace, I can just create one that I want. Maybe that will actually work. That's probably the easiest way to go. But if that doesn't work, I can create one on Github and open sources, the font that I want, but the Hebrew one would be very special to me. It's definitely possible.
I'm looking at my desktop display. It says 30 Tishra 5786. So for sure from the HIPAA Cal API the data source is there. And I looked last night and it seemed that people only had created sort of ones for from a very different reason.
The VoiceNote data set I really want to create as well. That's actually a very important project, the GUI for adding that I have a backlog of literally thousands and it would form the basis for my classification model which I should probably note out and that's a real model I can build for the idea as well.
\ No newline at end of file
+What's the most professional way to install a package on Linux? If I create an executable and copy that into the directory on path, such that I can call it, is that considered a worse way to install applications than through a Debian package?
\ No newline at end of file
diff --git a/transcripts/uncorrected/18.txt b/transcripts/uncorrected/18.txt
index eaea5b9166faabd9642d0c97478ecd6f6fd86d89..72dd47f2927e95f6a555120604796efb0f7010e8 100644
--- a/transcripts/uncorrected/18.txt
+++ b/transcripts/uncorrected/18.txt
@@ -1 +1 @@
-Okay, so I've just configured. VS Code is very, very important. I've just configured automatic updates, and I asked Claude, I said, why am I not getting them? Why do I, it says, you're out of date, download the Debian. And I said, I don't want to have to download a Debian every time, and I really want to keep this updated.
So it says, you should know, you need to join the Microsoft ASC, their repo, their third-party repo, which I had before then I think because I removed it as a duplicate.
So to clarify, it's not the case that you need to do this process. It is actually an automatic upgrade thing but you do need to be attached to the Microsoft repo to get those.
\ No newline at end of file
+Your task is to take this system prompt and rewrite it for implementation in a structured AI system.
In order to do so, adhere to the following instructions.
Within the text of the prompt itself, define the The JSON output that the AI should be constrained to giving.
And instruct the AI tool that it is working in a structured workflow and must only return valid JSON.
Create a folder for the prompt.
And add in addition to the rewritten prompt text.
You should also create a .json file containing an Open API compliant JSON schema and finally and you create another JSON called object.json which contains just the JSON object.
\ No newline at end of file
diff --git a/transcripts/uncorrected/19.txt b/transcripts/uncorrected/19.txt
index ffc57e5992be591a97dbd7ee169ed839fe73e975..76af9ed38a7f3a464480738293afb78a25ff5929 100644
--- a/transcripts/uncorrected/19.txt
+++ b/transcripts/uncorrected/19.txt
@@ -1 +1 @@
-I want to add to my DSR Holdings a LLM store TXT. It's almost a pity I didn't talk about this with Shlomo, but a radical idea. It actually, I mean, it appears to be working. I don't know if you're sure where I read from if it just parts my home page or read the txt but I asked Claude to pull in some context data about me into the into the file it seemed to work really well so what the thought I had for I mentioned Shlomo and what I thought about for myself is inbound LLM marketing considering AI traffic.
It's a pity I didn't take some in fact I'll add to the DAM a screenshots folder because a perfect example of a screenshot was the last time that I saw a and I sure I see them almost every day A sign up form where they didn ask for was the LLM your referral source I think it's absolutely insanity that anyone, any company would not have LLM as top of their list of referral sources for traffic.
And this opens up a whole world actually of LLM analytics. and you see which LLMs are scraping our site. LLM optimization. And then basically the idea of being LLM as an inbound pipeline. If you did all this well, could you actually view large language models as an inbound traffic source saying Google's dead, LLM is where it's at.
Here's how you can, I mean, I would have to try these approaches on my own site, but all I can do there is keep optimizing and see if someone says, if you typed into ChatGPT in a month and said, I need someone who's good with AI in Jerusalem, Israel. Can you find any profiles? And if it worked, that would almost be the opposite to pursue the outbound track as well for jobs. But as a complementary angle of attack, I think it would be very interesting to see as an experiment even.
\ No newline at end of file
+Okay, so here is the type of license that generally work for me for open source projects. I usually open source software because I've created something useful. I think other people might either find it helpful or develop upon the idea to do it to take my idea and ability further. Attribution is always appreciated but I'd only want to make it mandatory if that wouldn't really sort of create friction with other people who'd like to use a project.
But attribution really helps me because it opens up the relationship and connectedness of open sourcing because if someone were to use it downstream, they have a way to sort of get in touch with me. People commercializing open source software doesn't sit very well with me, but again, it's only if it's, I'd be very reluctant to add that as a limitation.
Other than that, nothing else really stands out to me as something that I'd require. Like if people took it in any other direction, it's fine. The only one I think about sometimes is obviously no one wants something that creates to be sort of misused or used for harm. And one also doesn't want to end up with lawsuits if something they create is misused, so I don't know if there's any legal language that can create a little bit of protection around those potentials.
\ No newline at end of file
diff --git a/transcripts/uncorrected/2.txt b/transcripts/uncorrected/2.txt
index c4062c3c839f500b2242b1b7628a7ef9e4bd26f0..57ee9e7328b60a23b4d9d39ea97021e9d3ff8e2d 100644
--- a/transcripts/uncorrected/2.txt
+++ b/transcripts/uncorrected/2.txt
@@ -1 +1 @@
-I would like to create a docs folder in this repository.
The docs folder should be separate from the code and it will be the place in which documentation is gathered.
Ask the user if there is any specific functionalities or aspects of the application that the user wishes to document in this folder.
The docs folder should be mentioned and linked in the readme, directing users to it for more extensive documentation than can be found in the readme itself.
\ No newline at end of file
+So, I have a question. For image to video, it's currently expensive, very expensive actually. I'm trying to find a way. So I found the WAN models, which are by Alibaba. I find them to be very good, and they have a more affordable WAN model that I like using. And when I'm doing a video, I frequently gather up my images, gather up my prompts, and I move in towards a workflow by which I kind of do the storyboarding, gather the source material as I call it, the photos. Gather the prompt together, and then I will run it as a script, which is a very novel way for me of approaching content creation in the sense that it's programmatic and it's code first.
Which is a strange way to approach a creative process, but it works. And it seems to me at the moment to be the most effective way to do this because otherwise, before this, I was using a playground, running them one by one, importing them to a video editor, and it's just a lot slower that way. Now the issue is that image to video, as I mentioned, is expensive. And if I'm doing these projects for fun, I have a lot of ideas I want to do for fun. But even the cheaper WAN models are in the region of 10 to 15 cents per generation, which could easily, it's very easy to go through 20 or even 50 dollars, especially given the fact that frequently you need to generate the same prompt multiple times before you get a satisfactory result.
I really, really want to explore image to video, and I'm trying to find a way to have an affordable way to play around with it even if it's not the best model. And you know, so what I've been thinking of is I come across for a while providers like RunPod who do make GPU available either in serverless functions or they do per hour pricing on GPUs. And since I discovered Replicate and FAL, I've kind of wondered, well, if you can just make an API call, why go to the trouble of managing an instance of a machine? I'm thinking now that it might be the cost reason that if the machines are a certain price per hour, it might actually be a lot more cost-effective than using an API.
So my question is, firstly, is that the case? Is a frequent reason that people actually do these or use these services for cost mitigation? And so on. So that's the first thing. Secondly, serverless versus pods as RunPod calls them. I guess serverless almost makes more sense to me because you just pay for what you use and you don't need to worry about starting and stopping the pod and configuring auto shutdown policies. So what’s the reason that people go for pods over serverless?
And finally, if I want to do this, probably the objective would be, is there a way that you can have like your own API endpoint and that's running stuff on the serverless function in the backend? And what I get confused about for these things, the first time I did it, if I'm not mistaken, I did it with video generation. The video actually generated on my local, which seems almost like magic to me. So you're doing the actual inference rendering in the cloud. And is it just the case when that happens? And so on. And then just running my script and then I'm using on-demand compute.
\ No newline at end of file
diff --git a/transcripts/uncorrected/20.txt b/transcripts/uncorrected/20.txt
index e9383aa5db79a22c214793ffdd4a93fc6ed49a60..b57417cde1c5303a489404bcd259f827ea2cf7a6 100644
--- a/transcripts/uncorrected/20.txt
+++ b/transcripts/uncorrected/20.txt
@@ -1 +1 @@
-Can I just make a suggestion? Before we proceed in this direction, I think that it definitely is the right content environment. But the reason I've created these is so that we have them ready for recurrent use. So Lama Index is very, very good and would be used for a lot of very versatile.
So before we start, let's update the cond environment to install all the different utilities we might need for tokenizing text, processing markdown, markdown to PDF, PDF splitting, all these different text utilities. Even ImageMagick typesetting utilities. Once we have that ready then we can begin. But let's get that environment good first if we can use a conda.yaml to define it.
In other words, take in the existing environment, make a few edits and then install that. Just remember there's an AMD GPU so it will affect the choice of packages.
\ No newline at end of file
+The problem is that we looked at this before and when it reboots the router it's not bringing up the Cloudflare tunnel.
So see it's working now, but just see what can be done to make sure that this, we need to make very certain that this does start automatically on reboot.
\ No newline at end of file
diff --git a/transcripts/uncorrected/21.txt b/transcripts/uncorrected/21.txt
index 68f0272363ffede253054f91243a4d0b8203d19b..4b73fda258009e56d8fc1e8ade93312193c751d0 100644
--- a/transcripts/uncorrected/21.txt
+++ b/transcripts/uncorrected/21.txt
@@ -1 +1 @@
-Okay, here's just a few more specific things that I want to include. So I see you mentioning hydration drinks, which is very important. Electrolyte tablets become very expensive. So there's a few things I'd like to explore. More cost-effective ways for making them. I think you can buy them as a dry powder is one idea. The second one is a homemade recipe.
The next set of ideas is I really really need to always have some kind of food stuff at home ready to eat. So there's a few things in that regard. A list of a kind of basic pantry shopping list. Obviously optimized for all the dietary recommendations we've discussed here. Suggestions for, and I think protein bars aren't really enough, it needs to be carbohydrate as well. Recipes or suggestions for homemade protein bars for the same reason that they become very expensive to buy them individually.
That's probably the key thing I'm looking for at the moment is to have always on hand the ingredients and ideally like kind of a backup layer like I kind of make these protein bars but I also and that's kind of the fallback but ideally I prefer to obviously eat and so on.
\ No newline at end of file
+I recently picked up a Samsung Galaxy 6 smartwatch just to try out the idea basically.
And my only need was really for a dual time display, local and UTC, and the day display.
It was about $100 give or take, so a very basic entry level that would sync with my OnePlus.
If it turns out that I really like it...
The other requirement was a good microphone for voice recordings.
Even if it's not the best and my phone is better, it would be nice to be able to use it for that because I take a lot of voicemails during the day.
If I turn out to really like it, what would you suggest as a good upgrade?
I tend to like more everything that's getting under the hood with technology.
So I wasn't thrilled about buying a Samsung, but it was what was available for the price point approximately.
\ No newline at end of file
diff --git a/transcripts/uncorrected/22.txt b/transcripts/uncorrected/22.txt
index b373213f419ec9b2e4b9ca165f42170441577ed2..dd20ba87d4e27f321810d0504c0736c4e154d407 100644
--- a/transcripts/uncorrected/22.txt
+++ b/transcripts/uncorrected/22.txt
@@ -1 +1 @@
-Okay there's a bunch of memory layer projects now to explore later that are actually it's not longer separation between vector storage and memory which makes sense because it's kind of basically the same server it's offered by API mem0 super memory remember api memories.api that's a good starter list and they can all be integrated and used they'll do the vector backend so I'm using I'm testing it out on the documentary finding one, but just to see the concept and how it works with agency.
\ No newline at end of file
+I recently picked up a smartwatch from Samsung Galaxy and I'm curious one thing that would be really helpful that I thought of.
I'm always stressed about losing or potentially losing phone wallet keys.
And for all of these things, Fun Walla Keys, I use Pebble Bee Tracker now.
So I'm wondering if there's any way or app that can do something like geofencing in which if any of the things are...
Maybe you can turn it on and off at certain times but they're in.
If they move out of the zone you get an alert notification if the smartwatch vibrates or whatever.
\ No newline at end of file
diff --git a/transcripts/uncorrected/23.txt b/transcripts/uncorrected/23.txt
index 847a19b97210af5a0d79cb54c259b54cbe8103aa..1c658e5f3d7436116c6a372301158c4d76aff497 100644
--- a/transcripts/uncorrected/23.txt
+++ b/transcripts/uncorrected/23.txt
@@ -1 +1 @@
-Create now a meetings taker, meetings minute producer. It will have the following functionality. The user will upload a recording of meetings, of a meeting that took place. and we'll provide then there will be a section so that's an audio upload functionality the next one will be a meeting participants the user will provide the names and identifying characteristics of people who are audible in the recording so it'll say like for example and there should be Name, Description, Daniel, male voice in the recording, Hannah, female voice in the recording.
Upon receiving both of these things, it will send it to Gemini Multimodal in order to produce two things One is a transcript, slightly cleaned up diaries transcript That's one output and the second one is a minute which is a automatically generated minutes formatted with decisions, action items for each participant.
And then it should be integrated with Google Drive so the user can connect their Google Drive and save them to a folder after they've been generated and view them in the app.
\ No newline at end of file
+Something that would be very useful would be the following. So I use an app called Voice Notes for Android. And it's a voice recording app. It's called Voice Notes. Now, it has one fatal flaw, in my opinion, which is that it doesn't have Bluetooth support. So when I'm out and about, like now, I literally hold the phone up to my mouth, and it certainly gets me much, much, much better recording quality, but I kind of look a little bit goofy and I feel very self-conscious.
So there's two things I've thought about. One is finding a voice recording app that has more robust Bluetooth support. I think there are two options really that I'm thinking of. The first is finding, as I said, a voice recorder with very robust Bluetooth support and using a Bluetooth microphone to record with. The alternative, because I'm seeing these products come to market increasingly, is to use a wearable Android device, which probably wouldn't be that different, maybe even physically. And I think the more I think about it, the more I think about it, the more I think about it, the more flexibility. Rather than being a Bluetooth accessory, it's running, I guess, Wear OS, and maybe that would give you more flexibility.
I'm trying to think of the pros and cons on which would be better. I veered towards the wearable approach as it seems to be what's where. I don't know where the market is going with this concept, but I'm curious to know what your thoughts are regarding the pros and cons.
\ No newline at end of file
diff --git a/transcripts/uncorrected/24.txt b/transcripts/uncorrected/24.txt
index 73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c..59abde81206328bbd33b6fe792b0dcf161a7d148 100644
--- a/transcripts/uncorrected/24.txt
+++ b/transcripts/uncorrected/24.txt
@@ -1 +1 @@
-I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.
I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations.
\ No newline at end of file
+So there's a lot of these AI voice pins emerging onto the market which are designed to be wearable devices.
So I record as I'm doing now quite a number of voice notes when I'm out of the house.
I use an Android app called Voice Notes that I really like but it doesn't have support yet for Bluetooth microphones.
At least not support that's reliable.
So I have to hold the phone up to my mouth, which really kind of degrades the experience.
As I started, I want to actually start doing, going on walks expressly for usually the moment I do this when I'm going places.
I just happen to think, but I actually want to start taking walks to jot down some ideas as a healthier way of combining work and getting out and getting some exercise and getting some sunlight.
And for that it would be really nice to not have to, you know, be holding up a phone to your mouth for 30 minutes or an hour or whatever it may be.
So I was thinking about wearable voice recorders but a lot of them from what I've seen are these kind of closed ecosystems in which they sell you can't just buy the hardware.
They'll sell you, they'll do like onboard transcription or they'll sell you like a Cloud Transcription Bundle.
I'm really not a fan of on-device transcription.
I mean I think it works but in my experience it doesn’t make a lot of sense to me just architecturally.
I think why do stuff on device that can be done in the cloud cost effectively?
And you got, you know, you can run vastly more powerful models in the cloud.
You don’t have to worry about quantizing models on a very, very small piece of hardware.
And so I guess what would be great for me, but Android, when you're looking at wearables, Android's like the obvious sync partner.
So you just need to get the voice of the audio data from the recording thing to Android and from there you can push to the cloud and then the rest is back-end speech and text.
So what I'm saying is that I'd love a modular solution that could do this.
A pin that is just hardware, just recording this audio sync, maybe has its own app, or maybe can be used preferably with third-party apps.
And therefore you can kind of build your own voice recording stack around it, and you can use your existing Speech-to-Docs transcription workflow.
And you don't have to subscribe to these very kind of, I forget the word, walled gardens in which the vendor chooses your force into this package that's often very unnecessarily expensive and you're paying mostly for overpriced transcription.
I'd prefer to just get, invest in good hardware!
\ No newline at end of file
diff --git a/transcripts/uncorrected/25.txt b/transcripts/uncorrected/25.txt
index 24994713fc006cf39dff6433f341d9e5b812c141..65b441258e93b436681d73b0928dd3ea5da97777 100644
--- a/transcripts/uncorrected/25.txt
+++ b/transcripts/uncorrected/25.txt
@@ -1 +1 @@
-So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.
What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.
A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner.
\ No newline at end of file
+I picked up a Samsung Galaxy FE watch. I checked compatibility, smartwatch. I think it's in the 7 series if I'm not mistaken. What is it exactly? It's a 40mm smartwatch. Where does it fit in their line up? What's the difference between this and the Watch 7? I just went for this one because it was what was in stock.
And is it shower proof, waterproof? And I know it's a glass display. So I'm wondering how tough is the glass? Or is it tough at all? I just asked because it's a fitness watch. I assume they make them a little bit more ruggedized, but maybe that's not the case. What does it say?
\ No newline at end of file
diff --git a/transcripts/uncorrected/26.txt b/transcripts/uncorrected/26.txt
index 8eb532b0a713565b3b2fae20960656ec0d9e6e2f..b51e9cf9eacfa8f539ba2c6270fbbbdcb80adeda 100644
--- a/transcripts/uncorrected/26.txt
+++ b/transcripts/uncorrected/26.txt
@@ -1 +1 @@
-Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.
They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.
Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for.
\ No newline at end of file
+So there has been this vast development in multimodal AI recently. I signed up for Replicate and FAL AI. And what really strikes me is not only the diversity and number of models out there, but also the large number of permutations in multimodal AI, meaning what input can go to what output. And I think what I find difficult about it at the moment to navigate as a, let's say, creator. I created a few music videos just as kind of fun experiments. Is that there's so many different models. Like just in, let's say, the one series, there is maybe 20 different models to choose from in FAL, but they all do slightly different things not only in terms of the resolution and the parameter and the max duration but also in terms of the modalities, and they don’t really allow you to filter on this at the moment.
So what I mean by that is if we take an image to video model that animates still images to video, one model in one might create video without audio and another might create video with audio. And that's a very significant difference. But there's also a significant difference in do I prompt for the audio? In other words, is it going to be text to audio and render out audio that then gets added to video? Is it reference audio and reference image? So when you begin opening, all these differences really matter because I might want to filter on ideally, let's say I wanted to look at image to video models, which could generate lip sync to audio from a prompt. That might be one use case as well as the video.
In another use case, I might want to create a dialogue video. Let’s say I have a still image of a crowded market in Jerusalem, and I might want to print something like create a video from this image; the background soundtrack is background conversation noise in a bustling marketplace with vendors yelling out sales prices. That's just an example of the kind of background noise and the ambient noise that we have in this market I'm thinking about.
So what I would like to do, I created this repository which I created here. I'm trying to think of a taxonomy for multimodal, really for my own reference, but also as an open source project. Exploring the permutations of multimodal that are possible. So in the preceding example, we might have one definition of a modality might be still image to video without audio. Another modality, and then the description. Another modality might be still image to audio without lip sync. Another modality might be still image to video with lip sync.
But then you might have some sub modalities being still image to video with lip sync with reference image, that a reference to image. Another sub modality there might be still image to video with reference character reference in video. Another might be still image to video with audio with character reference through a LoRa (L-O-R-A). And I reckon that if we really enumerated the modalities we might get to hundreds if not thousands of different ones. For example, in FAL, just to talk about the long tail, there's music to music, which is music in painting. There's audio in painting, well, yeah, audio in painting, which I'm thinking aloud here is, I guess, distinguished music in painting is a subset of audio in painting, that it's melodic.
So that's the objective. I think that the JSON is the obvious format in which to attempt to denote these. And what I'd like you to do as the task definition is try to do this basically. Try to enumerate, list out a hierarchy, some kind of taxonomy representation that makes sense. We could try to create a baseline and then explore various ways of mapping out the hierarchy, manipulating the JSON so that we look at different ways of organizing it. So I think it would be useful to have like a first entry JSON in which we, and later maybe I, as new modalities come to, and we can maybe have very interesting labels might be their point of maturity, example workflows, use cases, etc. There's an awful lot that could be explored within these parameters.
\ No newline at end of file
diff --git a/transcripts/uncorrected/27.txt b/transcripts/uncorrected/27.txt
index 492695d3c04244eba8ee90b40f4d0ed8cbb6793b..7691086737e7862b23604ec7c3b5a56071521899 100644
--- a/transcripts/uncorrected/27.txt
+++ b/transcripts/uncorrected/27.txt
@@ -1 +1 @@
-Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.
And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.
So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.
What this means is you can plug your voice keyboard, which I think is obvious name, into anything. You can have it bound to your desktop for most of the time, you go away for traveling for a while, you pack your box. So it's really analogous to a keyboard.
Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it.
\ No newline at end of file
+Look at the Facer's, I'm really surprised for no one's made a Hebrew date watch on the Facer creator, but it's probably the developer studio from Samsung is the way to go for that. And I want to edit, like the one that I have slightly, I can't find the perfect one, people put too much on them. I'm looking at the face I got from Facer now and they've added temperature, sunrise, sunset, neither of which work, I guess the integrations don't work, but who wants that on their watch? These are all like anti-simplicity. I just want... It's almost perfect, but they added these stupid unnecessary features.
Maybe on the Facer creator marketplace, I can just create one that I want. Maybe that will actually work. That's probably the easiest way to go. But if that doesn't work, I can create one on Github and open sources, the font that I want, but the Hebrew one would be very special to me. It's definitely possible.
I'm looking at my desktop display. It says 30 Tishra 5786. So for sure from the HIPAA Cal API the data source is there. And I looked last night and it seemed that people only had created sort of ones for from a very different reason.
The VoiceNote data set I really want to create as well. That's actually a very important project, the GUI for adding that I have a backlog of literally thousands and it would form the basis for my classification model which I should probably note out and that's a real model I can build for the idea as well.
\ No newline at end of file
diff --git a/transcripts/uncorrected/28.txt b/transcripts/uncorrected/28.txt
index acadef7c73d2b38c88ec7b03751c008a67eca4fc..eaea5b9166faabd9642d0c97478ecd6f6fd86d89 100644
--- a/transcripts/uncorrected/28.txt
+++ b/transcripts/uncorrected/28.txt
@@ -1 +1 @@
-Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.
Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe.
\ No newline at end of file
+Okay, so I've just configured. VS Code is very, very important. I've just configured automatic updates, and I asked Claude, I said, why am I not getting them? Why do I, it says, you're out of date, download the Debian. And I said, I don't want to have to download a Debian every time, and I really want to keep this updated.
So it says, you should know, you need to join the Microsoft ASC, their repo, their third-party repo, which I had before then I think because I removed it as a duplicate.
So to clarify, it's not the case that you need to do this process. It is actually an automatic upgrade thing but you do need to be attached to the Microsoft repo to get those.
\ No newline at end of file
diff --git a/transcripts/uncorrected/29.txt b/transcripts/uncorrected/29.txt
index 48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45..ffc57e5992be591a97dbd7ee169ed839fe73e975 100644
--- a/transcripts/uncorrected/29.txt
+++ b/transcripts/uncorrected/29.txt
@@ -1 +1 @@
-Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.
Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends.
\ No newline at end of file
+I want to add to my DSR Holdings a LLM store TXT. It's almost a pity I didn't talk about this with Shlomo, but a radical idea. It actually, I mean, it appears to be working. I don't know if you're sure where I read from if it just parts my home page or read the txt but I asked Claude to pull in some context data about me into the into the file it seemed to work really well so what the thought I had for I mentioned Shlomo and what I thought about for myself is inbound LLM marketing considering AI traffic.
It's a pity I didn't take some in fact I'll add to the DAM a screenshots folder because a perfect example of a screenshot was the last time that I saw a and I sure I see them almost every day A sign up form where they didn ask for was the LLM your referral source I think it's absolutely insanity that anyone, any company would not have LLM as top of their list of referral sources for traffic.
And this opens up a whole world actually of LLM analytics. and you see which LLMs are scraping our site. LLM optimization. And then basically the idea of being LLM as an inbound pipeline. If you did all this well, could you actually view large language models as an inbound traffic source saying Google's dead, LLM is where it's at.
Here's how you can, I mean, I would have to try these approaches on my own site, but all I can do there is keep optimizing and see if someone says, if you typed into ChatGPT in a month and said, I need someone who's good with AI in Jerusalem, Israel. Can you find any profiles? And if it worked, that would almost be the opposite to pursue the outbound track as well for jobs. But as a complementary angle of attack, I think it would be very interesting to see as an experiment even.
\ No newline at end of file
diff --git a/transcripts/uncorrected/3.txt b/transcripts/uncorrected/3.txt
index ec565a8b602b1abb235b4c8a5616370d701f5be7..a2ad0808542f04e9e26405fe883f5a3a95fa8ce7 100644
--- a/transcripts/uncorrected/3.txt
+++ b/transcripts/uncorrected/3.txt
@@ -1 +1 @@
-Please go through the markdown files in this repository to make sure that no emojis have been used.
If you find any emojis, remove them.
If emojis have been used in place of proper icons, then identify an appropriate icon library that could be used to provide the emojis.
Remember that if the icons are well known, such as the icons from major social networks, these should be integrated via a pre-designed library.
Do not attempt to create custom once-off SVGs for any logo that likely already exists in a professional library.
\ No newline at end of file
+Yeah, I think I would look for... the truth is, I was initially... I have to try out my Cherry Red keyboard, the split one is a long term thing. But in the short term I have to say I've really warmed to MX Brown, and I think at this point I probably would use any MX Brown keyboard without noticing much of a difference from the AliExpress one, which is a brown imitation.
And this frankly one is it's a wired one and what I would like probably I'm thinking at the moment I wanted to set up a binding for cloud code and I think that rather than go down in the macro pad approach, which is one way, one approach certainly, it would be really nice to have a keyboard with built-in macro keys.
I think the MX Red one that I got has about five macro keys and I'm wondering if you can put about, you know, if you put up the entire top of the keyboard or the number pad, which I'm looking at the keyboard now. A lot of the keys that I rarely use are the sound controls, the number operators, pause, scroll lock, print screen. There's probably about 20% of the keyboard that I rarely touch.
Do you have any recommendations for a brown keyboard? Let's say I don't like compact keyboards, so I do like the full-size keyboard. The small keyboards feel cramped to me, but that has a full keyboard section and then maybe fills up some space on the right and along the top with macro keys, and so that rather than adding on micro pads you can just create some assignments on the keyboard itself.
\ No newline at end of file
diff --git a/transcripts/uncorrected/30.txt b/transcripts/uncorrected/30.txt
index 353b380ddee0d6134e7cfc905de9171524ef566e..e9383aa5db79a22c214793ffdd4a93fc6ed49a60 100644
--- a/transcripts/uncorrected/30.txt
+++ b/transcripts/uncorrected/30.txt
@@ -1 +1 @@
-I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.
And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.
And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that.
\ No newline at end of file
+Can I just make a suggestion? Before we proceed in this direction, I think that it definitely is the right content environment. But the reason I've created these is so that we have them ready for recurrent use. So Lama Index is very, very good and would be used for a lot of very versatile.
So before we start, let's update the cond environment to install all the different utilities we might need for tokenizing text, processing markdown, markdown to PDF, PDF splitting, all these different text utilities. Even ImageMagick typesetting utilities. Once we have that ready then we can begin. But let's get that environment good first if we can use a conda.yaml to define it.
In other words, take in the existing environment, make a few edits and then install that. Just remember there's an AMD GPU so it will affect the choice of packages.
\ No newline at end of file
diff --git a/transcripts/uncorrected/31.txt b/transcripts/uncorrected/31.txt
index 0ec335394a72e80887a3672f290bc5828d8227e0..68f0272363ffede253054f91243a4d0b8203d19b 100644
--- a/transcripts/uncorrected/31.txt
+++ b/transcripts/uncorrected/31.txt
@@ -1 +1 @@
-I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.
And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda.
\ No newline at end of file
+Okay, here's just a few more specific things that I want to include. So I see you mentioning hydration drinks, which is very important. Electrolyte tablets become very expensive. So there's a few things I'd like to explore. More cost-effective ways for making them. I think you can buy them as a dry powder is one idea. The second one is a homemade recipe.
The next set of ideas is I really really need to always have some kind of food stuff at home ready to eat. So there's a few things in that regard. A list of a kind of basic pantry shopping list. Obviously optimized for all the dietary recommendations we've discussed here. Suggestions for, and I think protein bars aren't really enough, it needs to be carbohydrate as well. Recipes or suggestions for homemade protein bars for the same reason that they become very expensive to buy them individually.
That's probably the key thing I'm looking for at the moment is to have always on hand the ingredients and ideally like kind of a backup layer like I kind of make these protein bars but I also and that's kind of the fallback but ideally I prefer to obviously eat and so on.
\ No newline at end of file
diff --git a/transcripts/uncorrected/32.txt b/transcripts/uncorrected/32.txt
index 243f36cf36c052964af7ebe83a792dae9e67d205..b373213f419ec9b2e4b9ca165f42170441577ed2 100644
--- a/transcripts/uncorrected/32.txt
+++ b/transcripts/uncorrected/32.txt
@@ -1 +1 @@
-I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.
But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.
Come up with an imaginative name for this use case.
\ No newline at end of file
+Okay there's a bunch of memory layer projects now to explore later that are actually it's not longer separation between vector storage and memory which makes sense because it's kind of basically the same server it's offered by API mem0 super memory remember api memories.api that's a good starter list and they can all be integrated and used they'll do the vector backend so I'm using I'm testing it out on the documentary finding one, but just to see the concept and how it works with agency.
\ No newline at end of file
diff --git a/transcripts/uncorrected/33.txt b/transcripts/uncorrected/33.txt
index 35a55fa10abb62fbf49bc2c38d73e8cc53fca620..847a19b97210af5a0d79cb54c259b54cbe8103aa 100644
--- a/transcripts/uncorrected/33.txt
+++ b/transcripts/uncorrected/33.txt
@@ -1 +1 @@
-This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.
So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links.
\ No newline at end of file
+Create now a meetings taker, meetings minute producer. It will have the following functionality. The user will upload a recording of meetings, of a meeting that took place. and we'll provide then there will be a section so that's an audio upload functionality the next one will be a meeting participants the user will provide the names and identifying characteristics of people who are audible in the recording so it'll say like for example and there should be Name, Description, Daniel, male voice in the recording, Hannah, female voice in the recording.
Upon receiving both of these things, it will send it to Gemini Multimodal in order to produce two things. One is a transcript, a slightly cleaned-up diarised transcript. That's one output, and the second one is a minutes document, which is automatically generated minutes formatted with decisions and action items for each participant.
And then it should be integrated with Google Drive so the user can connect their Google Drive and save them to a folder after they've been generated and view them in the app.
\ No newline at end of file
diff --git a/transcripts/uncorrected/34.txt b/transcripts/uncorrected/34.txt
index e3960e6d457375f71a0aa63d07c4c8ad4af74fc2..73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c 100644
--- a/transcripts/uncorrected/34.txt
+++ b/transcripts/uncorrected/34.txt
@@ -1 +1 @@
-Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.
Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.
And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on.
\ No newline at end of file
+I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.
I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations.
\ No newline at end of file
diff --git a/transcripts/uncorrected/35.txt b/transcripts/uncorrected/35.txt
index 4215c595a95e066a9ecda2a2ae08b9013686c002..24994713fc006cf39dff6433f341d9e5b812c141 100644
--- a/transcripts/uncorrected/35.txt
+++ b/transcripts/uncorrected/35.txt
@@ -1 +1 @@
-Okay, I'd like to create an app which does the following. The purpose of the app is to visualize how different countries, ideologies, systems approach common policy challenges. An example of a policy challenge that I'm just providing for explaining how I could see this working is second-hand smoke control. Some countries have very strict regulations, some countries have very lax enforcement. And probably there is not really much distinction by system of government but the user prompts it called policy visualizer and the user enters a policy challenge. So another example might be minimum alcohol purchasing laws.
Once Gemini receives this prompt, its task will be to research how different countries in the first instance approach this topic. And from that analysis, it can identify commonalities or clusters. The research process happens in the back end. And the user is shown some kind of progress indicators like researching what it's doing basically. Not a huge amount of verbosity but just a few cues so the user knows that it's not stuck or it's actually doing something.
Once Gemini concludes its first pass it will have grouped not necessarily every country in the world but based on the clusters it identifies it found groups. Each group is given a label. The label might be laissez-faire, permissive. These may be either recognized labels or what Gemini feels it's best to describe them as. And the countries are displayed with their national flags in alphabetical order.
The next functionality is that the user can click on the cluster and Gemini will describe what it is about this law that it considered them to be a cluster. In other words, the way in which they approach the challenge. That's a modal. Then the user can click on any country and it can see how that country approaches it. So I might click on the flag of Germany and either an accordion or a modal it show how Germany approaches in this case gun control and its cluster.
Country level is always a tab and only if there's other taxonomies. By taxonomy I mean that we think there's a very, Gemini says there's a very big difference and how different right-wing versus left-wing approaches we're going to do. We're going to create one more tab with that. But that should be kind of only if there's very compelling reason to do so. Or if it has significant data to share. So if it feels like there's enough data about how US states approach an issue at the state level, it might create a tab called US States and then follow the same pattern in which it groups them into clusters.
The objective is to, rather than searching through Google to see how different countries do different things, to start with your question and then get this visualisation. And I think the icing on the cake would be an analysis. So this is a visual presentation and then there may be analysis showing significant differences, some similarities. So there's like a report, a textual report, but the main tab, because I think it's the most interesting one, is the visualization, the policy visualizer.
\ No newline at end of file
+So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.
What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.
A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner.
\ No newline at end of file
diff --git a/transcripts/uncorrected/36.txt b/transcripts/uncorrected/36.txt
index 145fac41057e67a2489a588fef1f5d5a4b0df965..8eb532b0a713565b3b2fae20960656ec0d9e6e2f 100644
--- a/transcripts/uncorrected/36.txt
+++ b/transcripts/uncorrected/36.txt
@@ -1 +1 @@
-Alright, so the plan is for this repository, I want to create an audio media streaming interface for my home network. And there's a few things I want to roll into this one too.
Number 1 is media playback. So I have a volume on the NAS called AudioShare. The NAS is 10.0.0.50. So connect to the NAS, you'll find the AudioShare volume and let's mount that as the media library. It'll have a lot of tracks already populated.
Second thing is a soundboard. So I'll create a folder within that audio share volume called soundboard. And in the soundboard I just upload some stupid sound effects I do one to start it off Like laughing sound.
And then I also want to create a intercom system. and the functionality for the intercom is that from this computer, sorry from the interface which will be audio.residence.jlm.com I'd like to have the push to talk and the start and stop. PUSH TO TALK
So for the speaker networking this is where I would like you to give me your thoughts on what makes the most sense So I've used before MPD. I've installed MPD clients on... So the devices are, there is a device called Nursery Pi in SSH. Bedroom Pi, R-Pi and Smart TV. Each one is connected to a speaker. That's the network.
I tried MPD, putting an MPD client on each device. MPD has been the most reliable But it seems kind of a pity to use this when there are protocols like SnapServer that are designed specifically for this use case. However, using Home Assistant, I found SnapServer to be very buggy. I could never really get it to work and many more and the system that's reliable.
I find with MPD, because you need to select the speaker on the client devices, those bindings frequently broke. So I'd like to have something that kind of, the speakers are really never going to change. In the sense that I'm going to, I have a sound card for the Raspberry Pi. That's the speaker. and for as long as I use this system that's gonna be the configuration. So I want to set up something that once it's in place it's pretty much just gonna work.
So I leave that call up to you and please create a... Create a folder in the repository providing your recommendations just before you begin and what you suggest as the best implementation for the multi-speaker network whether it is broadcasting to a bunch of MCD clients from the Web UI or whether it's creating a single Snap server or something else that manages the networking I don't envision much of a need to select individual speakers by which I mean, I think that for the most part the occasions I'm using this I'll just play media to the pool but of course it would be nice to be able to select that !
\ No newline at end of file
+Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.
They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.
Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for.
\ No newline at end of file
diff --git a/transcripts/uncorrected/37.txt b/transcripts/uncorrected/37.txt
index b314f3f74074ca02c2a47132cea688da6abb56d9..492695d3c04244eba8ee90b40f4d0ed8cbb6793b 100644
--- a/transcripts/uncorrected/37.txt
+++ b/transcripts/uncorrected/37.txt
@@ -1 +1 @@
-Building a Reporting Disclosure. I have a few thoughts. One, I can create a model. A model is actually quite feasible. It would be, but it's a data annotation project. It's saying, here's a PDF, here are the actual variables. In other words, here's the scope 3, scope 2, scope 1, here are the units, train it like that.
Second thought is if I did want to put together a dataset of sustainability disclosure reports, I think you could argue a public fair use clause for the PDFs being there.
And then the one I did with Gemini the other day which was basically a parsing AI tool seemed to work and could probably be used in production and which works even maybe as a way of trying to get in touch with Google is they have They have definitely an AI for good division who may let's say provide Gemini credits for the actual deployment of it on Cloud Run. Because from my first run of it, it was very, very promising for the task of parsing the reports.
And that would greatly the feature would be when it extracts the data human human in the loop is done by seeing what it is matching it to a company in the database or to a known company Let's take Google itself as an example. Detects its stock ticker, detects its stock exchange. And then you click like add to database meaning that you're adding the validated data and it could even pull out the metadata from the document pull out the source and that would be a great way of building up a human validated database in other words you take the reports you say either everything everything looks good to me or this is wrong either way you add it then of course you've got the missing financials and the rest of the world.
But that would probably be because there is thousands of sustainability disclosures, especially when you consider I think beyond the US globally, and it's beyond. So certainly it's a task for a model, but it's also human in the loop. The ultimate question is if Gemini stock performs 99% sufficiently well in the task of extracting this data from the sustainability reports. A model might actually not even be necessary because out of the box it's almost perfect. That is, I suspect, what the case would be.
\ No newline at end of file
+Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.
And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.
So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.
What this means is you can plug your voice keyboard, which I think is obvious name, into anything. You can have it bound to your desktop for most of the time, you go away for traveling for a while, you pack your box. So it's really analogous to a keyboard.
Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it.
\ No newline at end of file
diff --git a/transcripts/uncorrected/38.txt b/transcripts/uncorrected/38.txt
index 8d2caf72445f7704d8455a3c2b790fdf76026b9e..acadef7c73d2b38c88ec7b03751c008a67eca4fc 100644
--- a/transcripts/uncorrected/38.txt
+++ b/transcripts/uncorrected/38.txt
@@ -1 +1 @@
-The purpose of the repository basically is to model or suggest the idea of using AI agents to scope out gap filling and extending multi-agent networks based on their inferred understanding of the purpose of a multi-agent network.
I think iterative workflow is the best. It suggests to the user what about this agent the user says yes or no, rather than the batch system. Although it could do both, but let's make the defaults the kind of individual review system.
\ No newline at end of file
+Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.
Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe.
\ No newline at end of file
diff --git a/transcripts/uncorrected/39.txt b/transcripts/uncorrected/39.txt
index 2acd54bd254b2cdcc6a5457142eb4e0e917685f0..48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45 100644
--- a/transcripts/uncorrected/39.txt
+++ b/transcripts/uncorrected/39.txt
@@ -1 +1 @@
-Okay, I'd like to create an app with Gemini. It's going to do the following. It will be called MyEQCreator. Here's how it works.
The user will, there will be a microphone recording interface, or the user can upload a file. Either way, the user should aim to upload a three minute audio sample. Audio Sample goes to Gemini and Gemini will parse the submitted audio to determine speaker characteristics, namely their vocal range, frequency distribution. And when it does this its goal way to provide an EQ preset for the user.
I use Audacity for lightweight audio editing and if I had a Daniel voice preset that had these EQ settings built in or that could even use via a CLI I would use it but that would require maybe a second pass Gemini would generate it according to that file spec.
What would be very useful and impressive in addition would be after the analysis a five second audio sample might be visualized and the frequencies highlighted to illustrate to the user where the frequency distribution falls for their particular voice.
\ No newline at end of file
+Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.
Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends.
\ No newline at end of file
diff --git a/transcripts/uncorrected/4.txt b/transcripts/uncorrected/4.txt
index 97dc205e9d7b77068f580705263f66d3a0ce82b0..e0ca9c5f871fe1db6ec60a09ae492e1cb1614512 100644
--- a/transcripts/uncorrected/4.txt
+++ b/transcripts/uncorrected/4.txt
@@ -1 +1 @@
-Go through the website and see any place in which icons have been implemented which were custom designed but which could have been implemented more efficiently through using an existing icon library.
Pay particular attention to icons for common uses such as social media icons which exist in many libraries, as well as emojis which may have been used in place of icons.
This approach should not be followed.
If the user uses an existing icon library that you can identify, then replace the custom coded icons with the most appropriate matches.
If the user hasn't yet implemented an icon library, provide some suggestions to the user, focusing on those libraries which will best match the aesthetic which they are following in their designs.
\ No newline at end of file
+Okay, so for Kdenlive, I wanted to get a macro pad with three toggles for video editing, a control surface in other words. I know that there's a few macro pads, or there's a large community of people who have adapted different things for use with Kdenlive as control panels, or control surfaces as they're called.
I have a friend who is a photographer and he bought an off-the-shelf controller and used it as a control surface for something else. And it made me think, is there anything that people commonly use for Kdenlive? What would be really helpful would be the three wheels for color correction, which would probably be... Those are, I guess, kind of toggles, and then scroll wheels for three scroll wheels, and it's always in pairs of three for that. But yeah, those are the ones that people commonly use and like.
\ No newline at end of file
diff --git a/transcripts/uncorrected/40.txt b/transcripts/uncorrected/40.txt
index b2de03d17424a2fed8639d2dfa09c98e84d864d7..353b380ddee0d6134e7cfc905de9171524ef566e 100644
--- a/transcripts/uncorrected/40.txt
+++ b/transcripts/uncorrected/40.txt
@@ -1 +1 @@
-It would be great to run the demo. I'm opening, creating a .env. And it would be useful so people can see straight up how it works to have a page that just says demo.
And it'll have so we'll need to run the audio data through the pipeline just as if we were using it capture the results into the repo here and just display that on the front end I've just provided the Gemini API key so let's try to do that I I also deleted, I think we just need one readme and the instructions for the app can be attached.
\ No newline at end of file
+I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.
And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first release on the market, age in years based on first release minus the current time, correct to the nearest 0.1, one decimal place.
And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that.
\ No newline at end of file
diff --git a/transcripts/uncorrected/41.txt b/transcripts/uncorrected/41.txt
index f2066bdff489a0e7af0c17fa8ccf736412194aad..0ec335394a72e80887a3672f290bc5828d8227e0 100644
--- a/transcripts/uncorrected/41.txt
+++ b/transcripts/uncorrected/41.txt
@@ -1 +1 @@
-Hello, yeah, I'm looking for, okay, I'm trying to find a phone case for the Nord 3 5G from OnePlus. I want something which has MagSafe, a magnet built into the case itself, and something good quality and that's just a good protective case for the phone.
Do you know of any recommendations? Any ones on AliExpress or if Otterbox makes a case for this phone or anyone else? It's a slightly older OnePlus, so it's tricky to find a compatible case for it.
So if you happen to know, you should know of any products on AliExpress and product numbers, list them please.
\ No newline at end of file
+I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.
And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda.
\ No newline at end of file
diff --git a/transcripts/uncorrected/42.txt b/transcripts/uncorrected/42.txt
index 73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c..243f36cf36c052964af7ebe83a792dae9e67d205 100644
--- a/transcripts/uncorrected/42.txt
+++ b/transcripts/uncorrected/42.txt
@@ -1 +1 @@
-I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.
I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations.
\ No newline at end of file
+I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.
But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.
Come up with an imaginative name for this use case.
\ No newline at end of file
diff --git a/transcripts/uncorrected/43.txt b/transcripts/uncorrected/43.txt
index 24994713fc006cf39dff6433f341d9e5b812c141..35a55fa10abb62fbf49bc2c38d73e8cc53fca620 100644
--- a/transcripts/uncorrected/43.txt
+++ b/transcripts/uncorrected/43.txt
@@ -1 +1 @@
-So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.
What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.
A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner.
\ No newline at end of file
+This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.
So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links.
\ No newline at end of file
diff --git a/transcripts/uncorrected/44.txt b/transcripts/uncorrected/44.txt
index 5eac1414e49e1b8618ce1ba2193d7d10b91f431a..e3960e6d457375f71a0aa63d07c4c8ad4af74fc2 100644
--- a/transcripts/uncorrected/44.txt
+++ b/transcripts/uncorrected/44.txt
@@ -1 +1 @@
-I'd like to consider a wee factor and then just give me your thoughts about this so currently it's a file based backend what I was wondering is would it make more sense to have a lightweight database backend SQLite let's say and and the important part of the utility which is the Hugging Face dataset push is what I'm using for the classification model would actually be a job whereby locally it will create the dataset from the local backend.
In other words, rather than having this sit in place as files, it's going to be constructed periodically. Basically when I say okay I've uploaded another batch, let's push, would that be easier and more logical to integrate with the front end?
\ No newline at end of file
+Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.
Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.
And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on.
\ No newline at end of file
diff --git a/transcripts/uncorrected/45.txt b/transcripts/uncorrected/45.txt
index 8eb532b0a713565b3b2fae20960656ec0d9e6e2f..4215c595a95e066a9ecda2a2ae08b9013686c002 100644
--- a/transcripts/uncorrected/45.txt
+++ b/transcripts/uncorrected/45.txt
@@ -1 +1 @@
-Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.
They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.
Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for.
\ No newline at end of file
+Okay, I'd like to create an app which does the following. The purpose of the app is to visualize how different countries, ideologies, systems approach common policy challenges. An example of a policy challenge that I'm just providing for explaining how I could see this working is second-hand smoke control. Some countries have very strict regulations, some countries have very lax enforcement. And probably there is not really much distinction by system of government but the user prompts it called policy visualizer and the user enters a policy challenge. So another example might be minimum alcohol purchasing laws.
Once Gemini receives this prompt, its task will be to research how different countries in the first instance approach this topic. And from that analysis, it can identify commonalities or clusters. The research process happens in the back end. And the user is shown some kind of progress indicators like researching what it's doing basically. Not a huge amount of verbosity but just a few cues so the user knows that it's not stuck or it's actually doing something.
Once Gemini concludes its first pass it will have grouped not necessarily every country in the world but based on the clusters it identifies it found groups. Each group is given a label. The label might be laissez-faire, permissive. These may be either recognized labels or what Gemini feels it's best to describe them as. And the countries are displayed with their national flags in alphabetical order.
The next functionality is that the user can click on the cluster and Gemini will describe what it is about this law that it considered them to be a cluster. In other words, the way in which they approach the challenge. That's a modal. Then the user can click on any country and it can see how that country approaches it. So I might click on the flag of Germany and either an accordion or a modal it show how Germany approaches in this case gun control and its cluster.
Country level is always a tab and only if there's other taxonomies. By taxonomy I mean that we think there's a very, Gemini says there's a very big difference and how different right-wing versus left-wing approaches we're going to do. We're going to create one more tab with that. But that should be kind of only if there's very compelling reason to do so. Or if it has significant data to share. So if it feels like there's enough data about how US states approach an issue at the state level, it might create a tab called US States and then follow the same pattern in which it groups them into clusters.
The objective is to, rather than searching through Google to see how different countries do different things, to start with your question and then get this visualisation. And I think the icing on the cake would be an analysis. So this is a visual presentation and then there may be analysis showing significant differences, some similarities. So there's like a report, a textual report, but the main tab, because I think it's the most interesting one, is the visualization, the policy visualizer.
\ No newline at end of file
diff --git a/transcripts/uncorrected/46.txt b/transcripts/uncorrected/46.txt
index 492695d3c04244eba8ee90b40f4d0ed8cbb6793b..145fac41057e67a2489a588fef1f5d5a4b0df965 100644
--- a/transcripts/uncorrected/46.txt
+++ b/transcripts/uncorrected/46.txt
@@ -1 +1 @@
-Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.
And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.
So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.
What this means is you can plug your voice keyboard, which I think is obvious name, into anything. You can have it bound to your desktop for most of the time, you go away for traveling for a while, you pack your box. So it's really analogous to a keyboard.
Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it.
\ No newline at end of file
+Alright, so the plan is for this repository, I want to create an audio media streaming interface for my home network. And there's a few things I want to roll into this one too.
Number 1 is media playback. So I have a volume on the NAS called AudioShare. The NAS is 10.0.0.50. So connect to the NAS, you'll find the AudioShare volume and let's mount that as the media library. It'll have a lot of tracks already populated.
Second thing is a soundboard. So I'll create a folder within that audio share volume called soundboard. And in the soundboard I just upload some stupid sound effects I do one to start it off Like laughing sound.
And then I also want to create a intercom system. and the functionality for the intercom is that from this computer, sorry from the interface which will be audio.residence.jlm.com I'd like to have the push to talk and the start and stop. PUSH TO TALK
So for the speaker networking this is where I would like you to give me your thoughts on what makes the most sense So I've used before MPD. I've installed MPD clients on... So the devices are, there is a device called Nursery Pi in SSH. Bedroom Pi, R-Pi and Smart TV. Each one is connected to a speaker. That's the network.
I tried MPD, putting an MPD client on each device. MPD has been the most reliable But it seems kind of a pity to use this when there are protocols like SnapServer that are designed specifically for this use case. However, using Home Assistant, I found SnapServer to be very buggy. I could never really get it to work and many more and the system that's reliable.
I find with MPD, because you need to select the speaker on the client devices, those bindings frequently broke. So I'd like to have something that kind of, the speakers are really never going to change. In the sense that I'm going to, I have a sound card for the Raspberry Pi. That's the speaker. and for as long as I use this system that's gonna be the configuration. So I want to set up something that once it's in place it's pretty much just gonna work.
So I leave that call up to you and please create a... Create a folder in the repository providing your recommendations just before you begin and what you suggest as the best implementation for the multi-speaker network whether it is broadcasting to a bunch of MCD clients from the Web UI or whether it's creating a single Snap server or something else that manages the networking I don't envision much of a need to select individual speakers by which I mean, I think that for the most part the occasions I'm using this I'll just play media to the pool but of course it would be nice to be able to select that !
\ No newline at end of file
diff --git a/transcripts/uncorrected/47.txt b/transcripts/uncorrected/47.txt
index acadef7c73d2b38c88ec7b03751c008a67eca4fc..b314f3f74074ca02c2a47132cea688da6abb56d9 100644
--- a/transcripts/uncorrected/47.txt
+++ b/transcripts/uncorrected/47.txt
@@ -1 +1 @@
-Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.
Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe.
\ No newline at end of file
+Building a Reporting Disclosure. I have a few thoughts. One, I can create a model. A model is actually quite feasible. It would be, but it's a data annotation project. It's saying, here's a PDF, here are the actual variables. In other words, here's the scope 3, scope 2, scope 1, here are the units, train it like that.
Second thought is if I did want to put together a dataset of sustainability disclosure reports, I think you could argue a public fair use clause for the PDFs being there.
And then the one I did with Gemini the other day which was basically a parsing AI tool seemed to work and could probably be used in production and which works even maybe as a way of trying to get in touch with Google is they have They have definitely an AI for good division who may let's say provide Gemini credits for the actual deployment of it on Cloud Run. Because from my first run of it, it was very, very promising for the task of parsing the reports.
And that would greatly the feature would be when it extracts the data human human in the loop is done by seeing what it is matching it to a company in the database or to a known company Let's take Google itself as an example. Detects its stock ticker, detects its stock exchange. And then you click like add to database meaning that you're adding the validated data and it could even pull out the metadata from the document pull out the source and that would be a great way of building up a human validated database in other words you take the reports you say either everything everything looks good to me or this is wrong either way you add it then of course you've got the missing financials and the rest of the world.
But that would probably be because there is thousands of sustainability disclosures, especially when you consider I think beyond the US globally, and it's beyond. So certainly it's a task for a model, but it's also human in the loop. The ultimate question is if Gemini stock performs 99% sufficiently well in the task of extracting this data from the sustainability reports. A model might actually not even be necessary because out of the box it's almost perfect. That is, I suspect, what the case would be.
\ No newline at end of file
diff --git a/transcripts/uncorrected/48.txt b/transcripts/uncorrected/48.txt
index 48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45..8d2caf72445f7704d8455a3c2b790fdf76026b9e 100644
--- a/transcripts/uncorrected/48.txt
+++ b/transcripts/uncorrected/48.txt
@@ -1 +1 @@
-Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.
Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends.
\ No newline at end of file
+The purpose of the repository basically is to model or suggest the idea of using AI agents to scope out gap filling and extending multi-agent networks based on their inferred understanding of the purpose of a multi-agent network.
I think iterative workflow is the best. It suggests to the user what about this agent the user says yes or no, rather than the batch system. Although it could do both, but let's make the defaults the kind of individual review system.
\ No newline at end of file
diff --git a/transcripts/uncorrected/49.txt b/transcripts/uncorrected/49.txt
index 353b380ddee0d6134e7cfc905de9171524ef566e..2acd54bd254b2cdcc6a5457142eb4e0e917685f0 100644
--- a/transcripts/uncorrected/49.txt
+++ b/transcripts/uncorrected/49.txt
@@ -1 +1 @@
-I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.
And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.
And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that.
\ No newline at end of file
+Okay, I'd like to create an app with Gemini. It's going to do the following. It will be called MyEQCreator. Here's how it works.
The user will, there will be a microphone recording interface, or the user can upload a file. Either way, the user should aim to upload a three minute audio sample. Audio Sample goes to Gemini and Gemini will parse the submitted audio to determine speaker characteristics, namely their vocal range, frequency distribution. And when it does this its goal way to provide an EQ preset for the user.
I use Audacity for lightweight audio editing and if I had a Daniel voice preset that had these EQ settings built in or that could even use via a CLI I would use it but that would require maybe a second pass Gemini would generate it according to that file spec.
What would be very useful and impressive in addition would be after the analysis a five second audio sample might be visualized and the frequencies highlighted to illustrate to the user where the frequency distribution falls for their particular voice.
\ No newline at end of file
diff --git a/transcripts/uncorrected/5.txt b/transcripts/uncorrected/5.txt
index 0f9f01aeb1efa9b56a188dbecffed93a32cfd7c5..2a58c5f30c61703af3ed4fd13d9b1c23315f1326 100644
--- a/transcripts/uncorrected/5.txt
+++ b/transcripts/uncorrected/5.txt
@@ -1 +1 @@
-This repository contains a collection of slash commands which I use with Claudecode.
I capture some of the slash commands using speech to text.
The slash commands that have been captured with dictation frequently lack elements like punctuation, paragraph spacing, and they may contain occasionally words that were mistranscribed.
Please recurse through the directories and correct slash commands which you can find which were missing these basic textual features but do not limit your fixes to only I don't want to go into those containing these defects but rather consider in your editing any slash commands which need to be rewritten for optimal intelligibility.
\ No newline at end of file
+So I'd love to get your thoughts on the following. There's a tweet from Sam Altman that he wrote a few years ago and it's aged quite well as they say. He was announcing the release of ChatGPT and maybe an early iteration of ChatGPT, maybe 3 or 3.5 or something like that. Maybe even an earlier one. And the tweet went something like, it's our conversational, or first it's a conversational model or something.
And what's interesting to me about this is that I discovered AI through ChatGPT or got excited about it through that interface. And then from there worked back to more instructional workloads as then I used it as a chat interface, then began using LLMs through their API endpoints and then began using them programmatically and scripting and using them on my local computer. And now I doing much more of that than I am using them as chatbots.
I know a lot of people, I think even people who are pretty technically literate, aren't really aware that there's, that there's, AI can be used in this way. But what's interesting about that tweet I mentioned is it inferred that instructional models actually predate conversational models. In other words, that I think what he was saying was that OpenAI had developed GPT firstly for instruction following, and then they sort of refined it for conversation.
And what I'm curious to know is, is that accurate that instructional models predate conversational models and if so by sort of how long?
\ No newline at end of file
diff --git a/transcripts/uncorrected/50.txt b/transcripts/uncorrected/50.txt
index da218ad130c3c5a5f3ca672509c6c517f4fa87f2..b2de03d17424a2fed8639d2dfa09c98e84d864d7 100644
--- a/transcripts/uncorrected/50.txt
+++ b/transcripts/uncorrected/50.txt
@@ -1 +1 @@
-I'd like to create an app that does the following. The user will paste a screenshot from their calendar or there's a text field for calendar entries for a certain time period. Below that there is a voice recorder. The voice recorder will let out the user to record a voice message, record, pause, stop, and or retake.
When the user is instructed to narrate their timesheet for the week, and the user can also select a date for week commencing, just to validate when the first date that they're referring to in this timesheet is. When those three fields are provided by the user they get sent to Gemini and Gemini will then generate a timesheet based upon the user description with activities per day.
The meeting information that was received will be added. So I might diarize specific meetings that were referenced. So combining the two sets of data. And finally based the user might if the user includes a time spent estimate how many hours were spent per day on a certain project or task it will then calculate the estimated total hours spent and then a summary section.
This will be provided as a document which is created in markdown with the user it's rendered in rich text on the screen and the user can click download and if they do that it'll download the timesheet as a markdown file with the title automatically file name timesheet for week commencing in machine readable case.
\ No newline at end of file
+It would be great to run the demo. I'm opening, creating a .env. And it would be useful so people can see straight up how it works to have a page that just says demo.
And it'll have so we'll need to run the audio data through the pipeline just as if we were using it capture the results into the repo here and just display that on the front end I've just provided the Gemini API key so let's try to do that I I also deleted, I think we just need one readme and the instructions for the app can be attached.
\ No newline at end of file
diff --git a/transcripts/uncorrected/51.txt b/transcripts/uncorrected/51.txt
index 0ec335394a72e80887a3672f290bc5828d8227e0..f2066bdff489a0e7af0c17fa8ccf736412194aad 100644
--- a/transcripts/uncorrected/51.txt
+++ b/transcripts/uncorrected/51.txt
@@ -1 +1 @@
-I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.
And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda.
\ No newline at end of file
+Hello, yeah, I'm looking for, okay, I'm trying to find a phone case for the Nord 3 5G from OnePlus. I want something which has MagSafe, a magnet built into the case itself, and something good quality and that's just a good protective case for the phone.
Do you know of any recommendations? Any ones on AliExpress or if Otterbox makes a case for this phone or anyone else? It's a slightly older OnePlus, so it's tricky to find a compatible case for it.
So if you happen to know, you should know of any products on AliExpress and product numbers, list them please.
\ No newline at end of file
diff --git a/transcripts/uncorrected/52.txt b/transcripts/uncorrected/52.txt
index 243f36cf36c052964af7ebe83a792dae9e67d205..73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c 100644
--- a/transcripts/uncorrected/52.txt
+++ b/transcripts/uncorrected/52.txt
@@ -1 +1 @@
-I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.
But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.
Come up with an imaginative name for this use case.
\ No newline at end of file
+I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.
I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations.
\ No newline at end of file
diff --git a/transcripts/uncorrected/53.txt b/transcripts/uncorrected/53.txt
index 35a55fa10abb62fbf49bc2c38d73e8cc53fca620..24994713fc006cf39dff6433f341d9e5b812c141 100644
--- a/transcripts/uncorrected/53.txt
+++ b/transcripts/uncorrected/53.txt
@@ -1 +1 @@
-This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.
So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links.
\ No newline at end of file
+So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.
What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.
A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner.
\ No newline at end of file
diff --git a/transcripts/uncorrected/54.txt b/transcripts/uncorrected/54.txt
index e3960e6d457375f71a0aa63d07c4c8ad4af74fc2..5eac1414e49e1b8618ce1ba2193d7d10b91f431a 100644
--- a/transcripts/uncorrected/54.txt
+++ b/transcripts/uncorrected/54.txt
@@ -1 +1 @@
-Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.
Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.
And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on.
\ No newline at end of file
+I'd like to consider a wee factor and then just give me your thoughts about this so currently it's a file based backend what I was wondering is would it make more sense to have a lightweight database backend SQLite let's say and and the important part of the utility which is the Hugging Face dataset push is what I'm using for the classification model would actually be a job whereby locally it will create the dataset from the local backend.
In other words, rather than having this sit in place as files, it's going to be constructed periodically. Basically when I say okay I've uploaded another batch, let's push, would that be easier and more logical to integrate with the front end?
\ No newline at end of file
diff --git a/transcripts/uncorrected/55.txt b/transcripts/uncorrected/55.txt
index 73fdefbd1c2ebcfad9ad59e23523ae1b8526edf2..8eb532b0a713565b3b2fae20960656ec0d9e6e2f 100644
--- a/transcripts/uncorrected/55.txt
+++ b/transcripts/uncorrected/55.txt
@@ -1 +1 @@
-Okay, so I'd like to add to the VoiceNote dataset manager. So I have really annotations, there's two main objectives for this project as I currently conceive of it. And I think on the front end it would be useful to, when I'm uploading stuff and annotating, to have two separate sections for it, a little bit more clearly delineated. and so on.
So, if we have delineated, for example, where we have upload new voice note, that can firstly just be called maybe upload, next section transcripts, next section, and by next section I'm defining the headers, next section classification, next section annotations.
So in classification, I'll just add a few more recurrent ones that we should have. Prompt General, Development Prompt, Read Me Dictation, Social Media Post, and then in Annotations.
So content issues call that Audio defects and let add one for a significant background noise In audio quality issues, what I'd like to have actually maybe is, and again, we're going to, I mean, in the process of defining the annotations and might have to sort of work backwards initially, but most of them haven't been annotated yet. I'm not going to start annotating until the schema is defined so it would actually be a lagging annotation process.
The ones that are missing currently are background music. You have background noise but I think background music is actually very important because from a copyright standpoint that could be an issue. and for multi-language don't actually even have English Hebrew I'd have to keep it open-ended as to what other languages are present and I'd like to have one for background conversations actually and tagging by language so English Hebrew Arabic Russian French I'm hard these would be the ones that encounter my local environments a lot
\ No newline at end of file
+Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slight pivots or really big pivots.
They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.
Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for.
\ No newline at end of file
diff --git a/transcripts/uncorrected/56.txt b/transcripts/uncorrected/56.txt
new file mode 100644
index 0000000000000000000000000000000000000000..492695d3c04244eba8ee90b40f4d0ed8cbb6793b
--- /dev/null
+++ b/transcripts/uncorrected/56.txt
@@ -0,0 +1 @@
+Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.
And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.
So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.
What this means is you can plug your voice keyboard, which I think is obvious name, into anything. You can have it bound to your desktop for most of the time, you go away for traveling for a while, you pack your box. So it's really analogous to a keyboard.
Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it.
\ No newline at end of file
diff --git a/transcripts/uncorrected/57.txt b/transcripts/uncorrected/57.txt
new file mode 100644
index 0000000000000000000000000000000000000000..acadef7c73d2b38c88ec7b03751c008a67eca4fc
--- /dev/null
+++ b/transcripts/uncorrected/57.txt
@@ -0,0 +1 @@
+Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.
Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe.
\ No newline at end of file
diff --git a/transcripts/uncorrected/58.txt b/transcripts/uncorrected/58.txt
new file mode 100644
index 0000000000000000000000000000000000000000..48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45
--- /dev/null
+++ b/transcripts/uncorrected/58.txt
@@ -0,0 +1 @@
+Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.
Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends.
\ No newline at end of file
diff --git a/transcripts/uncorrected/59.txt b/transcripts/uncorrected/59.txt
new file mode 100644
index 0000000000000000000000000000000000000000..353b380ddee0d6134e7cfc905de9171524ef566e
--- /dev/null
+++ b/transcripts/uncorrected/59.txt
@@ -0,0 +1 @@
+I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.
And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 0.1, one decimal place.
And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that.
\ No newline at end of file
diff --git a/transcripts/uncorrected/6.txt b/transcripts/uncorrected/6.txt
index 35a25c66c27c2d44f0a64ca785442bcb2b03db07..7ade92ea48527be48c9ed28805bb0153509bb3a1 100644
--- a/transcripts/uncorrected/6.txt
+++ b/transcripts/uncorrected/6.txt
@@ -1 +1 @@
-This repository contains a folder of screenshots.
The intended use of the screenshots is that they will be integrated into the README or other documentation to demonstrate the UI of the app.
It's important therefore that the screenshots have descriptive file names.
Please rename the screenshots for this purpose and integrate them into the README in the most appropriate section.
\ No newline at end of file
+Here's my idea for an AI podcast workflow. I think if it's just questions summarized by AI and people know that the whole thing is text to speech, it's a little bit off-putting because people think I don't want to listen to just a robot speaking the whole time.
I think if the podcast format was that my voice prompt actually makes it into the final output so it starts with me recording a voice prompt as I'm doing now, then that gets transcribed. Then the rest of the workflow is the same, but what I do for the actual episode render is I combine my voice prompt with the AI response. So that you really get the feeling that it's me actually asking something that's definitely not AI. That I'm an identifiable person speaking. And then the podcast goes from there.
I think it would be more effective and more impressive and more enjoyable to listen to.
\ No newline at end of file
diff --git a/transcripts/uncorrected/60.txt b/transcripts/uncorrected/60.txt
new file mode 100644
index 0000000000000000000000000000000000000000..da218ad130c3c5a5f3ca672509c6c517f4fa87f2
--- /dev/null
+++ b/transcripts/uncorrected/60.txt
@@ -0,0 +1 @@
+I'd like to create an app that does the following. The user will paste a screenshot from their calendar or there's a text field for calendar entries for a certain time period. Below that there is a voice recorder. The voice recorder will allow the user to record a voice message, record, pause, stop, and/or retake.
When the user is instructed to narrate their timesheet for the week, and the user can also select a date for week commencing, just to validate when the first date that they're referring to in this timesheet is. When those three fields are provided by the user they get sent to Gemini and Gemini will then generate a timesheet based upon the user description with activities per day.
The meeting information that was received will be added. So I might diarize specific meetings that were referenced. So combining the two sets of data. And finally based the user might if the user includes a time spent estimate how many hours were spent per day on a certain project or task it will then calculate the estimated total hours spent and then a summary section.
This will be provided as a document which is created in markdown with the user it's rendered in rich text on the screen and the user can click download and if they do that it'll download the timesheet as a markdown file with the title automatically file name timesheet for week commencing in machine readable case.
\ No newline at end of file
diff --git a/transcripts/uncorrected/61.txt b/transcripts/uncorrected/61.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0ec335394a72e80887a3672f290bc5828d8227e0
--- /dev/null
+++ b/transcripts/uncorrected/61.txt
@@ -0,0 +1 @@
+I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.
And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda.
\ No newline at end of file
diff --git a/transcripts/uncorrected/62.txt b/transcripts/uncorrected/62.txt
new file mode 100644
index 0000000000000000000000000000000000000000..243f36cf36c052964af7ebe83a792dae9e67d205
--- /dev/null
+++ b/transcripts/uncorrected/62.txt
@@ -0,0 +1 @@
+I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.
But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.
Come up with an imaginative name for this use case.
\ No newline at end of file
diff --git a/transcripts/uncorrected/63.txt b/transcripts/uncorrected/63.txt
new file mode 100644
index 0000000000000000000000000000000000000000..35a55fa10abb62fbf49bc2c38d73e8cc53fca620
--- /dev/null
+++ b/transcripts/uncorrected/63.txt
@@ -0,0 +1 @@
+This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.
So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links.
\ No newline at end of file
diff --git a/transcripts/uncorrected/64.txt b/transcripts/uncorrected/64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e3960e6d457375f71a0aa63d07c4c8ad4af74fc2
--- /dev/null
+++ b/transcripts/uncorrected/64.txt
@@ -0,0 +1 @@
+Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.
Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.
And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on.
\ No newline at end of file
diff --git a/transcripts/uncorrected/65.txt b/transcripts/uncorrected/65.txt
new file mode 100644
index 0000000000000000000000000000000000000000..73fdefbd1c2ebcfad9ad59e23523ae1b8526edf2
--- /dev/null
+++ b/transcripts/uncorrected/65.txt
@@ -0,0 +1 @@
+Okay, so I'd like to add to the VoiceNote dataset manager. So I have really annotations, there's two main objectives for this project as I currently conceive of it. And I think on the front end it would be useful to, when I'm uploading stuff and annotating, to have two separate sections for it, a little bit more clearly delineated. and so on.
So, if we have delineated, for example, where we have upload new voice note, that can firstly just be called maybe upload, next section transcripts, next section, and by next section I'm defining the headers, next section classification, next section annotations.
So in classification, I'll just add a few more recurrent ones that we should have. Prompt General, Development Prompt, Read Me Dictation, Social Media Post, and then in Annotations.
So content issues call that Audio defects and let add one for a significant background noise In audio quality issues, what I'd like to have actually maybe is, and again, we're going to, I mean, in the process of defining the annotations and might have to sort of work backwards initially, but most of them haven't been annotated yet. I'm not going to start annotating until the schema is defined so it would actually be a lagging annotation process.
The ones that are missing currently are background music. You have background noise but I think background music is actually very important because from a copyright standpoint that could be an issue. and for multi-language don't actually even have English Hebrew I'd have to keep it open-ended as to what other languages are present and I'd like to have one for background conversations actually and tagging by language so English Hebrew Arabic Russian French I'm hard these would be the ones that encounter my local environments a lot
\ No newline at end of file
diff --git a/transcripts/uncorrected/7.txt b/transcripts/uncorrected/7.txt
index c3e6aec46313e6c703697e4fcc48f050db3015c1..ed50ed359bc55372aab37746585a31f7525ccc9a 100644
--- a/transcripts/uncorrected/7.txt
+++ b/transcripts/uncorrected/7.txt
@@ -1 +1 @@
-What's the most professional way to install a package on Linux? If I create an executable and copy that into the directory on path, such that I can call it, is that considered a worse way to install applications than through a Debian package?
\ No newline at end of file
+I will try to build. What I want to build is this: I don't know, is there a name for this kind of workflow? So let's say I go out taking B-roll. Now, right now I'm using a lot of it for populating my own library, and sometimes I share it with stock libraries. And usually, they strip the sound. I like to have a workflow in which, well, my ideal workflow would probably be something like this.
Let's say I have a folder full of media and P4 files. I can usually end up with a few mistakes, unintentional takes, and those usually would be like kind of less than five seconds duration. Usually, I just eyeball and I look for the ones with a small file size that's too small. Next thing I like to do would be stripping out the audio, batching, putting the video into its own folder, and then maybe, because for stock I'm shooting it handheld, it should be stabilized. So, stabilization.
So it's basically a pipeline. And my question is this: can this be done? But if I want to build a few pipelines like this, this is, let's say, my stock video pipeline. I might have another pipeline for sorting, so I might have a few media pipelines, and I don't want to have to go every time into a repository and run it. But it does make sense that it's just a script, basically.
So what's the best way to have a few scripts? I'm basically asking what's a good GUI for this kind of workflow? I want to have my media folders, and then I want to say run this script within this folder, and that would take the TDM out of setting up and resetting up environments and Python and all the rest of it. So what would you recommend as a tool for doing that?
\ No newline at end of file
diff --git a/transcripts/uncorrected/8.txt b/transcripts/uncorrected/8.txt
index 72dd47f2927e95f6a555120604796efb0f7010e8..acc8d62d6d5b71235676ccf824c7860bf8c12d53 100644
--- a/transcripts/uncorrected/8.txt
+++ b/transcripts/uncorrected/8.txt
@@ -1 +1 @@
-Your task is to take this system prompt and rewrite it for implementation in a structured AI system.
In order to do so, adhere to the following instructions.
Within the text of the prompt itself, define the The JSON output that the AI should be constrained to giving.
And instruct the AI tool that it is working in a structured workflow and must only return valid JSON.
Create a folder for the prompt.
And add in addition to the rewritten prompt text.
You should also create a .json file containing an Open API compliant JSON schema and finally and you create another JSON called object.json which contains just the JSON object.
\ No newline at end of file
+I have a question here. I was exploring lately, getting up earlier, and it always really appealed to me. The idea of getting in sync with the sun, like the natural diurnal cycle. The circadian rhythm: when the sun goes down approximately that's when you get ready for bed. When the sun comes up, that's maybe when you get ready, that's when you get up. But that would require, in the winter time at least, here, where I live, going to bed as early as, I mean I guess it depends. Whether you'd want to go to bed immediately at sundown, I think that's probably not realistic, and a couple of hours later. But even if you did the latter, you'd be talking about going to bed at like 8 o'clock in the winter, maybe as early as 7.
Now my question is, my interest in this really comes from a question I've always wondered or thought about, which is that until relatively recently there was no such thing as artificial illumination that you could click on with a switch in your home at least, and even the concept of street lighting being totally reliable and totally every street in a developed city being covered in street lighting, that was also a foreign concept. So in the evolution of humans, it seems to me it must be the case that this is a very recent adaptation.
So my question is really, from the historical record, what do we know about the kind of sleep cycle that humans gravitate to naturally when there isn't alternative lighting — artificial lighting?
\ No newline at end of file
diff --git a/transcripts/uncorrected/9.txt b/transcripts/uncorrected/9.txt
index 76af9ed38a7f3a464480738293afb78a25ff5929..8a430e2e093e18c208dfeff13e05eabe999f06dd 100644
--- a/transcripts/uncorrected/9.txt
+++ b/transcripts/uncorrected/9.txt
@@ -1 +1 @@
-Okay, so here is the type of license that generally work for me for open source projects. I usually open source software because I've created something useful. I think other people might either find it helpful or develop upon the idea to do it to take my idea and ability further. Attribution is always appreciated but I'd only want to make it mandatory if that wouldn't really sort of create friction with other people who'd like to use a project.
But attribution really helps me because it opens up the relationship and connectedness of open sourcing because if someone were to use it downstream, they have a way to sort of get in touch with me. People commercializing open source software doesn't sit very well with me, but again, it's only if it's, I'd be very reluctant to add that as a limitation.
Other than that, nothing else really stands out to me as something that I'd require. Like if people took it in any other direction, it's fine. The only one I think about sometimes is obviously no one wants something that creates to be sort of misused or used for harm. And one also doesn't want to end up with lawsuits if something they create is misused, so I don't know if there's any legal language that can create a little bit of protection around those potentials.
\ No newline at end of file
+I have a Nord 3 5G and I'm looking for a power bank. It supports this fast charging protocol. I think it's called SuperVOOC. And I was looking for a power bank that could basically charge it as quickly as possible, deliver the fastest charging that it can support from a non-AC outlet.
I got one from Baseus before. I don't know what it was, it was 65W, I don't know if that's relevant for mainly smartphones or if it's just for laptops. But in any case, I think I've lost that power bank, so I need a new one.
Now I guess what I would probably like is the biggest capacity that you can fit into a power bank form factor. By which I mean, at a certain point, we're not really mobile, they make these power stations I think they're called. So the biggest thing you can get, and not an exaggerated spec but a real credible spec in terms of the mAh.
And the quickest, the combination of the quickest charging and the biggest capacity for this particular phone. Anything you'd recommend from Mosaic or other, let's say more credible manufacturers?
\ No newline at end of file