diff --git a/annotations/1.json b/annotations/1.json
index fbe019c8315d56b9d9939ef34049c210d62504e0..73ec65d495043af360eb5c80abce3fea46e4c1e3 100644
--- a/annotations/1.json
+++ b/annotations/1.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:a38f06b2c513e2894cbcb6b608b24dd010b979dfc85bc5d1eead96c90f0c8f66
-size 599
+oid sha256:9787d250e8ef057f2409ded434c888f944859e588de179485b685cd0473b8fe4
+size 596
diff --git a/annotations/10.json b/annotations/10.json
index 2ccba83a5ac99a30720e2ddc86806ff169189d94..20b6a8f2e0da9a1acf908049e4677814e4f8ad05 100644
--- a/annotations/10.json
+++ b/annotations/10.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:b9b2682e14b1627d8291199313a78bf3bb6d8eaeb0ba8bb3ca92956f28d31e38
+oid sha256:ec40587442d11cf3f07891b8f4d829ae746ff8d4bf5d2c57a88bb6cb73415c9d
size 603
diff --git a/annotations/11.json b/annotations/11.json
index 9ecbf1d007914126545110cd0d9723443c9e9a4c..9f26c42b0d241e66ad97800f7b627600c4aa559f 100644
--- a/annotations/11.json
+++ b/annotations/11.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:5945b087a33bbe981e22de200d7da62b56402a3a07cba5e173de4679cfc85478
-size 599
+oid sha256:cabf426120252f3fc865e5af43336d59345fee774213351211cbac178375a52f
+size 600
diff --git a/annotations/12.json b/annotations/12.json
index bae30ecfe0cbe12d9d4b80b437ddb31fdc624876..cafdf04dc46799a3dbf7dea51406e24aee259abe 100644
--- a/annotations/12.json
+++ b/annotations/12.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:3c67327f8b46380d4cd904b54839a1999c778e97a3015544c50278300857406c
-size 599
+oid sha256:c6c0c4d41decd0727fe74db228ee415a82d771fdf3c742a9ef7b58763dd02645
+size 604
diff --git a/annotations/13.json b/annotations/13.json
index 3b56b593a36a05036e0289616061eea45d121da0..1209ea3a53db4171783b42fd1b6256f4f8e369b5 100644
--- a/annotations/13.json
+++ b/annotations/13.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:34cecb2c2ad068588d63b5e9b11ce58d8b171fe0d80f48671cafdf8864e69fab
-size 599
+oid sha256:d7f52128b9815c52a4ef98c6a382819587c9f75947c82c38a160cb61f627361d
+size 603
diff --git a/annotations/14.json b/annotations/14.json
index bb4bd0964e6175dbb358f1ad19121b2e62e69b76..9e4800f82a329a48065aa5bb616524958091b2d8 100644
--- a/annotations/14.json
+++ b/annotations/14.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:2d1d1920a715510beed3dcc9f8283f1c8317c02ae7a501331526408f0baaa34f
-size 600
+oid sha256:d9621bb571ab18b2b8b09e1faab889d15b3c867e835c160da0e84bd129e2760c
+size 603
diff --git a/annotations/15.json b/annotations/15.json
index 23930e40366fa5600d9a791e146ded46cf22a9b4..563a47e26c7b7844acb673ac775b1f0220592c06 100644
--- a/annotations/15.json
+++ b/annotations/15.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:8efe187df06b6e3bb13f08361db3b2a5af64aa2f27c2cb70816c54638f3e4121
-size 598
+oid sha256:797e01a78533bd691f971b0b89982bfb28c50e332fa1d1bba620f20c4795b9d0
+size 603
diff --git a/annotations/16.json b/annotations/16.json
index 5efec2328e7089b5b94e7ec53cc2dde7ad49c7ba..c5ebd36dddd8ce38a0d9e0b25d5da2ee7299d93e 100644
--- a/annotations/16.json
+++ b/annotations/16.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:77c1d454ed0ed0707c03df759181d909792e6a4b4f80df067635f79332bd1d6e
+oid sha256:d9ce5becc479ea390dd553a6a3aafd44ab10fe859b4f6d4492e7e36792da17d9
size 599
diff --git a/annotations/17.json b/annotations/17.json
index 647f26739ab7e5d7ca7d78ee262bb7e2e2021b6e..8968a5bb9ef3a96705c953567e4a7849cd49e811 100644
--- a/annotations/17.json
+++ b/annotations/17.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:485e34bf8f41c01b3d203794854db509fa3734df14d92715bcab9ba78f3b9887
-size 597
+oid sha256:4ac21ec6af7dc50acc315695116dc45eaa18b8bf02b3fcea55a05913c4a5a210
+size 599
diff --git a/annotations/18.json b/annotations/18.json
index 1e00dfce64eba370e7286143ca0dc252d17f58c1..1f5becb1133d55f19d261cb0fb7eb600345e9b16 100644
--- a/annotations/18.json
+++ b/annotations/18.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:298a76679c25bdba71d35b35d5c9abf2010235f4ced089e7d78ecc46665b3eb9
-size 598
+oid sha256:da4a322d5f89fe10b29b934f427e88723a6e902e16a73b60461a3f13fb71d0f0
+size 599
diff --git a/annotations/19.json b/annotations/19.json
index 48a93300a9ead5a531e22183c1e5d13f55104216..201bd3345b6ad30702bb80976a43bd3dff822933 100644
--- a/annotations/19.json
+++ b/annotations/19.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:fabb44ff012ef2767104fcd66251786db1b1bca1ab4bf0acccb8dba6fd35085b
-size 604
+oid sha256:8eb3c506147c0897cbebbc68b493f7afdad15933934dd1a05225ca948c5b1375
+size 600
diff --git a/annotations/2.json b/annotations/2.json
index 355963418fe0b34000471aa37d2afc42beb84a4d..09016cd3ac024c95052e2a50d049a6bd94c4cd15 100644
--- a/annotations/2.json
+++ b/annotations/2.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:74ff12db8cb3c26f6b6eb0bb39c2d601b488bd9ff13e47b541f1f41b7c18f5fb
-size 603
+oid sha256:9afea1fd660781c67db25d61fa9d761ca078ea96071000326deb5bd70ce4a606
+size 597
diff --git a/annotations/20.json b/annotations/20.json
index dd5e98bca5c8e41ca1e0753e66d47c3d63a11eb7..bb8d990bc02b004259b4064dce0046f1e6c577bf 100644
--- a/annotations/20.json
+++ b/annotations/20.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:f94c419ee902508d66d7d946ec726ffca221a748028c498759c33dd98b5e21a3
-size 599
+oid sha256:c3896a2272214bd463e0211f9e4be1f40f84bcd663d45009557828aa3281d886
+size 598
diff --git a/annotations/21.json b/annotations/21.json
index 9dd1d43f1c5ad0de31c54f7c1ccddf785fe5c428..e44083923ce9ec07cebc95ebc690d9800014345f 100644
--- a/annotations/21.json
+++ b/annotations/21.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:11a7b663aceeb4d0003120718219c11c7fd1a4960d03d68ac1b6ae9327817810
+oid sha256:05123fb044f181ac464911b36b997039909120ce38c9465e142b9922bc009c94
size 599
diff --git a/annotations/22.json b/annotations/22.json
index 5ec0ce86bb0fa47953c46d61d39296d55a9f9a8c..bc1b0e185fb10c06d42d63a7137b111539dc38fc 100644
--- a/annotations/22.json
+++ b/annotations/22.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:b811fd2ef28bdea6b10084440a59e330f435ffeb3f24b3222c1e5e10e8f1199c
-size 599
+oid sha256:1481072c2f1637d68829e7eae141bfb779e802a832108fadf31332deaad35623
+size 597
diff --git a/annotations/23.json b/annotations/23.json
index ac11b8ab5471015efcd71fd40f333d687ffa33e7..e7d39d952fe1bca1e5a5ac18d95f072dff32e33c 100644
--- a/annotations/23.json
+++ b/annotations/23.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:de070a09c40dd7f9011010b936f3bd98194ef0b09bade41e59fd3c9f5afb0bd5
-size 603
+oid sha256:ab531feb12df0a4e22b1f8aa2dd719073ea0f8ec8eeae9ad0cd4ecc70dad52cd
+size 598
diff --git a/annotations/24.json b/annotations/24.json
index 3738bec44205f4a9413c2b1f1cfa308ca59377be..f486484cfa62d40eb87b34270dfc7f6cba5e0e54 100644
--- a/annotations/24.json
+++ b/annotations/24.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:acfbb5d2adb7e7e1385cbcedd5348e10f9cae1eb0182de4b5acb3205e2748aaa
+oid sha256:95fcda89c918a86768494817889d306d6bab585de36267ddd591a73c0b89bc27
size 604
diff --git a/annotations/25.json b/annotations/25.json
index 8e2a356d599b46992dc3346e67a195ea32b85b24..14496041e1f28dbc89014213dfd71e002f43eaf7 100644
--- a/annotations/25.json
+++ b/annotations/25.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:13c8b60816471a57525dbbc921d7b9f211952ec7eb7c73035226320aea2689f1
-size 601
+oid sha256:2c4588db84775c788bfd24276681aacf5927470f58dbe34534d10969b2aaaa80
+size 599
diff --git a/annotations/26.json b/annotations/26.json
index c3904998121b527202f1746c22503a9508b44559..f93839b4b935d1d6c22e6e188fd4ceac501eea66 100644
--- a/annotations/26.json
+++ b/annotations/26.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:89a95c9348889d11e3f62366d320ceb6f3a25f31210b6194e0dd7001fba0fa0f
-size 603
+oid sha256:a8f02ce80c153abfe7ea1533a5d1026f2e9e716c4b13fd8470d3a7417abfc59d
+size 599
diff --git a/annotations/27.json b/annotations/27.json
index ddc8051ae4b3127e61f56e06fe02a6ab18f065c8..70e1abd65dbb0addc73bee0b0f54cf4a1f8d0ca0 100644
--- a/annotations/27.json
+++ b/annotations/27.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:7546d72b8219987d553db136b838775c598107ba195e11351ce4987f06a4de6e
-size 603
+oid sha256:b42475d48ebb5544a67c19aa6f403c5785aaa7c9ca3ca3e135c88aaf159f0222
+size 599
diff --git a/annotations/28.json b/annotations/28.json
index 455e42ffa800d8468495593bf372c3e8d90db636..e3ce855cec36061b309d046615be4e2e7e0d4d74 100644
--- a/annotations/28.json
+++ b/annotations/28.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:c6a1b80ca5d0adfa6896b2dcce88a3e096e1d2f67c92d78f6f3e16c06e3dc81d
-size 600
+oid sha256:1953d96d4c0a8925110e42f65767cab9d0ce5f1a00e6229302767b5d7b1cd04a
+size 603
diff --git a/annotations/29.json b/annotations/29.json
index b04cb4eede4976f9dfa07759db865fc15cd31c43..46f3f8aed254f3b68528494b97d886392b4bce84 100644
--- a/annotations/29.json
+++ b/annotations/29.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:90ecacf6be600eb4f01b600f384b54dc7a6d6ef887b81fa1531b31ef2003298c
-size 603
+oid sha256:82bc923a70751f2165291298ae5090e3b10eae32ebf1a04f124dda242b763939
+size 604
diff --git a/annotations/3.json b/annotations/3.json
index 2073ca37f2eadd72a1273618bfba576a728c6977..3878d5c855453e995d0a1ceb0af2363620d22da4 100644
--- a/annotations/3.json
+++ b/annotations/3.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:d5df81a7e69c57f30fef4677f0a44c409a6d7cfc5070163c541d5bb8e0bf0e16
-size 603
+oid sha256:8f6a934624b36f212a2db39f8a69528c896cccc5f4f1b18ba56e3f0339bbffcd
+size 598
diff --git a/annotations/30.json b/annotations/30.json
index 2e2db0a09ee348b38a5580a342478ad245da34d3..89706ad2a1daa8cae404a4487e547cbd0327d5ef 100644
--- a/annotations/30.json
+++ b/annotations/30.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:fc09e8ba3a76b7122b3c5f136c0615746de6a615a6760e02678386e554ca9789
+oid sha256:379e0079d68ad5acb6aa82889cf66a21aa6beaa14e4296a2ce1f79bf75c7d400
size 601
diff --git a/annotations/31.json b/annotations/31.json
index 80a9e5987dbb06944ce2bf3492ba775e3de9037b..70a7b50d898516b233e9f7c0352f345212314c7b 100644
--- a/annotations/31.json
+++ b/annotations/31.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:bea5fd290c25c8b790c5cc5286183fc5b89a6516958adc9473565e75d9f69abc
-size 604
+oid sha256:2ee195cd351116dd4429cc0815bfa8c92d73c78e24e607a65038c6e5d0fbd136
+size 603
diff --git a/annotations/32.json b/annotations/32.json
index 53cd0bcf79970899ad29448e1c96bb78153800ae..d6ee03333d73833b3bfa6b2eac0a016473029a0c 100644
--- a/annotations/32.json
+++ b/annotations/32.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:a27a0b6953772146b1beb70d1d13347ed684252b0dd7407add9a1cf411241d6d
-size 598
+oid sha256:8d2c6219369bdbbad3684b3fb3bdccca94f33da2f3a975fa7c7bca89d26f4215
+size 603
diff --git a/annotations/33.json b/annotations/33.json
index 725c38546efc29298e56dd1dede50440bf56893a..6bb347535e7aaeac673b20804ed720065d64ce86 100644
--- a/annotations/33.json
+++ b/annotations/33.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:3247c205a22b66febd2fed87e142f76238bd055c8aefe091d8ace9e60d37353e
+oid sha256:b11d07e8a7caf9f8d0f0f110d40da8a677ee60b3f0f0940302bf53962f76022a
size 600
diff --git a/annotations/34.json b/annotations/34.json
index 1a6b50822feb8a06e0287d2a35305c88b1fe58a5..ab06f25d3c681eeb72df5003cd535b26f7076ee8 100644
--- a/annotations/34.json
+++ b/annotations/34.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:3e4d0e425cb65ebbe9bd6b646f62e980731b3b58eb2f597d6a9c033ed50b1dcf
-size 604
+oid sha256:5d8686bca73436aec2391e7de382eb96ead7808b6d50545c860e2200d4be5c2b
+size 603
diff --git a/annotations/35.json b/annotations/35.json
index 52b95c2fedda0245e4b21f0e7c9af915737924de..4b695e8ba1fc7139d1a9cc041b55fe0aaa20bee3 100644
--- a/annotations/35.json
+++ b/annotations/35.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:2d6531434f6bee9b389c99e301d7cbd63352f7420b02303f40b06d42b8fb8f24
-size 598
+oid sha256:a94e57e3d1fa56b8f227a99a1313c3040584c5f1e18ff4c2613dac0633c4f88d
+size 601
diff --git a/annotations/36.json b/annotations/36.json
index 0f36bcfb254599ab159adb7b04d7c1cfac56ad20..5ab32f20785bbad63583d41be2a83d3385ab8dff 100644
--- a/annotations/36.json
+++ b/annotations/36.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:2d7b57ab0b7498a8e8b9a6e4bcf4fef778dbcc24f37980e411f0bd0c5895b6f3
-size 603
+oid sha256:b92d2022be14945739d30d4717e7e372e57d194702d880e5bdae3b3e1eec9098
+size 604
diff --git a/annotations/37.json b/annotations/37.json
index 61610050771da785c6b9bb5c0a41e828244aef3d..46ebd664d66eb4de9f6d6fb5f6ce9a3e406c5725 100644
--- a/annotations/37.json
+++ b/annotations/37.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:f8eff753d99a77beafc4d658d48798f3b8f805ec29d261acf8bf2d5ccaad8266
-size 604
+oid sha256:5e3718dd1de46cb41406e96cdc337059931ab2aee8492b355fddc34d7fcd8ee9
+size 598
diff --git a/annotations/38.json b/annotations/38.json
index 6e5ea1fb6e7c30a22438b2125fb66ddd65f3e4b4..11e1c561fa3d83d0e05477172221dd6e28211603 100644
--- a/annotations/38.json
+++ b/annotations/38.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:001113f9776744e4363cb0aa05a1b61508fd229c44bca6c79ba449e8646a4ac0
-size 597
+oid sha256:e1a2695f87ff3f0f172a50c0408410aa0a23b968878bea43f226ccf4752fbfc7
+size 600
diff --git a/annotations/39.json b/annotations/39.json
index c0588b7375c2faf1c5597e3e9e10ef29bc611ad2..0425c39f6b0b4f3c214983821618f6beb91eb409 100644
--- a/annotations/39.json
+++ b/annotations/39.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:6d014a6f7c864f5f0e5c699eed4fb68bccf2f87cda50f7010e8dafb8df786b4e
-size 599
+oid sha256:e99871dc0b5034cdb068926f239eb20753c0d8ff3f20ee78de73c6c06424c202
+size 604
diff --git a/annotations/4.json b/annotations/4.json
index 5a1b0988535c7fb3e5d9d163e9c12f64e3d4e6f1..f25804e16ab01306fcb206865d0f81b90b75fa46 100644
--- a/annotations/4.json
+++ b/annotations/4.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:f76fbc7b6ab0afebb8dbc2b60569a169df87fba076712cf641c9698de7889fc0
+oid sha256:820d6fad2a1e16af407d4aaee39c56c61388b1459f664bb43dc85ace6da28c97
size 598
diff --git a/annotations/40.json b/annotations/40.json
index 530a1082ecca105aad421bff859f2e152840e08f..4244f708ad8f217cea987c1b1b3b68b3c07bfab7 100644
--- a/annotations/40.json
+++ b/annotations/40.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:c1898f94401693ec68c2be59cca229c4fafeb0a34487182b873d3450245af20e
-size 599
+oid sha256:7b2f5df282d3e4bc7f3b05cae649cb40ee4e80c523b4366e8167878e08e8baa3
+size 598
diff --git a/annotations/41.json b/annotations/41.json
index ec12a34a3ef7a0abea2343aa85184f54e1f5f067..4a77ca100cb4fc2391b860c89645f38b6b7940f7 100644
--- a/annotations/41.json
+++ b/annotations/41.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:76505fa632d03424d471c033efe915fa1739ba6137339391c9efdc140ecfc9d9
+oid sha256:34195f438a047ebae5da7bfbda50f88a8d9910ae909c476cedfef73dea26013a
size 603
diff --git a/annotations/42.json b/annotations/42.json
index 20a00a73fdb8b103991b9bd7231bdb53b8a876dc..c56df888453203695255bf4ce55509fa8e58bda3 100644
--- a/annotations/42.json
+++ b/annotations/42.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:ac08904075d8a572c2085847d0bea1d08c8a29c882781ddb96b0b7725dc38be1
-size 601
+oid sha256:610b1492cdd11b10bc79fce815583a8978adfee68cd9e6424ca32a15f8c6a152
+size 604
diff --git a/annotations/43.json b/annotations/43.json
index 3dd6fec0764da6ccf485df3d157b11139908976c..bde416c87d001371342f13a188204016bf3d75ac 100644
--- a/annotations/43.json
+++ b/annotations/43.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:853c519cc07ab91045859ae6a1aeca4d4c9bafb757c2b4838c66bea5f6e17832
-size 602
+oid sha256:d1648eefa3071c138a1eadc0327a28196d46b93b448d96d0353825fec4c33934
+size 597
diff --git a/annotations/44.json b/annotations/44.json
index 195705fbdb7f4e90cc5bec9505783de77c3f9e2b..6e97f56088de4aad2832d731c5a056ded86df72f 100644
--- a/annotations/44.json
+++ b/annotations/44.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:873b8fba2c07856f1a12b707ffd1a387da314e2d3d553fc4ed30cc363667de50
+oid sha256:a9ea7666df53659019cbd81bfa5357b49aecfdeb04ae24161457d74f9284869d
size 599
diff --git a/annotations/45.json b/annotations/45.json
index da6d4f522d57d903c1b088bf0c7ff129743145fa..7ec311b52b60f3ae461c7b9a223bef1a1bb10798 100644
--- a/annotations/45.json
+++ b/annotations/45.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:37f2bdd41890d2b09697cd4a77f49212b075f321adbd6004921eb155a6db317d
-size 603
+oid sha256:30e43bf67459d0c6f090608d034d68e3ba58724582f74e82f38f965636f66b75
+size 599
diff --git a/annotations/46.json b/annotations/46.json
index a30e5c6883d5d7e6d481cea79f3a5d9743c70502..df42ac2fd93b9e74699b65f3886f1007fcda6a33 100644
--- a/annotations/46.json
+++ b/annotations/46.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:1f806ffe2111744774b947bb505cbaaba7e74a7a9ab8847574dc28d72f4a4d82
+oid sha256:16046be3038eb326f337da562cd1a1eb544a56944dab2a8645c339796ad12e1b
size 603
diff --git a/annotations/47.json b/annotations/47.json
index 25e653478ede178674475c11326dfa918a044b07..3941a2367accd08125c75c547ca63593302fd2a5 100644
--- a/annotations/47.json
+++ b/annotations/47.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:81fbb412e1ea79a90f7aff0decbf198c23661c4fda1a3980d4cf6eecb59b1a60
-size 604
+oid sha256:644b1e46351a09d6eb313b31d5c9ed828227b5a3bf858b468dc1e930150c4a15
+size 601
diff --git a/annotations/48.json b/annotations/48.json
index 5eadf624d34c40cfed6516e9d9468696844d0a7b..0d2ca67b556ee0d700feb0da40d0ae81467ab3fb 100644
--- a/annotations/48.json
+++ b/annotations/48.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:3e4b73cae21e9e732c43bcf8eed2f066b1acc410cdec93a2d938d57c77834b99
-size 599
+oid sha256:d28a78c5154d33860d8fb48f2b2e9a81c5367452ac606c519322764be6ea4749
+size 602
diff --git a/annotations/49.json b/annotations/49.json
index 63b9ff56291376021234fa0bf1ee0a22f533c718..93c978562eff35790125bc889bf3a81c2598e116 100644
--- a/annotations/49.json
+++ b/annotations/49.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:41c85e6a287c9fd7be56d28258620b8c9f87559a739474a0049553b47dec5be3
-size 602
+oid sha256:9c4ba5ca3bc1d918fa6e5cfd007237e22a71009520e44a7927074d2429c864e3
+size 599
diff --git a/annotations/5.json b/annotations/5.json
index c0c5c5c2cac5020fd055d71a7dcfe79374995eec..a30683e0670e0afd13797cbc0cb86bd4ceb32a4b 100644
--- a/annotations/5.json
+++ b/annotations/5.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:7876c553652aa45ab34695e4d00480d5f8c7ab1ff040bb88fc37b57e6637fcb5
-size 602
+oid sha256:b8bc6b08da2f0d138ad2bed5b3a18d1681f186fc78cc0b0185df46deafe2dafd
+size 603
diff --git a/annotations/50.json b/annotations/50.json
index 1377c94bff62621adbf494556fecdfb4ed0c7c48..36de0e673199eb6028edb8fa711832a922818974 100644
--- a/annotations/50.json
+++ b/annotations/50.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:5aeaecec38b4e49224b80c2575376af2e31e67b6b87d58647c9b33a78b1a3f4d
-size 599
+oid sha256:ca1e59791602e67cb48f418bc54cfb4a758bcf2a20c44adc514e058c1297e06c
+size 603
diff --git a/annotations/51.json b/annotations/51.json
index 427bf06c961b838fc942a29767a001095ce3a13e..83403ceac2bc5ac357ccd17a7485698935058ba2 100644
--- a/annotations/51.json
+++ b/annotations/51.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:8723d99b04f84f24ed885eb66cfa2302ab56639777c2b8bbdd8b25dea75dc7d5
-size 599
+oid sha256:863c18c30a946675c43dcf2fd779feace628b3b377f0221762590ff52f1d0c33
+size 603
diff --git a/annotations/52.json b/annotations/52.json
index 8c1a9acf05d110c32508e3150ee177f6aa434017..9ebe724774b8e1c526adba951a45c2e9b5212773 100644
--- a/annotations/52.json
+++ b/annotations/52.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:906e3782df5b38a6c5a0459970988d23fcb14973df5e2f15ecfeea9a67a80d4d
+oid sha256:02732f1b34d9cb4c09a9fd0f09732125fbf8d714eaf4e4f046a4d3addc56c2ac
size 604
diff --git a/annotations/53.json b/annotations/53.json
index 5ff93555c2d649d105ce5f80b521aab44838d055..26c4c3a38374b55bfb0643108f407ad35e918cee 100644
--- a/annotations/53.json
+++ b/annotations/53.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:dd7c32ba7b9f0daec60367d11f41a958661109a7b8e4270d1ca5f599bd833c87
-size 598
+oid sha256:040174271906c677dfdeed9a80b9cb3f88cd27be02c5c5fe8b3059f8c91edb0a
+size 599
diff --git a/annotations/54.json b/annotations/54.json
index a594ec46f2df37454d576c8d88aa219f28630d62..841094c619901e446104b5d2128a882e06951996 100644
--- a/annotations/54.json
+++ b/annotations/54.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:06d6597ece8f7edc5ea3d618b1f2028a872a07dc9c25460f9ca1f588e69413c6
-size 599
+oid sha256:63c08eb17b6bcccbfe34f5b156206564bf9add5452a0d3b9b88e5552043f372e
+size 602
diff --git a/annotations/55.json b/annotations/55.json
index 98adef4ac29faac3acc78a99205800cad783e753..931cfdc811741a778043586e38dc4d69c05f2da0 100644
--- a/annotations/55.json
+++ b/annotations/55.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:ed22c153b40a37cf3eddf7a76e5de6a4a64be8a0037cc6d2649e87cedc360822
-size 603
+oid sha256:a3b01bb6fe41ec488c4790bba4c36656d50c0f47c1fe792d6d024faaf9b7c8e7
+size 599
diff --git a/annotations/56.json b/annotations/56.json
index 97b143fe3dfeab5205389cd7838d22daa39671a3..eee8d0e9312283ca6ec4106800db5e34f98921cf 100644
--- a/annotations/56.json
+++ b/annotations/56.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:d58979a989027703230b00ebad36b4bb36c6852b012c9ed8f947921110a21d48
-size 604
+oid sha256:072cd7ad693583a24bdac974d633c0e840c619c82fde61539a4ae3e9b0090c09
+size 599
diff --git a/annotations/57.json b/annotations/57.json
index c7d7025f77cc9c8c406d21617afdc047b68560ae..330a94da05ca940e97f2ffcb368f3dcc77100317 100644
--- a/annotations/57.json
+++ b/annotations/57.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e64ecc9865f16c3a8e9a3e8c25914a822ffcbdb8b5599b5e4c2fd20a68ee6d8a
-size 596
+oid sha256:a487f797d9bf6a88d741762b3b73e7d36fbdad6647df728c3569f27321f6dfbe
+size 604
diff --git a/annotations/58.json b/annotations/58.json
index 20c51889bbcd547bdbe1d9aab1eb52fd452d3c63..553750cfc57def1eaef4afdccdb48041175c515b 100644
--- a/annotations/58.json
+++ b/annotations/58.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:9c1de3e5f185c389046a64620472a746c501c9866fd6ab5a6ec190f78381290a
+oid sha256:28817002c4c1e31c87fd2c3da6083009f6098bdb657378e05b1c68fd08d73ca2
size 598
diff --git a/annotations/59.json b/annotations/59.json
index ccb17ca288d90f7833c5ced5a47a5d1b10df0b1e..24b1bd7c5b46856a0910fa489f94f5af99f37fe4 100644
--- a/annotations/59.json
+++ b/annotations/59.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:577afdfa64451cd10060c97e117d4ecfcbda76aac893fe4298d602cb0bc0a930
-size 598
+oid sha256:9126ec1a0f66799465dba9d5e4f9fe268ec353fba8e920976baafb69345bb230
+size 599
diff --git a/annotations/6.json b/annotations/6.json
index f1a619aa60cc667e0264832a15cac542251e54fb..d70e32994596884dbb8cd2e761733ef159705516 100644
--- a/annotations/6.json
+++ b/annotations/6.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:1e8b7ceffb563084d97cb75141cfd5ce481db3f0211fa005917fd2e0c5bfa190
+oid sha256:f7501da7a709fb4b5eb4b819ef4308f182dc121985a447c5727242fb37d0ff3b
size 599
diff --git a/annotations/60.json b/annotations/60.json
index 3fff97a482c85e68cbdc7e167f254bd4f9ff3f4a..a90391aba7841fe53332af39ebd08d59c965f776 100644
--- a/annotations/60.json
+++ b/annotations/60.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:b2bc51ae6aa8f1d97db2dc829b228aed66fa7c21d22fc9c429281bf35f365e69
+oid sha256:b95f7ba575852e509923826865785284f264272c06c001b8881e4d39361ba011
size 603
diff --git a/annotations/61.json b/annotations/61.json
index a54716cd71cb9d790d062a925ea21cf89b49ead6..521c1072e0c15c7ed83424b8da2c979b93f27dfc 100644
--- a/annotations/61.json
+++ b/annotations/61.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:7ea2e2fdcd4c97fe1fb28b5022af087db5b255e1366360eeff744e48d77185d1
-size 602
+oid sha256:41497c4f82db1b9f2310e9d2b064f68a0f2c0d0bdc0986e7d9e89e6ab3578343
+size 604
diff --git a/annotations/62.json b/annotations/62.json
index 467db78ee4285154240e5ed89d751faceb6d24fd..f8a106b0e6e020ef3a33e94451f44de1a2493198 100644
--- a/annotations/62.json
+++ b/annotations/62.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:5a28f51ca21197f8da369a9cf4817409af15a03fcb99465632192cf2d9243a93
-size 600
+oid sha256:0ed46786c692a29197a63ad3033e2e87353e0cb17a9dd97547ef7d46a1c07e9d
+size 596
diff --git a/annotations/63.json b/annotations/63.json
index 8ec5f2bb0de55d9d97b821f7d5a35e4ea12482f6..aa1600cbb019f432e0f06c48e8a00b306ad772bb 100644
--- a/annotations/63.json
+++ b/annotations/63.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:98926afd75384a863298aabb7684ca8dcc8ea66cef449863d56d4be185b6ce09
-size 601
+oid sha256:6e4b538a2b144015f58c1ae003074ac7a65cec4109cc5c90fe2fe241a7b011d5
+size 598
diff --git a/annotations/64.json b/annotations/64.json
index f3e1ab360dc9bd5a864f0fb49204b252df8784e3..c0676a6507c487fb197d73ffd954360a6d653883 100644
--- a/annotations/64.json
+++ b/annotations/64.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:5e1092162834e16484a9b26b4ab7547a79b6ca7eeed34c11d07f71dbfa5763ce
+oid sha256:54d0566fb72cb7e8d61de5e0047a27580f33b11fe29c41f9270ae9328b851e69
size 598
diff --git a/annotations/65.json b/annotations/65.json
index a67381e8d6c0546d8275bc032cbdf21aaf8fc485..377c9f65a9fe3f13d169c08226188c7bd7554060 100644
--- a/annotations/65.json
+++ b/annotations/65.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:7498966710776245dac5aa8533c609190d65697484ca65e300d2d1b549b7634d
-size 602
+oid sha256:8865bcf7b9d15b3826adda59cfd52a99859bca0e0c7eb7756190e0910bcc64af
+size 603
diff --git a/annotations/66.json b/annotations/66.json
new file mode 100644
index 0000000000000000000000000000000000000000..a571493b2b9b6a5ed3865a9c6d0562eb8d2ea25b
--- /dev/null
+++ b/annotations/66.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0d8788626b9765a15f9675c4fcde9c99cb34774b8cae595aa7ce01ebc76bac0
+size 602
diff --git a/annotations/67.json b/annotations/67.json
new file mode 100644
index 0000000000000000000000000000000000000000..833166e7f50ff83b7cd0d608ec299c11830bac4f
--- /dev/null
+++ b/annotations/67.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f59e535d5c0bc08266b2c401160aa28ef3a4e2252d75ff40daa130739bc9336
+size 600
diff --git a/annotations/68.json b/annotations/68.json
new file mode 100644
index 0000000000000000000000000000000000000000..b250e049ca072eeb04d6722f72564a894f8883d0
--- /dev/null
+++ b/annotations/68.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e65372d383d52fcbba6ad818161af46576e22b5bad5a4508bde4da16522b4a76
+size 601
diff --git a/annotations/69.json b/annotations/69.json
new file mode 100644
index 0000000000000000000000000000000000000000..731159113057652aa83c046c08f397f5dbbccff0
--- /dev/null
+++ b/annotations/69.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:898aa8eea0ad17bf3953db96ddf9fbf16d44b679a85e755b0b13b42bbb12c71a
+size 598
diff --git a/annotations/7.json b/annotations/7.json
index 8a9d540a5c4f69a8931fc50aa9f7095f7baa443e..7f21a8b6bc988d3762d1aeaf84bfb981fb17622c 100644
--- a/annotations/7.json
+++ b/annotations/7.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:20cfd4fde1180c87c09a2cc749bafe21307e643d19e78b9275cc90df86baaabc
+oid sha256:7a34b5deaba180b04222ba9b2e5df502d753f09b9c3bec4e79f673e5fe74812a
size 603
diff --git a/annotations/70.json b/annotations/70.json
new file mode 100644
index 0000000000000000000000000000000000000000..fe8c7da957d04844a85cf6327b2a6b2949cbc118
--- /dev/null
+++ b/annotations/70.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa4c35f873829ace31f5dbd184df882e556ffc60312b434525592f1d97472251
+size 602
diff --git a/annotations/8.json b/annotations/8.json
index 1ef95ae4ace4805eee0deee1816816716e5675f7..6891e64b4eab1fa2f73e16687708952b8007a33c 100644
--- a/annotations/8.json
+++ b/annotations/8.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:3ed45fcf5c8230e6dc23717daa0eabdc63445ce6be0a9e58221ce272d813f704
-size 602
+oid sha256:1274bd4623a5795a16dcff9f749b9511719434c38eb60895e7c5e8117ad63a55
+size 603
diff --git a/annotations/9.json b/annotations/9.json
index eac798fa51ab05443be3bde868dce7b0a4165190..ad45fbdda8a6d26fdf02420ee1c072a4702dd48c 100644
--- a/annotations/9.json
+++ b/annotations/9.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:ee941d4cc8c62aa85dd248b7bec7685bec7df17cf9dce613500f2548f182aa32
-size 602
+oid sha256:667c7a7ebeae477036432734f534803697770fdd534a11da843826922d1ef7df
+size 598
diff --git a/audio/66.mp3 b/audio/66.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..c7bf9a38a696b47ded9a2fb62a552b74dddfae4c
--- /dev/null
+++ b/audio/66.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fe09dc5bf67af9a6f92fcf38b02508567fb8ce34984e744908386add67de18f
+size 3113324
diff --git a/audio/67.mp3 b/audio/67.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..25b62c47313ef75995abe35244594e65650334c3
--- /dev/null
+++ b/audio/67.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a5c658f0ee134e0c31a9ca939ad7805a9a76a321f5e3728dd575ce734c250ae
+size 1484396
diff --git a/audio/68.mp3 b/audio/68.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..34e9c6880398dfe1d777ed32bd0b8c82b9802f0f
--- /dev/null
+++ b/audio/68.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73519e4c374f1b7aa73fafe009ab248ad470a0a17e9b522d265af6293a246021
+size 1006406
diff --git a/audio/69.mp3 b/audio/69.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..73bd073932fdb9455d990f341ce98282d850b363
--- /dev/null
+++ b/audio/69.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7ed9e2ba97d3231457b3e699f67130488af59df2827599cecbaa4f054e1ccf1
+size 1524716
diff --git a/audio/70.mp3 b/audio/70.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..67d92439269479d26ceb87aa36b281d1a75a16c7
--- /dev/null
+++ b/audio/70.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff2b40d06add3f07ca26e609ca0fef0270b9f4e72bbfe33a31bf193bcee7e96b
+size 4384556
diff --git a/transcripts/uncorrected/1.txt b/transcripts/uncorrected/1.txt
index a6c4aa0dea473932cb03dfed9978e4ce2702e4a1..990a2d196c38a75d164d10b3eeeba44cdc6415ae 100644
--- a/transcripts/uncorrected/1.txt
+++ b/transcripts/uncorrected/1.txt
@@ -1 +1 @@
-For Frigate Plus, what I want to do is as follows. I'm looking into getting a new, getting a server. And I'm conscious that you want empty labeling and identifying labeling. Both of them. So I'm going to curate those or gather those on the cameras.
And now, and if slash when I do the server upgrade, the home server upgrade, and then I would move over to Freigate and then actually start using the train models.
Worst case scenario it's just $50 and I never actually end up using the stuff but I'm hoping that I will at some point.
\ No newline at end of file
+What is the most cost effective API you could have for... It would be taking a headshot and going image to video, synchronizing it with audio which is diarized. And it would have to be scripted because it's a 90 minute recording so there might be as much as 40 minutes of dialogue for each of the two speakers that would need to be animated.
Is there any API that can do it cost effectively for this amount and what would be the approximate cost?
\ No newline at end of file
diff --git a/transcripts/uncorrected/10.txt b/transcripts/uncorrected/10.txt
index 839741289e07ba1b0cbfd1312e40cff52a27de8a..2a58c5f30c61703af3ed4fd13d9b1c23315f1326 100644
--- a/transcripts/uncorrected/10.txt
+++ b/transcripts/uncorrected/10.txt
@@ -1 +1 @@
-I'd like to create a voice recording app for Ubuntu Linux. The app should have the following functionalities. It's a voice recorder, and it has the essential voice recording functionalities of record, pause, stop, and restart. The restart scraps the current recording in cache and restarts the recorder from zero.
For the transcription process, I'd like to institute the following workflow. We'll use Google Gemini API and ensure that we're using Gemini 2.5, which supports multimodal input, including audio. The recording captured from the user should be optimized for this purpose of voice capture for speech to text. By which I mean, I would suggest that we record in mono. We capture the recording in a space-efficient format. We're optimizing for creating a voice recording that is not necessarily the greatest and most detailed of audio clarity, but which strikes the best balance between quality and space efficiency for transcription.
The voice recording will get sent to Gemini for transcription with a system prompt that instructs it to transcribe and also clean up the recording by removing filler words, adding sentence structure, and adding spaces. There can be a second button which is called transcribe and optimize, and the transcribe and optimize workflow is the same except that the system prompt is a little bit more instructive and it tells Gemini, in addition to those steps, to remove filler words, add sentence structure, paragraph spacing, and try to optimize the text by adding headings, organizing the thoughts a bit, and removing repetition, so it's a little bit more aggressive.
In both cases, the transcription, when it returns from Gemini API, will populate into a... In fact, Gemini should return two things, a title and a text. The title is a short title for the voice note. The text is short is the text, and so on. The text is formatted in Markdown; it should appear within the Markdown within the text editor. There should also be a clipboard button, and finally, the user should be able to save the note.
When the user chooses to save the note, it will get saved to a predetermined folder which the user selects as where they save voice notes on their operating system. And it's saved there as a Markdown file with the title in machine-friendly format. So if the voice note title has spaces, the saved file name will just replace those with hyphens.
The app would be run repetitively such that if the user wants to record a new note, they start again, and when they do transcribe or transcribe and optimize, it will send and then overwrite the previous transcription. So the user has to click the save button in order to save it, or there can be an option for auto-saving configurable as a user setting.
\ No newline at end of file
+So I'd love to get your thoughts on the following. There's a tweet from Sam Altman that he wrote a few years ago and it's aged quite well as they say. He was announcing the release of ChatGPT and maybe an early iteration of ChatGPT, maybe 3 or 3.5 or something like that. Maybe even an earlier one. And the tweet went something like, it's our conversational, or first it's a conversational model or something.
And what's interesting to me about this is that I discovered AI through ChatGPT or got excited about it through that interface. And then from there worked back to more instructional workloads as then I used it as a chat interface, then began using LLMs through their API endpoints and then began using them programmatically and scripting and using them on my local computer. And now I doing much more of that than I am using them as chatbots.
I know a lot of people, I think even people who are pretty technically literate, aren't really aware that there's, that there's, AI can be used in this way. But what's interesting about that tweet I mentioned is it inferred that instructional models actually predate conversational models. In other words, that I think what he was saying was that OpenAI had developed GPT firstly for instruction following, and then they sort of refined it for conversation.
And what I'm curious to know is, is that accurate that instructional models predate conversational models and if so by sort of how long?
\ No newline at end of file
diff --git a/transcripts/uncorrected/11.txt b/transcripts/uncorrected/11.txt
index b7896c7f96af437ec44fecaba4cd587b9fd8c785..7ade92ea48527be48c9ed28805bb0153509bb3a1 100644
--- a/transcripts/uncorrected/11.txt
+++ b/transcripts/uncorrected/11.txt
@@ -1 +1 @@
-Okay, so the basic validation of the app is good. It functions according to spec.
I'd like to just remove the emojis and please take a look at the screenshots of the app as it's currently implemented and see if you can think of any design and UI optimizations that would make it even more friendly to use.
For transcribe and optimize, we definitely would like to have a label transcribe and optimize.
Maybe let's have a homer text or an about section where we describe to users the differences between these two functions.
\ No newline at end of file
+Here's my idea for an AI podcast workflow. I think if it's just questions summarized by AI and people know that the whole thing is text to speech, it's a little bit off-putting because people think I don't want to listen to just a robot speaking the whole time.
I think if the podcast format was that my voice prompt actually makes it into the final output so it starts with me recording a voice prompt as I'm doing now, then that gets transcribed. Then the rest of the workflow is the same, but what I do for the actual episode render is I combine my voice prompt with the AI response. So that you really get the feeling that it's me actually asking something that's definitely not AI. That I'm an identifiable person speaking. And then the podcast goes from there.
I think it would be more effective and more impressive and more enjoyable to listen to.
\ No newline at end of file
diff --git a/transcripts/uncorrected/12.txt b/transcripts/uncorrected/12.txt
index c4062c3c839f500b2242b1b7628a7ef9e4bd26f0..ed50ed359bc55372aab37746585a31f7525ccc9a 100644
--- a/transcripts/uncorrected/12.txt
+++ b/transcripts/uncorrected/12.txt
@@ -1 +1 @@
-I would like to create a docs folder in this repository.
The docs folder should be separate from the code and it will be the place in which documentation is gathered.
Ask the user if there is any specific functionalities or aspects of the application that the user wishes to document in this folder.
The docs folder should be mentioned and linked in the readme, directing users to it for more extensive documentation than can be found in the readme itself.
\ No newline at end of file
+I will try to build. What I want to build is this: I don't know, is there a name for this kind of workflow? So let's say I go out taking B-roll. Now, right now I'm using a lot of it for populating my own library, and sometimes I share it with stock libraries. And usually, they strip the sound. I like to have a workflow in which, well, my ideal workflow would probably be something like this.
Let's say I have a folder full of media and P4 files. I can usually end up with a few mistakes, unintentional takes, and those usually would be like kind of less than five seconds duration. Usually, I just eyeball and I look for the ones with a small file size that's too small. Next thing I like to do would be stripping out the audio, batching, putting the video into its own folder, and then maybe, because for stock I'm shooting it handheld, it should be stabilized. So, stabilization.
So it's basically a pipeline. And my question is this: can this be done? But if I want to build a few pipelines like this, this is, let's say, my stock video pipeline. I might have another pipeline for sorting, so I might have a few media pipelines, and I don't want to have to go every time into a repository and run it. But it does make sense that it's just a script, basically.
So what's the best way to have a few scripts? I'm basically asking what's a good GUI for this kind of workflow? I want to have my media folders, and then I want to say run this script within this folder, and that would take the TDM out of setting up and resetting up environments and Python and all the rest of it. So what would you recommend as a tool for doing that?
\ No newline at end of file
diff --git a/transcripts/uncorrected/13.txt b/transcripts/uncorrected/13.txt
index ec565a8b602b1abb235b4c8a5616370d701f5be7..acc8d62d6d5b71235676ccf824c7860bf8c12d53 100644
--- a/transcripts/uncorrected/13.txt
+++ b/transcripts/uncorrected/13.txt
@@ -1 +1 @@
-Please go through the markdown files in this repository to make sure that no emojis have been used.
If you find any emojis, remove them.
If emojis have been used in place of proper icons, then identify an appropriate icon library that could be used to provide the emojis.
Remember that if the icons are well known, such as the icons from major social networks, these should be integrated via a pre-designed library.
Do not attempt to create custom once-off SVGs for any logo that likely already exists in a professional library.
\ No newline at end of file
+I have a question here. I was exploring lately, getting up earlier, and it always really appealed to me. The idea of getting in sync with the sun, like the natural diurnal cycle. Stricadian rhythm, when the sun goes down approximately that's when you get ready for bed. When the sun comes up, that's maybe when you get ready, that's when you get up. But that would require, in the winter time at least, here, where I live, going to bed as early as, I mean I guess it depends. Whether you'd want to go to bed immediately at sundown, I think that's probably not realistic, and a couple of hours later. But even if you did the latter, you'd be talking about going to bed at like 8 o'clock in the winter, maybe as early as 7.
Now my question is, my interest in this really comes from a question I've always wondered or thought about, which is that until relatively recently there was no such thing as artificial illumination that you could click on with a switch in your home at least, and even the concept of street lighting being totally reliable and totally every street in a developed city being covered in street lighting, that was also a foreign concept. So in the evolution of humans, it seems to me it must be the case that this is a very recent adaptation.
So my question is really, from the historical record, what do we know about the kind of sleep cycle that humans gravitate to naturally when there isn't alternative lighting? Artificial lighting. Thanks for watching!
\ No newline at end of file
diff --git a/transcripts/uncorrected/14.txt b/transcripts/uncorrected/14.txt
index 97dc205e9d7b77068f580705263f66d3a0ce82b0..8a430e2e093e18c208dfeff13e05eabe999f06dd 100644
--- a/transcripts/uncorrected/14.txt
+++ b/transcripts/uncorrected/14.txt
@@ -1 +1 @@
-Go through the website and see any place in which icons have been implemented which were custom designed but which could have been implemented more efficiently through using an existing icon library.
Pay particular attention to icons for common uses such as social media icons which exist in many libraries, as well as emojis which may have been used in place of icons.
This approach should not be followed.
If the user uses an existing icon library that you can identify, then replace the custom coded icons with the most appropriate matches.
If the user hasn't yet implemented an icon library, provide some suggestions to the user, focusing on those libraries which will best match the aesthetic which they are following in their designs.
\ No newline at end of file
+I have a Nord 3 5G and I'm looking for a power bank. It supports this fast charging protocol. I think it's called SuperVOOC. And I was looking for a power bank that could basically charge it as quickly as possible, deliver the fastest charging that it can support from a non-AC outlet.
I got one from Bezeus before. I don't know what it was, it was 65W, I don't know if that's relevant for mainly smartphones or if it's just for laptops. But in any case, I think I've lost that power bank, so I need a new one.
Now I guess what I would probably like is the biggest capacity that you can fit into a power bank form factor. By which I mean, at a certain point, we're not really mobile, they make these power stations I think they're called. So the biggest thing you can get, and not an exaggerated spec but a real credible spec in terms of the mAh.
And the quickest, the combination of the quickest charging and the biggest capacity for this particular phone. Anything you'd recommend from Mosaic or other, let's say more credible manufacturers?
\ No newline at end of file
diff --git a/transcripts/uncorrected/15.txt b/transcripts/uncorrected/15.txt
index 0f9f01aeb1efa9b56a188dbecffed93a32cfd7c5..839741289e07ba1b0cbfd1312e40cff52a27de8a 100644
--- a/transcripts/uncorrected/15.txt
+++ b/transcripts/uncorrected/15.txt
@@ -1 +1 @@
-This repository contains a collection of slash commands which I use with Claudecode.
I capture some of the slash commands using speech to text.
The slash commands that have been captured with dictation frequently lack elements like punctuation, paragraph spacing, and they may contain occasionally words that were mistranscribed.
Please recurse through the directories and correct slash commands which you can find which were missing these basic textual features but do not limit your fixes to only I don't want to go into those containing these defects but rather consider in your editing any slash commands which need to be rewritten for optimal intelligibility.
\ No newline at end of file
+I'd like to create a voice recording app for Ubuntu Linux. The app should have the following functionalities. It's a voice recorder, and it has the essential voice recording functionalities of record, pause, stop, and restart. The restart scraps the current recording in cache and restarts the recorder from zero.
For the transcription process, I'd like to institute the following workflow. We'll use Google Gemini API and ensure that we're using Gemini 2.5, which supports multimodal input, including audio. The recording captured from the user should be optimized for this purpose of voice capture for speech to text. By which I mean, I would suggest that we record in mono. We capture the recording in a space-efficient format. We're optimizing for creating a voice recording that is not necessarily the greatest and most detailed of audio clarity, but which strikes the best balance between quality and space efficiency for transcription.
The voice recording will get sent to Gemini for transcription with a system prompt that instructs it to transcribe and also clean up the recording by removing filler words, adding sentence structure, and adding spaces. There can be a second button which is called transcribe and optimize, and the transcribe and optimize workflow is the same except that the system prompt is a little bit more instructive and it tells Gemini, in addition to those steps, to remove filler words, add sentence structure, paragraph spacing, and try to optimize the text by adding headings, organizing the thoughts a bit, and removing repetition, so it's a little bit more aggressive.
In both cases, the transcription, when it returns from Gemini API, will populate into a... In fact, Gemini should return two things, a title and a text. The title is a short title for the voice note. The text is short is the text, and so on. The text is formatted in Markdown; it should appear within the Markdown within the text editor. There should also be a clipboard button, and finally, the user should be able to save the note.
When the user chooses to save the note, it will get saved to a predetermined folder which the user selects as where they save voice notes on their operating system. And it's saved there as a Markdown file with the title in machine-friendly format. So if the voice note title has spaces, the saved file name will just replace those with hyphens.
The app would be run repetitively such that if the user wants to record a new note, they start again, and when they do transcribe or transcribe and optimize, it will send and then overwrite the previous transcription. So the user has to click the save button in order to save it, or there can be an option for auto-saving configurable as a user setting.
\ No newline at end of file
diff --git a/transcripts/uncorrected/16.txt b/transcripts/uncorrected/16.txt
index 35a25c66c27c2d44f0a64ca785442bcb2b03db07..b7896c7f96af437ec44fecaba4cd587b9fd8c785 100644
--- a/transcripts/uncorrected/16.txt
+++ b/transcripts/uncorrected/16.txt
@@ -1 +1 @@
-This repository contains a folder of screenshots.
The intended use of the screenshots is that they will be integrated into the README or other documentation to demonstrate the UI of the app.
It's important therefore that the screenshots have descriptive file names.
Please rename the screenshots for this purpose and integrate them into the README in the most appropriate section.
\ No newline at end of file
+Okay, so the basic validation of the app is good. It functions according to spec.
I'd like to just remove the emojis and please take a look at the screenshots of the app as it's currently implemented and see if you can think of any design and UI optimizations that would make it even more friendly to use.
For transcribe and optimize, we definitely would like to have a label transcribe and optimize.
Maybe let's have a homer text or an about section where we describe to users the differences between these two functions.
\ No newline at end of file
diff --git a/transcripts/uncorrected/17.txt b/transcripts/uncorrected/17.txt
index c3e6aec46313e6c703697e4fcc48f050db3015c1..c4062c3c839f500b2242b1b7628a7ef9e4bd26f0 100644
--- a/transcripts/uncorrected/17.txt
+++ b/transcripts/uncorrected/17.txt
@@ -1 +1 @@
-What's the most professional way to install a package on Linux? If I create an executable and copy that into the directory on path, such that I can call it, is that considered a worse way to install applications than through a Debian package?
\ No newline at end of file
+I would like to create a docs folder in this repository.
The docs folder should be separate from the code and it will be the place in which documentation is gathered.
Ask the user if there is any specific functionalities or aspects of the application that the user wishes to document in this folder.
The docs folder should be mentioned and linked in the readme, directing users to it for more extensive documentation than can be found in the readme itself.
\ No newline at end of file
diff --git a/transcripts/uncorrected/18.txt b/transcripts/uncorrected/18.txt
index 72dd47f2927e95f6a555120604796efb0f7010e8..ec565a8b602b1abb235b4c8a5616370d701f5be7 100644
--- a/transcripts/uncorrected/18.txt
+++ b/transcripts/uncorrected/18.txt
@@ -1 +1 @@
-Your task is to take this system prompt and rewrite it for implementation in a structured AI system.
In order to do so, adhere to the following instructions.
Within the text of the prompt itself, define the The JSON output that the AI should be constrained to giving.
And instruct the AI tool that it is working in a structured workflow and must only return valid JSON.
Create a folder for the prompt.
And add in addition to the rewritten prompt text.
You should also create a .json file containing an Open API compliant JSON schema and finally and you create another JSON called object.json which contains just the JSON object.
\ No newline at end of file
+Please go through the markdown files in this repository to make sure that no emojis have been used.
If you find any emojis, remove them.
If emojis have been used in place of proper icons, then identify an appropriate icon library that could be used to provide the emojis.
Remember that if the icons are well known, such as the icons from major social networks, these should be integrated via a pre-designed library.
Do not attempt to create custom once-off SVGs for any logo that likely already exists in a professional library.
\ No newline at end of file
diff --git a/transcripts/uncorrected/19.txt b/transcripts/uncorrected/19.txt
index 76af9ed38a7f3a464480738293afb78a25ff5929..97dc205e9d7b77068f580705263f66d3a0ce82b0 100644
--- a/transcripts/uncorrected/19.txt
+++ b/transcripts/uncorrected/19.txt
@@ -1 +1 @@
-Okay, so here is the type of license that generally work for me for open source projects. I usually open source software because I've created something useful. I think other people might either find it helpful or develop upon the idea to do it to take my idea and ability further. Attribution is always appreciated but I'd only want to make it mandatory if that wouldn't really sort of create friction with other people who'd like to use a project.
But attribution really helps me because it opens up the relationship and connectedness of open sourcing because if someone were to use it downstream, they have a way to sort of get in touch with me. People commercializing open source software doesn't sit very well with me, but again, it's only if it's, I'd be very reluctant to add that as a limitation.
Other than that, nothing else really stands out to me as something that I'd require. Like if people took it in any other direction, it's fine. The only one I think about sometimes is obviously no one wants something that creates to be sort of misused or used for harm. And one also doesn't want to end up with lawsuits if something they create is misused, so I don't know if there's any legal language that can create a little bit of protection around those potentials.
\ No newline at end of file
+Go through the website and see any place in which icons have been implemented which were custom designed but which could have been implemented more efficiently through using an existing icon library.
Pay particular attention to icons for common uses such as social media icons which exist in many libraries, as well as emojis which may have been used in place of icons.
This approach should not be followed.
If the user uses an existing icon library that you can identify, then replace the custom coded icons with the most appropriate matches.
If the user hasn't yet implemented an icon library, provide some suggestions to the user, focusing on those libraries which will best match the aesthetic which they are following in their designs.
\ No newline at end of file
diff --git a/transcripts/uncorrected/2.txt b/transcripts/uncorrected/2.txt
index 57ee9e7328b60a23b4d9d39ea97021e9d3ff8e2d..23d92bc8dc9bfa4e504b3ce95a3ba1c52d761928 100644
--- a/transcripts/uncorrected/2.txt
+++ b/transcripts/uncorrected/2.txt
@@ -1 +1 @@
-So, I have a question. For image to video, it's currently expensive, very expensive actually. I'm trying to find a way. So I found the WAN models, which are by Alibaba. I find them to be very good, and they have a more affordable WAN model that I like using. And when I'm doing a video, I frequently gather up my images, gather up my prompts, and I move in towards a workflow by which I kind of do the storyboarding, gather the source material as I call it, the photos. Gather the prompt together, and then I will run it as a script, which is a very novel way for me of approaching content creation in the sense that it's programmatic and it's code first.
Which is a strange way to approach a creative process, but it works. And it seems to me at the moment to be the most effective way to do this because otherwise, before this, I was using a playground, running them one by one, importing them to a video editor, and it's just a lot slower that way. Now the issue is that image to video, as I mentioned, is expensive. And if I'm doing these projects for fun, I have a lot of ideas I want to do for fun. But even the cheaper WAN models are in the region of 10 to 15 cents per generation, which could easily, it's very easy to go through 20 or even 50 dollars, especially given the fact that frequently you need to generate the same prompt multiple times before you get a satisfactory result.
I really, really want to explore image to video, and I'm trying to find a way to have an affordable way to play around with it even if it's not the best model. And you know, so what I've been thinking of is I come across for a while providers like RunPod who do make GPU available either in serverless functions or they do per hour pricing on GPUs. And since I discovered Replicate and FAL, I've kind of wondered, well, if you can just make an API call, why go to the trouble of managing an instance of a machine? I'm thinking now that it might be the cost reason that if the machines are a certain price per hour, it might actually be a lot more cost-effective than using an API.
So my question is, firstly, is that the case? Is a frequent reason that people actually do these or use these services for cost mitigation? And so on. So that's the first thing. Secondly, serverless versus pods as RunPod calls them. I guess serverless almost makes more sense to me because you just pay for what you use and you don't need to worry about starting and stopping the pod and configuring auto shutdown policies. So what’s the reason that people go for pods over serverless?
And finally, if I want to do this, probably the objective would be, is there a way that you can have like your own API endpoint and that's running stuff on the serverless function in the backend? And what I get confused about for these things, the first time I did it, if I'm not mistaken, I did it with video generation. The video actually generated on my local, which seems almost like magic to me. So you're doing the actual inference rendering in the cloud. And is it just the case when that happens? And so on. And then just running my script and then I'm using on-demand compute.
\ No newline at end of file
+What is the safety of drinking water from a bathroom faucet in Israel specifically? Is it in an area where you know the water is no issues with water supply, it's a residential apartment? Is the water always legally potable?
\ No newline at end of file
diff --git a/transcripts/uncorrected/20.txt b/transcripts/uncorrected/20.txt
index b57417cde1c5303a489404bcd259f827ea2cf7a6..0f9f01aeb1efa9b56a188dbecffed93a32cfd7c5 100644
--- a/transcripts/uncorrected/20.txt
+++ b/transcripts/uncorrected/20.txt
@@ -1 +1 @@
-The problem is that we looked at this before and when it reboots the router it's not bringing up the Cloudflare tunnel.
So see it's working now, but just see what can be done to make sure that this, we need to make very certain that this does start automatically on reboot.
\ No newline at end of file
+This repository contains a collection of slash commands which I use with Claudecode.
I capture some of the slash commands using speech to text.
The slash commands that have been captured with dictation frequently lack elements like punctuation, paragraph spacing, and they may contain occasionally words that were mistranscribed.
Please recurse through the directories and correct slash commands which you can find which were missing these basic textual features but do not limit your fixes to only I don't want to go into those containing these defects but rather consider in your editing any slash commands which need to be rewritten for optimal intelligibility.
\ No newline at end of file
diff --git a/transcripts/uncorrected/21.txt b/transcripts/uncorrected/21.txt
index 4b73fda258009e56d8fc1e8ade93312193c751d0..35a25c66c27c2d44f0a64ca785442bcb2b03db07 100644
--- a/transcripts/uncorrected/21.txt
+++ b/transcripts/uncorrected/21.txt
@@ -1 +1 @@
-I recently picked up a Samsung Galaxy 6 smartwatch just to try out the idea basically.
And my only need was really for a dual time display, local and UTC, and the day display.
It was about $100 give or take, so a very basic entry level that would sync with my OnePlus.
If it turns out that I really like it...
The other requirement was a good microphone for voice recordings.
Even if it's not the best and my phone is better, it would be nice to be able to use it for that because I take a lot of voicemails during the day.
If I turn out to really like it, what would you suggest as a good upgrade?
I tend to like more everything that's getting under the hood with technology.
So I wasn't thrilled about buying a Samsung, but it was what was available for the price point approximately.
\ No newline at end of file
+This repository contains a folder of screenshots.
The intended use of the screenshots is that they will be integrated into the README or other documentation to demonstrate the UI of the app.
It's important therefore that the screenshots have descriptive file names.
Please rename the screenshots for this purpose and integrate them into the README in the most appropriate section.
\ No newline at end of file
diff --git a/transcripts/uncorrected/22.txt b/transcripts/uncorrected/22.txt
index dd20ba87d4e27f321810d0504c0736c4e154d407..c3e6aec46313e6c703697e4fcc48f050db3015c1 100644
--- a/transcripts/uncorrected/22.txt
+++ b/transcripts/uncorrected/22.txt
@@ -1 +1 @@
-I recently picked up a smartwatch from Samsung Galaxy and I'm curious one thing that would be really helpful that I thought of.
I'm always stressed about losing or potentially losing phone wallet keys.
And for all of these things, Fun Walla Keys, I use Pebble Bee Tracker now.
So I'm wondering if there's any way or app that can do something like geofencing in which if any of the things are...
Maybe you can turn it on and off at certain times but they're in.
If they move out of the zone you get an alert notification if the smartwatch vibrates or whatever.
\ No newline at end of file
+What's the most professional way to install a package on Linux? If I create an executable and copy that into the directory on path, such that I can call it, is that considered a worse way to install applications than through a Debian package?
\ No newline at end of file
diff --git a/transcripts/uncorrected/23.txt b/transcripts/uncorrected/23.txt
index 1c658e5f3d7436116c6a372301158c4d76aff497..72dd47f2927e95f6a555120604796efb0f7010e8 100644
--- a/transcripts/uncorrected/23.txt
+++ b/transcripts/uncorrected/23.txt
@@ -1 +1 @@
-Something that would be very useful would be the following. So I use an app called Voice Notes for Android. And it's a voice recording app. It's called Voice Notes. Now, it has one fatal flaw, in my opinion, which is that it doesn't have Bluetooth support. So when I'm out and about, like now, I literally hold the phone up to my mouth, and it certainly gets me much, much, much better recording quality, but I kind of look a little bit goofy and I feel very self-conscious.
So there's two things I've thought about. One is finding a voice recording app that has more robust Bluetooth support. I think there are two options really that I'm thinking of. The first is finding, as I said, a voice recorder with very robust Bluetooth support and using a Bluetooth microphone to record with. The alternative, because I'm seeing these products come to market increasingly, is to use a wearable Android device, which probably wouldn't be that different, maybe even physically. And I think the more I think about it, the more I think about it, the more I think about it, the more flexibility. Rather than being a Bluetooth accessory, it's running, I guess, Wear OS, and maybe that would give you more flexibility.
I'm trying to think of the pros and cons on which would be better. I veered towards the wearable approach as it seems to be what's where. I don't know where the market is going with this concept, but I'm curious to know what your thoughts are regarding the pros and cons.
\ No newline at end of file
+Your task is to take this system prompt and rewrite it for implementation in a structured AI system.
In order to do so, adhere to the following instructions.
Within the text of the prompt itself, define the The JSON output that the AI should be constrained to giving.
And instruct the AI tool that it is working in a structured workflow and must only return valid JSON.
Create a folder for the prompt.
And add in addition to the rewritten prompt text.
You should also create a .json file containing an Open API compliant JSON schema and finally and you create another JSON called object.json which contains just the JSON object.
\ No newline at end of file
diff --git a/transcripts/uncorrected/24.txt b/transcripts/uncorrected/24.txt
index 59abde81206328bbd33b6fe792b0dcf161a7d148..76af9ed38a7f3a464480738293afb78a25ff5929 100644
--- a/transcripts/uncorrected/24.txt
+++ b/transcripts/uncorrected/24.txt
@@ -1 +1 @@
-So there's a lot of these AI voice pins emerging onto the market which are designed to be wearable devices.
So I record as I'm doing now quite a number of voice notes when I'm out of the house.
I use an Android app called Voice Notes that I really like but it doesn't have support yet for Bluetooth microphones.
At least not support that's reliable.
So I have to hold the phone up to my mouth, which really kind of degrades the experience.
As I started, I want to actually start doing, going on walks expressly for usually the moment I do this when I'm going places.
I just happen to think, but I actually want to start taking walks to jot down some ideas as a healthier way of combining work and getting out and getting some exercise and getting some sunlight.
And for that it would be really nice to not have to, you know, be holding up a phone to your mouth for 30 minutes or an hour or whatever it may be.
So I was thinking about wearable voice recorders but a lot of them from what I've seen are these kind of closed ecosystems in which they sell you can't just buy the hardware.
They'll sell you, they'll do like onboard transcription or they'll sell you like a Cloud Transcription Bundle.
I'm really not a fan of on-device transcription.
I mean I think it works but in my experience it doesn’t make a lot of sense to me just architecturally.
I think why do stuff on device that can be done in the cloud cost effectively?
And you got, you know, you can run vastly more powerful models in the cloud.
You don’t have to worry about quantizing models on a very, very small piece of hardware.
And so I guess what would be great for me, but Android, when you're looking at wearables, Android's like the obvious sync partner.
So you just need to get the voice of the audio data from the recording thing to Android and from there you can push to the cloud and then the rest is back-end speech and text.
So what I'm saying is that I'd love a modular solution that could do this.
A pin that is just hardware, just recording this audio sync, maybe has its own app, or maybe can be used preferably with third-party apps.
And therefore you can kind of build your own voice recording stack around it, and you can use your existing Speech-to-Docs transcription workflow.
And you don't have to subscribe to these very kind of, I forget the word, walled gardens in which the vendor chooses your force into this package that's often very unnecessarily expensive and you're paying mostly for overpriced transcription.
I'd prefer to just get, invest in good hardware!
\ No newline at end of file
+Okay, so here is the type of license that generally work for me for open source projects. I usually open source software because I've created something useful. I think other people might either find it helpful or develop upon the idea to do it to take my idea and ability further. Attribution is always appreciated but I'd only want to make it mandatory if that wouldn't really sort of create friction with other people who'd like to use a project.
But attribution really helps me because it opens up the relationship and connectedness of open sourcing because if someone were to use it downstream, they have a way to sort of get in touch with me. People commercializing open source software doesn't sit very well with me, but again, it's only if it's, I'd be very reluctant to add that as a limitation.
Other than that, nothing else really stands out to me as something that I'd require. Like if people took it in any other direction, it's fine. The only one I think about sometimes is obviously no one wants something that creates to be sort of misused or used for harm. And one also doesn't want to end up with lawsuits if something they create is misused, so I don't know if there's any legal language that can create a little bit of protection around those potentials.
\ No newline at end of file
diff --git a/transcripts/uncorrected/25.txt b/transcripts/uncorrected/25.txt
index 65b441258e93b436681d73b0928dd3ea5da97777..b57417cde1c5303a489404bcd259f827ea2cf7a6 100644
--- a/transcripts/uncorrected/25.txt
+++ b/transcripts/uncorrected/25.txt
@@ -1 +1 @@
-I picked up a Samsung Galaxy FE watch. I checked compatibility, smartwatch. I think it's in the 7 series if I'm not mistaken. What is it exactly? It's a 40mm smartwatch. Where does it fit in their line up? What's the difference between this and the Watch 7? I just went for this one because it was what was in stock.
And is it shower proof, waterproof? And I know it's a glass display. So I'm wondering how tough is the glass? Or is it tough at all? I just asked because it's a fitness watch. I assume they make them a little bit more ruggedized, but maybe that's not the case. What does it say?
\ No newline at end of file
+The problem is that we looked at this before and when it reboots the router it's not bringing up the Cloudflare tunnel.
So see it's working now, but just see what can be done to make sure that this, we need to make very certain that this does start automatically on reboot.
\ No newline at end of file
diff --git a/transcripts/uncorrected/26.txt b/transcripts/uncorrected/26.txt
index b51e9cf9eacfa8f539ba2c6270fbbbdcb80adeda..4b73fda258009e56d8fc1e8ade93312193c751d0 100644
--- a/transcripts/uncorrected/26.txt
+++ b/transcripts/uncorrected/26.txt
@@ -1 +1 @@
-So there has been this vast development in multimodal AI recently. I signed up for Replicate and FAL AI. And what really strikes me is not only the diversity and number of models out there, but also the large number of permutations in multimodal AI, meaning what input can go to what output. And I think what I find difficult about it at the moment to navigate as a, let's say, creator. I created a few music videos just as kind of fun experiments. Is that there's so many different models. Like just in, let's say, the one series, there is maybe 20 different models to choose from in FAL, but they all do slightly different things not only in terms of the resolution and the parameter and the max duration but also in terms of the modalities, and they don’t really allow you to filter on this at the moment.
So what I mean by that is if we take an image to video model that animates still images to video, one model in one might create video without audio and another might create video with audio. And that's a very significant difference. But there's also a significant difference in do I prompt for the audio? In other words, is it going to be text to audio and render out audio that then gets added to video? Is it reference audio and reference image? So when you begin opening, all these differences really matter because I might want to filter on ideally, let's say I wanted to look at image to video models, which could generate lip sync to audio from a prompt. That might be one use case as well as the video.
In another use case, I might want to create a dialogue video. Let’s say I have a still image of a crowded market in Jerusalem, and I might want to print something like create a video from this image; the background soundtrack is background conversation noise in a bustling marketplace with vendors yelling out sales prices. That's just an example of the kind of background noise and the ambient noise that we have in this market I'm thinking about.
So what I would like to do, I created this repository which I created here. I'm trying to think of a taxonomy for multimodal, really for my own reference, but also as an open source project. Exploring the permutations of multimodal that are possible. So in the preceding example, we might have one definition of a modality might be still image to video without audio. Another modality, and then the description. Another modality might be still image to audio without lip sync. Another modality might be still image to video with lip sync.
But then you might have some sub modalities being still image to video with lip sync with reference image, that a reference to image. Another sub modality there might be still image to video with reference character reference in video. Another might be still image to video with audio with character reference through a LoRa (L-O-R-A). And I reckon that if we really enumerated the modalities we might get to hundreds if not thousands of different ones. For example, in FAL, just to talk about the long tail, there's music to music, which is music in painting. There's audio in painting, well, yeah, audio in painting, which I'm thinking aloud here is, I guess, distinguished music in painting is a subset of audio in painting, that it's melodic.
So that's the objective. I think that the JSON is the obvious format in which to attempt to denote these. And what I'd like you to do as the task definition is try to do this basically. Try to enumerate, list out a hierarchy, some kind of taxonomy representation that makes sense. We could try to create a baseline and then explore various ways of mapping out the hierarchy, manipulating the JSON so that we look at different ways of organizing it. So I think it would be useful to have like a first entry JSON in which we, and later maybe I, as new modalities come to, and we can maybe have very interesting labels might be their point of maturity, example workflows, use cases, etc. There's an awful lot that could be explored within these parameters.
\ No newline at end of file
+I recently picked up a Samsung Galaxy 6 smartwatch just to try out the idea basically.
And my only need was really for a dual time display, local and UTC, and the day display.
It was about $100 give or take, so a very basic entry level that would sync with my OnePlus.
If it turns out that I really like it...
The other requirement was a good microphone for voice recordings.
Even if it's not the best and my phone is better, it would be nice to be able to use it for that because I take a lot of voicemails during the day.
If I turn out to really like it, what would you suggest as a good upgrade?
I tend to like more everything that's getting under the hood with technology.
So I wasn't thrilled about buying a Samsung, but it was what was available for the price point approximately.
\ No newline at end of file
diff --git a/transcripts/uncorrected/27.txt b/transcripts/uncorrected/27.txt
index 7691086737e7862b23604ec7c3b5a56071521899..dd20ba87d4e27f321810d0504c0736c4e154d407 100644
--- a/transcripts/uncorrected/27.txt
+++ b/transcripts/uncorrected/27.txt
@@ -1 +1 @@
-Look at the Facer's, I'm really surprised for no one's made a Hebrew date watch on the Facer creator, but it's probably the developer studio from Samsung is the way to go for that. And I want to edit, like the one that I have slightly, I can't find the perfect one, people put too much on them. I'm looking at the face I got from Facer now and they've added temperature, sunrise, sunset, neither of which work, I guess the integrations don't work, but who wants that on their watch? These are all like anti-simplicity. I just want... It's almost perfect, but they added these stupid unnecessary features.
Maybe on the Facer creator marketplace, I can just create one that I want. Maybe that will actually work. That's probably the easiest way to go. But if that doesn't work, I can create one on Github and open sources, the font that I want, but the Hebrew one would be very special to me. It's definitely possible.
I'm looking at my desktop display. It says 30 Tishra 5786. So for sure from the HIPAA Cal API the data source is there. And I looked last night and it seemed that people only had created sort of ones for from a very different reason.
The VoiceNote data set I really want to create as well. That's actually a very important project, the GUI for adding that I have a backlog of literally thousands and it would form the basis for my classification model which I should probably note out and that's a real model I can build for the idea as well.
\ No newline at end of file
+I recently picked up a smartwatch from Samsung Galaxy and I'm curious one thing that would be really helpful that I thought of.
I'm always stressed about losing or potentially losing phone wallet keys.
And for all of these things, Fun Walla Keys, I use Pebble Bee Tracker now.
So I'm wondering if there's any way or app that can do something like geofencing in which if any of the things are...
Maybe you can turn it on and off at certain times but they're in.
If they move out of the zone you get an alert notification if the smartwatch vibrates or whatever.
\ No newline at end of file
diff --git a/transcripts/uncorrected/28.txt b/transcripts/uncorrected/28.txt
index eaea5b9166faabd9642d0c97478ecd6f6fd86d89..1c658e5f3d7436116c6a372301158c4d76aff497 100644
--- a/transcripts/uncorrected/28.txt
+++ b/transcripts/uncorrected/28.txt
@@ -1 +1 @@
-Okay, so I've just configured. VS Code is very, very important. I've just configured automatic updates, and I asked Claude, I said, why am I not getting them? Why do I, it says, you're out of date, download the Debian. And I said, I don't want to have to download a Debian every time, and I really want to keep this updated.
So it says, you should know, you need to join the Microsoft ASC, their repo, their third-party repo, which I had before then I think because I removed it as a duplicate.
So to clarify, it's not the case that you need to do this process. It is actually an automatic upgrade thing but you do need to be attached to the Microsoft repo to get those.
\ No newline at end of file
+Something that would be very useful would be the following. So I use an app called Voice Notes for Android. And it's a voice recording app. It's called Voice Notes. Now, it has one fatal flaw, in my opinion, which is that it doesn't have Bluetooth support. So when I'm out and about, like now, I literally hold the phone up to my mouth, and it certainly gets me much, much, much better recording quality, but I kind of look a little bit goofy and I feel very self-conscious.
So there's two things I've thought about. One is finding a voice recording app that has more robust Bluetooth support. I think there are two options really that I'm thinking of. The first is finding, as I said, a voice recorder with very robust Bluetooth support and using a Bluetooth microphone to record with. The alternative, because I'm seeing these products come to market increasingly, is to use a wearable Android device, which probably wouldn't be that different, maybe even physically. And I think the more I think about it, the more I think about it, the more I think about it, the more flexibility. Rather than being a Bluetooth accessory, it's running, I guess, Wear OS, and maybe that would give you more flexibility.
I'm trying to think of the pros and cons on which would be better. I veered towards the wearable approach as it seems to be what's where. I don't know where the market is going with this concept, but I'm curious to know what your thoughts are regarding the pros and cons.
\ No newline at end of file
diff --git a/transcripts/uncorrected/29.txt b/transcripts/uncorrected/29.txt
index ffc57e5992be591a97dbd7ee169ed839fe73e975..59abde81206328bbd33b6fe792b0dcf161a7d148 100644
--- a/transcripts/uncorrected/29.txt
+++ b/transcripts/uncorrected/29.txt
@@ -1 +1 @@
-I want to add to my DSR Holdings a LLM store TXT. It's almost a pity I didn't talk about this with Shlomo, but a radical idea. It actually, I mean, it appears to be working. I don't know if you're sure where I read from if it just parts my home page or read the txt but I asked Claude to pull in some context data about me into the into the file it seemed to work really well so what the thought I had for I mentioned Shlomo and what I thought about for myself is inbound LLM marketing considering AI traffic.
It's a pity I didn't take some in fact I'll add to the DAM a screenshots folder because a perfect example of a screenshot was the last time that I saw a and I sure I see them almost every day A sign up form where they didn ask for was the LLM your referral source I think it's absolutely insanity that anyone, any company would not have LLM as top of their list of referral sources for traffic.
And this opens up a whole world actually of LLM analytics. and you see which LLMs are scraping our site. LLM optimization. And then basically the idea of being LLM as an inbound pipeline. If you did all this well, could you actually view large language models as an inbound traffic source saying Google's dead, LLM is where it's at.
Here's how you can, I mean, I would have to try these approaches on my own site, but all I can do there is keep optimizing and see if someone says, if you typed into ChatGPT in a month and said, I need someone who's good with AI in Jerusalem, Israel. Can you find any profiles? And if it worked, that would almost be the opposite to pursue the outbound track as well for jobs. But as a complementary angle of attack, I think it would be very interesting to see as an experiment even.
\ No newline at end of file
+So there's a lot of these AI voice pins emerging onto the market which are designed to be wearable devices.
So I record as I'm doing now quite a number of voice notes when I'm out of the house.
I use an Android app called Voice Notes that I really like but it doesn't have support yet for Bluetooth microphones.
At least not support that's reliable.
So I have to hold the phone up to my mouth, which really kind of degrades the experience.
As I started, I want to actually start doing, going on walks expressly for usually the moment I do this when I'm going places.
I just happen to think, but I actually want to start taking walks to jot down some ideas as a healthier way of combining work and getting out and getting some exercise and getting some sunlight.
And for that it would be really nice to not have to, you know, be holding up a phone to your mouth for 30 minutes or an hour or whatever it may be.
So I was thinking about wearable voice recorders but a lot of them from what I've seen are these kind of closed ecosystems in which they sell you can't just buy the hardware.
They'll sell you, they'll do like onboard transcription or they'll sell you like a Cloud Transcription Bundle.
I'm really not a fan of on-device transcription.
I mean I think it works but in my experience it doesn’t make a lot of sense to me just architecturally.
I think why do stuff on device that can be done in the cloud cost effectively?
And you got, you know, you can run vastly more powerful models in the cloud.
You don’t have to worry about quantizing models on a very, very small piece of hardware.
And so I guess what would be great for me, but Android, when you're looking at wearables, Android's like the obvious sync partner.
So you just need to get the voice of the audio data from the recording thing to Android and from there you can push to the cloud and then the rest is back-end speech and text.
So what I'm saying is that I'd love a modular solution that could do this.
A pin that is just hardware, just recording this audio sync, maybe has its own app, or maybe can be used preferably with third-party apps.
And therefore you can kind of build your own voice recording stack around it, and you can use your existing Speech-to-Docs transcription workflow.
And you don't have to subscribe to these very kind of, I forget the word, walled gardens in which the vendor chooses your force into this package that's often very unnecessarily expensive and you're paying mostly for overpriced transcription.
I'd prefer to just get, invest in good hardware!
\ No newline at end of file
diff --git a/transcripts/uncorrected/3.txt b/transcripts/uncorrected/3.txt
index a2ad0808542f04e9e26405fe883f5a3a95fa8ce7..5a603b326fb72f83aeac0c5684079b32131729b6 100644
--- a/transcripts/uncorrected/3.txt
+++ b/transcripts/uncorrected/3.txt
@@ -1 +1 @@
-Yeah, I think I would look for... the truth is, I was initially... I have to try out my Cherry Red keyboard, the split one is a long term thing. But in the short term I have to say I've really warmed to MX Brown, and I think at this point I probably would use any MX Brown keyboard without noticing much of a difference from the AliExpress one, which is a brown imitation.
And this frankly one is it's a wired one and what I would like probably I'm thinking at the moment I wanted to set up a binding for cloud code and I think that rather than go down in the macro pad approach, which is one way, one approach certainly, it would be really nice to have a keyboard with built-in macro keys.
I think the MX Red one that I got has about five macro keys and I'm wondering if you can put about, you know, if you put up the entire top of the keyboard or the number pad, which I'm looking at the keyboard now. A lot of the keys that I rarely use are the sound controls, the number operators, pause, scroll lock, print screen. There's probably about 20% of the keyboard that I rarely touch.
Do you have any recommendations for a brown keyboard? Let's say I don't like compact keyboards, so I do like the full-size keyboard. The small keyboards feel cramped to me, but that has a full keyboard section and then maybe fills up some space on the right and along the top with macro keys, and so that rather than adding on micro pads you can just create some assignments on the keyboard itself.
\ No newline at end of file
+They're getting a macro pad or control surface that can be used with Kdenlive by mapping to the keyboard shortcuts that already exist and has three, specifically for color correction.
I think any QMK, whatever it's called, thing could be adapted for it.
\ No newline at end of file
diff --git a/transcripts/uncorrected/30.txt b/transcripts/uncorrected/30.txt
index e9383aa5db79a22c214793ffdd4a93fc6ed49a60..65b441258e93b436681d73b0928dd3ea5da97777 100644
--- a/transcripts/uncorrected/30.txt
+++ b/transcripts/uncorrected/30.txt
@@ -1 +1 @@
-Can I just make a suggestion? Before we proceed in this direction, I think that it definitely is the right content environment. But the reason I've created these is so that we have them ready for recurrent use. So Lama Index is very, very good and would be used for a lot of very versatile.
So before we start, let's update the cond environment to install all the different utilities we might need for tokenizing text, processing markdown, markdown to PDF, PDF splitting, all these different text utilities. Even ImageMagick typesetting utilities. Once we have that ready then we can begin. But let's get that environment good first if we can use a conda.yaml to define it.
In other words, take in the existing environment, make a few edits and then install that. Just remember there's an AMD GPU so it will affect the choice of packages.
\ No newline at end of file
+I picked up a Samsung Galaxy FE watch. I checked compatibility, smartwatch. I think it's in the 7 series if I'm not mistaken. What is it exactly? It's a 40mm smartwatch. Where does it fit in their line up? What's the difference between this and the Watch 7? I just went for this one because it was what was in stock.
And is it shower proof, waterproof? And I know it's a glass display. So I'm wondering how tough is the glass? Or is it tough at all? I just asked because it's a fitness watch. I assume they make them a little bit more ruggedized, but maybe that's not the case. What does it say?
\ No newline at end of file
diff --git a/transcripts/uncorrected/31.txt b/transcripts/uncorrected/31.txt
index 68f0272363ffede253054f91243a4d0b8203d19b..b51e9cf9eacfa8f539ba2c6270fbbbdcb80adeda 100644
--- a/transcripts/uncorrected/31.txt
+++ b/transcripts/uncorrected/31.txt
@@ -1 +1 @@
-Okay, here's just a few more specific things that I want to include. So I see you mentioning hydration drinks, which is very important. Electrolyte tablets become very expensive. So there's a few things I'd like to explore. More cost-effective ways for making them. I think you can buy them as a dry powder is one idea. The second one is a homemade recipe.
The next set of ideas is I really really need to always have some kind of food stuff at home ready to eat. So there's a few things in that regard. A list of a kind of basic pantry shopping list. Obviously optimized for all the dietary recommendations we've discussed here. Suggestions for, and I think protein bars aren't really enough, it needs to be carbohydrate as well. Recipes or suggestions for homemade protein bars for the same reason that they become very expensive to buy them individually.
That's probably the key thing I'm looking for at the moment is to have always on hand the ingredients and ideally like kind of a backup layer like I kind of make these protein bars but I also and that's kind of the fallback but ideally I prefer to obviously eat and so on.
\ No newline at end of file
+So there has been this vast development in multimodal AI recently. I signed up for Replicate and FAL AI. And what really strikes me is not only the diversity and number of models out there, but also the large number of permutations in multimodal AI, meaning what input can go to what output. And I think what I find difficult about it at the moment to navigate as a, let's say, creator. I created a few music videos just as kind of fun experiments. Is that there's so many different models. Like just in, let's say, the one series, there is maybe 20 different models to choose from in FAL, but they all do slightly different things not only in terms of the resolution and the parameter and the max duration but also in terms of the modalities, and they don’t really allow you to filter on this at the moment.
So what I mean by that is if we take an image to video model that animates still images to video, one model in one might create video without audio and another might create video with audio. And that's a very significant difference. But there's also a significant difference in do I prompt for the audio? In other words, is it going to be text to audio and render out audio that then gets added to video? Is it reference audio and reference image? So when you begin opening, all these differences really matter because I might want to filter on ideally, let's say I wanted to look at image to video models, which could generate lip sync to audio from a prompt. That might be one use case as well as the video.
In another use case, I might want to create a dialogue video. Let’s say I have a still image of a crowded market in Jerusalem, and I might want to print something like create a video from this image; the background soundtrack is background conversation noise in a bustling marketplace with vendors yelling out sales prices. That's just an example of the kind of background noise and the ambient noise that we have in this market I'm thinking about.
So what I would like to do, I created this repository which I created here. I'm trying to think of a taxonomy for multimodal, really for my own reference, but also as an open source project. Exploring the permutations of multimodal that are possible. So in the preceding example, we might have one definition of a modality might be still image to video without audio. Another modality, and then the description. Another modality might be still image to audio without lip sync. Another modality might be still image to video with lip sync.
But then you might have some sub modalities being still image to video with lip sync with reference image, that a reference to image. Another sub modality there might be still image to video with reference character reference in video. Another might be still image to video with audio with character reference through a LoRa (L-O-R-A). And I reckon that if we really enumerated the modalities we might get to hundreds if not thousands of different ones. For example, in FAL, just to talk about the long tail, there's music to music, which is music in painting. There's audio in painting, well, yeah, audio in painting, which I'm thinking aloud here is, I guess, distinguished music in painting is a subset of audio in painting, that it's melodic.
So that's the objective. I think that the JSON is the obvious format in which to attempt to denote these. And what I'd like you to do as the task definition is try to do this basically. Try to enumerate, list out a hierarchy, some kind of taxonomy representation that makes sense. We could try to create a baseline and then explore various ways of mapping out the hierarchy, manipulating the JSON so that we look at different ways of organizing it. So I think it would be useful to have like a first entry JSON in which we, and later maybe I, as new modalities come to, and we can maybe have very interesting labels might be their point of maturity, example workflows, use cases, etc. There's an awful lot that could be explored within these parameters.
\ No newline at end of file
diff --git a/transcripts/uncorrected/32.txt b/transcripts/uncorrected/32.txt
index b373213f419ec9b2e4b9ca165f42170441577ed2..7691086737e7862b23604ec7c3b5a56071521899 100644
--- a/transcripts/uncorrected/32.txt
+++ b/transcripts/uncorrected/32.txt
@@ -1 +1 @@
-Okay there's a bunch of memory layer projects now to explore later that are actually it's not longer separation between vector storage and memory which makes sense because it's kind of basically the same server it's offered by API mem0 super memory remember api memories.api that's a good starter list and they can all be integrated and used they'll do the vector backend so I'm using I'm testing it out on the documentary finding one, but just to see the concept and how it works with agency.
\ No newline at end of file
+Looking at Facer, I'm really surprised that no one's made a Hebrew date watch face in the Facer creator; the developer studio from Samsung is probably the way to go for that. And I want to edit the one that I have slightly — I can't find the perfect one; people put too much on them. I'm looking at the face I got from Facer now and they've added temperature, sunrise, sunset, neither of which work — I guess the integrations don't work — but who wants that on their watch? These are all like anti-simplicity. I just want... It's almost perfect, but they added these stupid unnecessary features.
Maybe on the Facer creator marketplace, I can just create one that I want. Maybe that will actually work. That's probably the easiest way to go. But if that doesn't work, I can create one on GitHub and open-source it, with the font that I want. The Hebrew one would be very special to me. It's definitely possible.
I'm looking at my desktop display. It says 30 Tishrei 5786. So for sure, from the Hebcal API, the data source is there. And I looked last night and it seemed that people had only created ones for a very different reason.
The VoiceNote data set I really want to create as well. That's actually a very important project, the GUI for adding that I have a backlog of literally thousands and it would form the basis for my classification model which I should probably note out and that's a real model I can build for the idea as well.
\ No newline at end of file
diff --git a/transcripts/uncorrected/33.txt b/transcripts/uncorrected/33.txt
index 847a19b97210af5a0d79cb54c259b54cbe8103aa..eaea5b9166faabd9642d0c97478ecd6f6fd86d89 100644
--- a/transcripts/uncorrected/33.txt
+++ b/transcripts/uncorrected/33.txt
@@ -1 +1 @@
-Create now a meetings taker, meetings minute producer. It will have the following functionality. The user will upload a recording of meetings, of a meeting that took place. and we'll provide then there will be a section so that's an audio upload functionality the next one will be a meeting participants the user will provide the names and identifying characteristics of people who are audible in the recording so it'll say like for example and there should be Name, Description, Daniel, male voice in the recording, Hannah, female voice in the recording.
Upon receiving both of these things, it will send it to Gemini Multimodal in order to produce two things One is a transcript, slightly cleaned up diaries transcript That's one output and the second one is a minute which is a automatically generated minutes formatted with decisions, action items for each participant.
And then it should be integrated with Google Drive so the user can connect their Google Drive and save them to a folder after they've been generated and view them in the app.
\ No newline at end of file
+Okay, so I've just configured. VS Code is very, very important. I've just configured automatic updates, and I asked Claude, I said, why am I not getting them? Why do I, it says, you're out of date, download the Debian. And I said, I don't want to have to download a Debian every time, and I really want to keep this updated.
So it says, you should know, you need to add the Microsoft .asc key and their repo, their third-party repo, which I had before, but then I think I removed it as a duplicate.
So to clarify, it's not the case that you need to do this process. It is actually an automatic upgrade thing but you do need to be attached to the Microsoft repo to get those.
\ No newline at end of file
diff --git a/transcripts/uncorrected/34.txt b/transcripts/uncorrected/34.txt
index 73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c..ffc57e5992be591a97dbd7ee169ed839fe73e975 100644
--- a/transcripts/uncorrected/34.txt
+++ b/transcripts/uncorrected/34.txt
@@ -1 +1 @@
-I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.
I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations.
\ No newline at end of file
+I want to add an llms.txt to my DSR Holdings site. It's almost a pity I didn't talk about this with Shlomo, but it's a radical idea. It actually, I mean, it appears to be working. I don't know for sure where it reads from — whether it just parses my home page or reads the txt — but I asked Claude to pull in some context data about me into the file and it seemed to work really well. So the thought I had — I mentioned Shlomo — and what I thought about for myself is inbound LLM marketing, considering AI traffic.
It's a pity I didn't take a screenshot — in fact, I'll add a screenshots folder to the DAM — because a perfect example of this was the last time I saw (and I'm sure I see them almost every day) a sign-up form where what they didn't ask for was the LLM as your referral source. I think it's absolute insanity that any company would not have LLMs at the top of their list of referral sources for traffic.
And this opens up a whole world actually of LLM analytics. and you see which LLMs are scraping our site. LLM optimization. And then basically the idea of being LLM as an inbound pipeline. If you did all this well, could you actually view large language models as an inbound traffic source saying Google's dead, LLM is where it's at.
Here's how you can, I mean, I would have to try these approaches on my own site, but all I can do there is keep optimizing and see if someone says, if you typed into ChatGPT in a month and said, I need someone who's good with AI in Jerusalem, Israel. Can you find any profiles? And if it worked, that would almost be the opposite to pursue the outbound track as well for jobs. But as a complementary angle of attack, I think it would be very interesting to see as an experiment even.
\ No newline at end of file
diff --git a/transcripts/uncorrected/35.txt b/transcripts/uncorrected/35.txt
index 24994713fc006cf39dff6433f341d9e5b812c141..e9383aa5db79a22c214793ffdd4a93fc6ed49a60 100644
--- a/transcripts/uncorrected/35.txt
+++ b/transcripts/uncorrected/35.txt
@@ -1 +1 @@
-So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.
What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.
A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner.
\ No newline at end of file
+Can I just make a suggestion? Before we proceed in this direction, I think that it definitely is the right conda environment. But the reason I've created these is so that we have them ready for recurrent use. So LlamaIndex is very, very good and would be used for a lot — it's very versatile.
So before we start, let's update the conda environment to install all the different utilities we might need for tokenizing text, processing markdown, markdown to PDF, PDF splitting, all these different text utilities. Even ImageMagick and typesetting utilities. Once we have that ready then we can begin. But let's get that environment good first; we can use a conda.yaml to define it.
In other words, take in the existing environment, make a few edits and then install that. Just remember there's an AMD GPU so it will affect the choice of packages.
\ No newline at end of file
diff --git a/transcripts/uncorrected/36.txt b/transcripts/uncorrected/36.txt
index 8eb532b0a713565b3b2fae20960656ec0d9e6e2f..68f0272363ffede253054f91243a4d0b8203d19b 100644
--- a/transcripts/uncorrected/36.txt
+++ b/transcripts/uncorrected/36.txt
@@ -1 +1 @@
-Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.
They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.
Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for.
\ No newline at end of file
+Okay, here's just a few more specific things that I want to include. So I see you mentioning hydration drinks, which is very important. Electrolyte tablets become very expensive. So there's a few things I'd like to explore. More cost-effective ways for making them. I think you can buy them as a dry powder is one idea. The second one is a homemade recipe.
The next set of ideas is I really really need to always have some kind of food stuff at home ready to eat. So there's a few things in that regard. A list of a kind of basic pantry shopping list. Obviously optimized for all the dietary recommendations we've discussed here. Suggestions for, and I think protein bars aren't really enough, it needs to be carbohydrate as well. Recipes or suggestions for homemade protein bars for the same reason that they become very expensive to buy them individually.
That's probably the key thing I'm looking for at the moment is to have always on hand the ingredients and ideally like kind of a backup layer like I kind of make these protein bars but I also and that's kind of the fallback but ideally I prefer to obviously eat and so on.
\ No newline at end of file
diff --git a/transcripts/uncorrected/37.txt b/transcripts/uncorrected/37.txt
index 492695d3c04244eba8ee90b40f4d0ed8cbb6793b..b373213f419ec9b2e4b9ca165f42170441577ed2 100644
--- a/transcripts/uncorrected/37.txt
+++ b/transcripts/uncorrected/37.txt
@@ -1 +1 @@
-Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.
And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.
So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.
What this means is you can plug your voice keyboard, which I think is obvious name, into anything. You can have it bound to your desktop for most of the time, you go away for traveling for a while, you pack your box. So it's really analogous to a keyboard.
Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it.
\ No newline at end of file
+Okay there's a bunch of memory layer projects now to explore later that are actually it's not longer separation between vector storage and memory which makes sense because it's kind of basically the same server it's offered by API mem0 super memory remember api memories.api that's a good starter list and they can all be integrated and used they'll do the vector backend so I'm using I'm testing it out on the documentary finding one, but just to see the concept and how it works with agency.
\ No newline at end of file
diff --git a/transcripts/uncorrected/38.txt b/transcripts/uncorrected/38.txt
index acadef7c73d2b38c88ec7b03751c008a67eca4fc..847a19b97210af5a0d79cb54c259b54cbe8103aa 100644
--- a/transcripts/uncorrected/38.txt
+++ b/transcripts/uncorrected/38.txt
@@ -1 +1 @@
-Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.
Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe.
\ No newline at end of file
+Create now a meetings taker, meetings minute producer. It will have the following functionality. The user will upload a recording of meetings, of a meeting that took place. and we'll provide then there will be a section so that's an audio upload functionality the next one will be a meeting participants the user will provide the names and identifying characteristics of people who are audible in the recording so it'll say like for example and there should be Name, Description, Daniel, male voice in the recording, Hannah, female voice in the recording.
Upon receiving both of these things, it will send it to Gemini Multimodal in order to produce two things. One is a transcript — a slightly cleaned-up, diarised transcript. That's one output, and the second one is minutes — automatically generated minutes formatted with decisions and action items for each participant.
And then it should be integrated with Google Drive so the user can connect their Google Drive and save them to a folder after they've been generated and view them in the app.
\ No newline at end of file
diff --git a/transcripts/uncorrected/39.txt b/transcripts/uncorrected/39.txt
index 48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45..73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c 100644
--- a/transcripts/uncorrected/39.txt
+++ b/transcripts/uncorrected/39.txt
@@ -1 +1 @@
-Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.
Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends.
\ No newline at end of file
+I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date, and the pitfall with these apps is that they'll recommend stuff that you've already seen or don't want to watch, so it would have to have some memory. It makes recommendations, preferably one at a time, and I can say, like, add to watch list, add to recommendation list, not interested, or I've seen it, and the app would need to remember these responses so that it doesn't suggest the same thing over and over again.
I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations.
\ No newline at end of file
diff --git a/transcripts/uncorrected/4.txt b/transcripts/uncorrected/4.txt
index e0ca9c5f871fe1db6ec60a09ae492e1cb1614512..b1426296d30ff0869552d23ba87ea8872a0ba3dd 100644
--- a/transcripts/uncorrected/4.txt
+++ b/transcripts/uncorrected/4.txt
@@ -1 +1 @@
-Okay, so for Kdenlive, I wanted to get a macro pad with three toggles for video editing, a control surface in other words. I know that people on use, there's a few macro paths or there's a large community of people who have adapted different things for use with Kdenlive as control panels or control surfaces as they're called.
I have a friend who is a photographer and he bought an off-the-shelf controller and used it as a control surface for something else. And it made me think, is there anything that people commonly use for Kdenlive? What would be really helpful would be the three wheels for color correction, which would probably be... Those are, I guess, kind of toggles, and then scroll wheels for three scroll wheels, and it's always in pairs of three for that. But yeah, those are the ones that people commonly use and like.
\ No newline at end of file
+I want to order a book about Python today and work through that course and see if the one that I'm paying for in Pluralsight has a Python course.
Python is real community, Python is real conferences, Twitter accounts, subreddits, map out the ecosystem for learning this really really thoroughly.
But beyond 3.13 for example, just like a big Reference.
\ No newline at end of file
diff --git a/transcripts/uncorrected/40.txt b/transcripts/uncorrected/40.txt
index 353b380ddee0d6134e7cfc905de9171524ef566e..24994713fc006cf39dff6433f341d9e5b812c141 100644
--- a/transcripts/uncorrected/40.txt
+++ b/transcripts/uncorrected/40.txt
@@ -1 +1 @@
-I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.
And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.
And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that.
\ No newline at end of file
+So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.
What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.
A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner.
\ No newline at end of file
diff --git a/transcripts/uncorrected/41.txt b/transcripts/uncorrected/41.txt
index 0ec335394a72e80887a3672f290bc5828d8227e0..8eb532b0a713565b3b2fae20960656ec0d9e6e2f 100644
--- a/transcripts/uncorrected/41.txt
+++ b/transcripts/uncorrected/41.txt
@@ -1 +1 @@
-I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.
And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda.
\ No newline at end of file
+Okay, what I'd like to do is create an application with Gemini. The user will upload their resume, and upon receiving the resume, the purpose of this application is to ideate jobs — positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slight pivots or bigger pivots.
They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.
Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for.
\ No newline at end of file
diff --git a/transcripts/uncorrected/42.txt b/transcripts/uncorrected/42.txt
index 243f36cf36c052964af7ebe83a792dae9e67d205..492695d3c04244eba8ee90b40f4d0ed8cbb6793b 100644
--- a/transcripts/uncorrected/42.txt
+++ b/transcripts/uncorrected/42.txt
@@ -1 +1 @@
-I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.
But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.
Come up with an imaginative name for this use case.
\ No newline at end of file
+Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.
And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.
So my idea is for a mini PC — think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device. A box, for all intents and purposes, which runs on-device a very efficient speech model like Whisper, and it does local inference on hardware. Everything is optimized for this one workload. It has a USB out, and on the USB out it functions as a HID device and sends the transcribed text. Inference on the device and straight out over USB.
What this means is you can plug your voice keyboard, which I think is an obvious name, into anything. You can have it bound to your desktop most of the time; if you go away travelling for a while, you pack your box. So it's really analogous to a keyboard.
Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it.
\ No newline at end of file
diff --git a/transcripts/uncorrected/43.txt b/transcripts/uncorrected/43.txt
index 35a55fa10abb62fbf49bc2c38d73e8cc53fca620..acadef7c73d2b38c88ec7b03751c008a67eca4fc 100644
--- a/transcripts/uncorrected/43.txt
+++ b/transcripts/uncorrected/43.txt
@@ -1 +1 @@
-This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.
So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links.
\ No newline at end of file
+Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.
Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe.
\ No newline at end of file
diff --git a/transcripts/uncorrected/44.txt b/transcripts/uncorrected/44.txt
index e3960e6d457375f71a0aa63d07c4c8ad4af74fc2..48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45 100644
--- a/transcripts/uncorrected/44.txt
+++ b/transcripts/uncorrected/44.txt
@@ -1 +1 @@
-Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.
Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.
And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on.
\ No newline at end of file
+Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.
Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends.
\ No newline at end of file
diff --git a/transcripts/uncorrected/45.txt b/transcripts/uncorrected/45.txt
index 4215c595a95e066a9ecda2a2ae08b9013686c002..353b380ddee0d6134e7cfc905de9171524ef566e 100644
--- a/transcripts/uncorrected/45.txt
+++ b/transcripts/uncorrected/45.txt
@@ -1 +1 @@
-Okay, I'd like to create an app which does the following. The purpose of the app is to visualize how different countries, ideologies, systems approach common policy challenges. An example of a policy challenge that I'm just providing for explaining how I could see this working is second-hand smoke control. Some countries have very strict regulations, some countries have very lax enforcement. And probably there is not really much distinction by system of government but the user prompts it called policy visualizer and the user enters a policy challenge. So another example might be minimum alcohol purchasing laws.
Once Gemini receives this prompt, its task will be to research how different countries in the first instance approach this topic. And from that analysis, it can identify commonalities or clusters. The research process happens in the back end. And the user is shown some kind of progress indicators like researching what it's doing basically. Not a huge amount of verbosity but just a few cues so the user knows that it's not stuck or it's actually doing something.
Once Gemini concludes its first pass it will have grouped not necessarily every country in the world but based on the clusters it identifies it found groups. Each group is given a label. The label might be laissez-faire, permissive. These may be either recognized labels or what Gemini feels it's best to describe them as. And the countries are displayed with their national flags in alphabetical order.
The next functionality is that the user can click on the cluster and Gemini will describe what it is about this law that it considered them to be a cluster. In other words, the way in which they approach the challenge. That's a modal. Then the user can click on any country and it can see how that country approaches it. So I might click on the flag of Germany and either an accordion or a modal it show how Germany approaches in this case gun control and its cluster.
Country level is always a tab and only if there's other taxonomies. By taxonomy I mean that we think there's a very, Gemini says there's a very big difference and how different right-wing versus left-wing approaches we're going to do. We're going to create one more tab with that. But that should be kind of only if there's very compelling reason to do so. Or if it has significant data to share. So if it feels like there's enough data about how US states approach an issue at the state level, it might create a tab called US States and then follow the same pattern in which it groups them into clusters.
The objective is to, rather than searching through Google to see how different countries do different things, to start with your question and then get this visualisation. And I think the icing on the cake would be an analysis. So this is a visual presentation and then there may be analysis showing significant differences, some similarities. So there's like a report, a textual report, but the main tab, because I think it's the most interesting one, is the visualization, the policy visualizer.
\ No newline at end of file
+I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.
And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.
And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that.
\ No newline at end of file
diff --git a/transcripts/uncorrected/46.txt b/transcripts/uncorrected/46.txt
index 145fac41057e67a2489a588fef1f5d5a4b0df965..0ec335394a72e80887a3672f290bc5828d8227e0 100644
--- a/transcripts/uncorrected/46.txt
+++ b/transcripts/uncorrected/46.txt
@@ -1 +1 @@
-Alright, so the plan is for this repository, I want to create an audio media streaming interface for my home network. And there's a few things I want to roll into this one too.
Number 1 is media playback. So I have a volume on the NAS called AudioShare. The NAS is 10.0.0.50. So connect to the NAS, you'll find the AudioShare volume and let's mount that as the media library. It'll have a lot of tracks already populated.
Second thing is a soundboard. So I'll create a folder within that audio share volume called soundboard. And in the soundboard I just upload some stupid sound effects I do one to start it off Like laughing sound.
And then I also want to create a intercom system. and the functionality for the intercom is that from this computer, sorry from the interface which will be audio.residence.jlm.com I'd like to have the push to talk and the start and stop. PUSH TO TALK
So for the speaker networking this is where I would like you to give me your thoughts on what makes the most sense So I've used before MPD. I've installed MPD clients on... So the devices are, there is a device called Nursery Pi in SSH. Bedroom Pi, R-Pi and Smart TV. Each one is connected to a speaker. That's the network.
I tried MPD, putting an MPD client on each device. MPD has been the most reliable But it seems kind of a pity to use this when there are protocols like SnapServer that are designed specifically for this use case. However, using Home Assistant, I found SnapServer to be very buggy. I could never really get it to work and many more and the system that's reliable.
I find with MPD, because you need to select the speaker on the client devices, those bindings frequently broke. So I'd like to have something that kind of, the speakers are really never going to change. In the sense that I'm going to, I have a sound card for the Raspberry Pi. That's the speaker. and for as long as I use this system that's gonna be the configuration. So I want to set up something that once it's in place it's pretty much just gonna work.
So I leave that call up to you and please create a... Create a folder in the repository providing your recommendations just before you begin and what you suggest as the best implementation for the multi-speaker network whether it is broadcasting to a bunch of MCD clients from the Web UI or whether it's creating a single Snap server or something else that manages the networking I don't envision much of a need to select individual speakers by which I mean, I think that for the most part the occasions I'm using this I'll just play media to the pool but of course it would be nice to be able to select that !
\ No newline at end of file
+I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.
And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda.
\ No newline at end of file
diff --git a/transcripts/uncorrected/47.txt b/transcripts/uncorrected/47.txt
index b314f3f74074ca02c2a47132cea688da6abb56d9..243f36cf36c052964af7ebe83a792dae9e67d205 100644
--- a/transcripts/uncorrected/47.txt
+++ b/transcripts/uncorrected/47.txt
@@ -1 +1 @@
-Building a Reporting Disclosure. I have a few thoughts. One, I can create a model. A model is actually quite feasible. It would be, but it's a data annotation project. It's saying, here's a PDF, here are the actual variables. In other words, here's the scope 3, scope 2, scope 1, here are the units, train it like that.
Second thought is if I did want to put together a dataset of sustainability disclosure reports, I think you could argue a public fair use clause for the PDFs being there.
And then the one I did with Gemini the other day which was basically a parsing AI tool seemed to work and could probably be used in production and which works even maybe as a way of trying to get in touch with Google is they have They have definitely an AI for good division who may let's say provide Gemini credits for the actual deployment of it on Cloud Run. Because from my first run of it, it was very, very promising for the task of parsing the reports.
And that would greatly the feature would be when it extracts the data human human in the loop is done by seeing what it is matching it to a company in the database or to a known company Let's take Google itself as an example. Detects its stock ticker, detects its stock exchange. And then you click like add to database meaning that you're adding the validated data and it could even pull out the metadata from the document pull out the source and that would be a great way of building up a human validated database in other words you take the reports you say either everything everything looks good to me or this is wrong either way you add it then of course you've got the missing financials and the rest of the world.
But that would probably be because there is thousands of sustainability disclosures, especially when you consider I think beyond the US globally, and it's beyond. So certainly it's a task for a model, but it's also human in the loop. The ultimate question is if Gemini stock performs 99% sufficiently well in the task of extracting this data from the sustainability reports. A model might actually not even be necessary because out of the box it's almost perfect. That is, I suspect, what the case would be.
\ No newline at end of file
+I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.
But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.
Come up with an imaginative name for this use case.
\ No newline at end of file
diff --git a/transcripts/uncorrected/48.txt b/transcripts/uncorrected/48.txt
index 8d2caf72445f7704d8455a3c2b790fdf76026b9e..35a55fa10abb62fbf49bc2c38d73e8cc53fca620 100644
--- a/transcripts/uncorrected/48.txt
+++ b/transcripts/uncorrected/48.txt
@@ -1 +1 @@
-The purpose of the repository basically is to model or suggest the idea of using AI agents to scope out gap filling and extending multi-agent networks based on their inferred understanding of the purpose of a multi-agent network.
I think iterative workflow is the best. It suggests to the user what about this agent the user says yes or no, rather than the batch system. Although it could do both, but let's make the defaults the kind of individual review system.
\ No newline at end of file
+This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.
So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links.
\ No newline at end of file
diff --git a/transcripts/uncorrected/49.txt b/transcripts/uncorrected/49.txt
index 2acd54bd254b2cdcc6a5457142eb4e0e917685f0..e3960e6d457375f71a0aa63d07c4c8ad4af74fc2 100644
--- a/transcripts/uncorrected/49.txt
+++ b/transcripts/uncorrected/49.txt
@@ -1 +1 @@
-Okay, I'd like to create an app with Gemini. It's going to do the following. It will be called MyEQCreator. Here's how it works.
The user will, there will be a microphone recording interface, or the user can upload a file. Either way, the user should aim to upload a three minute audio sample. Audio Sample goes to Gemini and Gemini will parse the submitted audio to determine speaker characteristics, namely their vocal range, frequency distribution. And when it does this its goal way to provide an EQ preset for the user.
I use Audacity for lightweight audio editing and if I had a Daniel voice preset that had these EQ settings built in or that could even use via a CLI I would use it but that would require maybe a second pass Gemini would generate it according to that file spec.
What would be very useful and impressive in addition would be after the analysis a five second audio sample might be visualized and the frequencies highlighted to illustrate to the user where the frequency distribution falls for their particular voice.
\ No newline at end of file
+Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.
Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.
And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on.
\ No newline at end of file
diff --git a/transcripts/uncorrected/5.txt b/transcripts/uncorrected/5.txt
index 2a58c5f30c61703af3ed4fd13d9b1c23315f1326..1d6b40cadcd1af0d2b5bcc01c9162dbc1436bbfa 100644
--- a/transcripts/uncorrected/5.txt
+++ b/transcripts/uncorrected/5.txt
@@ -1 +1 @@
-So I'd love to get your thoughts on the following. There's a tweet from Sam Altman that he wrote a few years ago and it's aged quite well as they say. He was announcing the release of ChatGPT and maybe an early iteration of ChatGPT, maybe 3 or 3.5 or something like that. Maybe even an earlier one. And the tweet went something like, it's our conversational, or first it's a conversational model or something.
And what's interesting to me about this is that I discovered AI through ChatGPT or got excited about it through that interface. And then from there worked back to more instructional workloads as then I used it as a chat interface, then began using LLMs through their API endpoints and then began using them programmatically and scripting and using them on my local computer. And now I doing much more of that than I am using them as chatbots.
I know a lot of people, I think even people who are pretty technically literate, aren't really aware that there's, that there's, AI can be used in this way. But what's interesting about that tweet I mentioned is it inferred that instructional models actually predate conversational models. In other words, that I think what he was saying was that OpenAI had developed GPT firstly for instruction following, and then they sort of refined it for conversation.
And what I'm curious to know is, is that accurate that instructional models predate conversational models and if so by sort of how long?
\ No newline at end of file
+The home server, which is actually an old desktop that was repurposed. It is an i3. The motherboard is very old. It's overall about an 8-9 year old computer with a very basic Nvidia GPU. and it's been fine for the workload so far because it was certainly gotten lots and lots of value out of the hardware. There's certain things that it can't do however. One of those things is the first real blocker I ran into was NVIDIA. Sorry, it was Frigate. Trying to run that just wasn't able to handle the... I couldn't do the GPU offload, I guess, because the GPU was too old.
And I kind of boomeranged in self in that I done a lot of it over the years. With AI, the advent of AI development however have actually warmed up to self again because deploying things at home and maintaining software has become a lot easier when you can debug common problems and handle installations with an AI assistant. One of the software products that I've deployed recently is called Resource Space. It's a digital asset manager, a DAM. And this is something actually that I've wanted for many, many years because I've been involved in content creation and photography and videography and I've always really wanted to build up my own stock library of assets.
Cloud hosted DAMs are just too expensive for hobbyists like me, if you want to call it like that, even though some of it's related to my job. They're typical enterprise products, so it's actually a perfect use case for self-hosting because having the media resources on the local environment makes sense from an editing standpoint, where I'm editing at home. And it provides something that I couldn't afford and makes it available for free.
With Resource Space, the constraint seems to be in I think all the workloads that go on. When you upload stuff, it then runs some metadata processing. It tries to run some facial recognition stuff. And it feels at this point that rather than one and without them it just really can't process uploads. So I kind of feel that I thought about maybe putting in a new CPU and increasingly I kind of think that well maybe actually that's not going to do anything for all the other constraints and maybe as I've decided to actually go into self-hosting and I see it as a long-term thing I want to keep doing for my business, maybe this would actually be a good time to just say I've gotten enough use out of this computer, maybe I want to keep the hard drives or the SSDs.
I mean maybe even not that, maybe it just a good time to say this is time for an upgrade. So what I'm looking for is what I keep what I always feel about whenever I open up a desktop I feel like there's just a lot of space that's not utilized in terms of physically, there's just open space in the case. I don't know where that is. And I'm wondering, I feel like for my current workloads, so it's Proxmox with ZFS and then there's Ubuntu on top of that. So I don't think a mini PC is going to be powerful enough to do all these workloads.
I do want to have an NVIDIA GPU, ideally. And many more. Hi, by 30cm long tower desktop. The way computers are bought in Israel, where I live, is actually mostly it's a spec-based ordering process by which you go into a computer store, you describe what you want, what you need. They will, you'll agree upon a spec, they'll give you a price. And then they'll actually assemble the computer for you. So it's not, I mean, you can buy off-the-shelf servers and whatnot, Amazon, and more.
And besides the other stuff that I mentioned our restreamer for the camera I would like to run Frigate. I would like to run Resource Space. And it would be nice to be able to run local AI inference, but I think I know that really pushes the budget up a significant amount. So with all that spec in mind, give me a few suggested specifications. And then importantly for each of those form factor in terms of what is the most compact form factor that I could maybe condense all of that into.
And finally, one option for buying outside of Israel is that you might visit the US in a few months. And if it's something that can be small enough that could fit into a suitcase it could actually bring it back but I'd rather not go down that route but just as a possibility.
\ No newline at end of file
diff --git a/transcripts/uncorrected/50.txt b/transcripts/uncorrected/50.txt
index b2de03d17424a2fed8639d2dfa09c98e84d864d7..4215c595a95e066a9ecda2a2ae08b9013686c002 100644
--- a/transcripts/uncorrected/50.txt
+++ b/transcripts/uncorrected/50.txt
@@ -1 +1 @@
-It would be great to run the demo. I'm opening, creating a .env. And it would be useful so people can see straight up how it works to have a page that just says demo.
And it'll have so we'll need to run the audio data through the pipeline just as if we were using it capture the results into the repo here and just display that on the front end I've just provided the Gemini API key so let's try to do that I I also deleted, I think we just need one readme and the instructions for the app can be attached.
\ No newline at end of file
+Okay, I'd like to create an app which does the following. The purpose of the app is to visualize how different countries, ideologies, systems approach common policy challenges. An example of a policy challenge that I'm just providing for explaining how I could see this working is second-hand smoke control. Some countries have very strict regulations, some countries have very lax enforcement. And probably there is not really much distinction by system of government but the user prompts it called policy visualizer and the user enters a policy challenge. So another example might be minimum alcohol purchasing laws.
Once Gemini receives this prompt, its task will be to research how different countries in the first instance approach this topic. And from that analysis, it can identify commonalities or clusters. The research process happens in the back end. And the user is shown some kind of progress indicators like researching what it's doing basically. Not a huge amount of verbosity but just a few cues so the user knows that it's not stuck or it's actually doing something.
Once Gemini concludes its first pass it will have grouped not necessarily every country in the world but based on the clusters it identifies it found groups. Each group is given a label. The label might be laissez-faire, permissive. These may be either recognized labels or what Gemini feels it's best to describe them as. And the countries are displayed with their national flags in alphabetical order.
The next functionality is that the user can click on the cluster and Gemini will describe what it is about this law that it considered them to be a cluster. In other words, the way in which they approach the challenge. That's a modal. Then the user can click on any country and it can see how that country approaches it. So I might click on the flag of Germany and either an accordion or a modal it show how Germany approaches in this case gun control and its cluster.
Country level is always a tab and only if there's other taxonomies. By taxonomy I mean that we think there's a very, Gemini says there's a very big difference and how different right-wing versus left-wing approaches we're going to do. We're going to create one more tab with that. But that should be kind of only if there's very compelling reason to do so. Or if it has significant data to share. So if it feels like there's enough data about how US states approach an issue at the state level, it might create a tab called US States and then follow the same pattern in which it groups them into clusters.
The objective is to, rather than searching through Google to see how different countries do different things, to start with your question and then get this visualisation. And I think the icing on the cake would be an analysis. So this is a visual presentation and then there may be analysis showing significant differences, some similarities. So there's like a report, a textual report, but the main tab, because I think it's the most interesting one, is the visualization, the policy visualizer.
\ No newline at end of file
diff --git a/transcripts/uncorrected/51.txt b/transcripts/uncorrected/51.txt
index f2066bdff489a0e7af0c17fa8ccf736412194aad..145fac41057e67a2489a588fef1f5d5a4b0df965 100644
--- a/transcripts/uncorrected/51.txt
+++ b/transcripts/uncorrected/51.txt
@@ -1 +1 @@
-Hello, yeah, I'm looking for, okay, I'm trying to find a phone case for the Nord 3 5G from OnePlus. I want something which has MagSafe, a magnet built into the case itself, and something good quality and that's just a good protective case for the phone.
Do you know of any recommendations? Any ones on AliExpress or if Otterbox makes a case for this phone or anyone else? It's a slightly older OnePlus, so it's tricky to find a compatible case for it.
So if you happen to know, you should know of any products on AliExpress and product numbers, list them please.
\ No newline at end of file
+Alright, so the plan is for this repository, I want to create an audio media streaming interface for my home network. And there's a few things I want to roll into this one too.
Number 1 is media playback. So I have a volume on the NAS called AudioShare. The NAS is 10.0.0.50. So connect to the NAS, you'll find the AudioShare volume and let's mount that as the media library. It'll have a lot of tracks already populated.
Second thing is a soundboard. So I'll create a folder within that audio share volume called soundboard. And in the soundboard I just upload some stupid sound effects I do one to start it off Like laughing sound.
And then I also want to create a intercom system. and the functionality for the intercom is that from this computer, sorry from the interface which will be audio.residence.jlm.com I'd like to have the push to talk and the start and stop. PUSH TO TALK
So for the speaker networking this is where I would like you to give me your thoughts on what makes the most sense So I've used before MPD. I've installed MPD clients on... So the devices are, there is a device called Nursery Pi in SSH. Bedroom Pi, R-Pi and Smart TV. Each one is connected to a speaker. That's the network.
I tried MPD, putting an MPD client on each device. MPD has been the most reliable But it seems kind of a pity to use this when there are protocols like SnapServer that are designed specifically for this use case. However, using Home Assistant, I found SnapServer to be very buggy. I could never really get it to work and many more and the system that's reliable.
I find with MPD, because you need to select the speaker on the client devices, those bindings frequently broke. So I'd like to have something that kind of, the speakers are really never going to change. In the sense that I'm going to, I have a sound card for the Raspberry Pi. That's the speaker. and for as long as I use this system that's gonna be the configuration. So I want to set up something that once it's in place it's pretty much just gonna work.
So I leave that call up to you and please create a... Create a folder in the repository providing your recommendations just before you begin and what you suggest as the best implementation for the multi-speaker network whether it is broadcasting to a bunch of MCD clients from the Web UI or whether it's creating a single Snap server or something else that manages the networking I don't envision much of a need to select individual speakers by which I mean, I think that for the most part the occasions I'm using this I'll just play media to the pool but of course it would be nice to be able to select that !
\ No newline at end of file
diff --git a/transcripts/uncorrected/52.txt b/transcripts/uncorrected/52.txt
index 73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c..b314f3f74074ca02c2a47132cea688da6abb56d9 100644
--- a/transcripts/uncorrected/52.txt
+++ b/transcripts/uncorrected/52.txt
@@ -1 +1 @@
-I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.
I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations.
\ No newline at end of file
+Building a Reporting Disclosure. I have a few thoughts. One, I can create a model. A model is actually quite feasible. It would be, but it's a data annotation project. It's saying, here's a PDF, here are the actual variables. In other words, here's the scope 3, scope 2, scope 1, here are the units, train it like that.
Second thought is if I did want to put together a dataset of sustainability disclosure reports, I think you could argue a public fair use clause for the PDFs being there.
And then the one I did with Gemini the other day which was basically a parsing AI tool seemed to work and could probably be used in production and which works even maybe as a way of trying to get in touch with Google is they have They have definitely an AI for good division who may let's say provide Gemini credits for the actual deployment of it on Cloud Run. Because from my first run of it, it was very, very promising for the task of parsing the reports.
And that would greatly the feature would be when it extracts the data human human in the loop is done by seeing what it is matching it to a company in the database or to a known company Let's take Google itself as an example. Detects its stock ticker, detects its stock exchange. And then you click like add to database meaning that you're adding the validated data and it could even pull out the metadata from the document pull out the source and that would be a great way of building up a human validated database in other words you take the reports you say either everything everything looks good to me or this is wrong either way you add it then of course you've got the missing financials and the rest of the world.
But that would probably be because there is thousands of sustainability disclosures, especially when you consider I think beyond the US globally, and it's beyond. So certainly it's a task for a model, but it's also human in the loop. The ultimate question is if Gemini stock performs 99% sufficiently well in the task of extracting this data from the sustainability reports. A model might actually not even be necessary because out of the box it's almost perfect. That is, I suspect, what the case would be.
\ No newline at end of file
diff --git a/transcripts/uncorrected/53.txt b/transcripts/uncorrected/53.txt
index 24994713fc006cf39dff6433f341d9e5b812c141..8d2caf72445f7704d8455a3c2b790fdf76026b9e 100644
--- a/transcripts/uncorrected/53.txt
+++ b/transcripts/uncorrected/53.txt
@@ -1 +1 @@
-So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.
What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.
A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner.
\ No newline at end of file
+The purpose of the repository basically is to model or suggest the idea of using AI agents to scope out gap filling and extending multi-agent networks based on their inferred understanding of the purpose of a multi-agent network.
I think iterative workflow is the best. It suggests to the user what about this agent the user says yes or no, rather than the batch system. Although it could do both, but let's make the defaults the kind of individual review system.
\ No newline at end of file
diff --git a/transcripts/uncorrected/54.txt b/transcripts/uncorrected/54.txt
index 5eac1414e49e1b8618ce1ba2193d7d10b91f431a..2acd54bd254b2cdcc6a5457142eb4e0e917685f0 100644
--- a/transcripts/uncorrected/54.txt
+++ b/transcripts/uncorrected/54.txt
@@ -1 +1 @@
-I'd like to consider a wee factor and then just give me your thoughts about this so currently it's a file based backend what I was wondering is would it make more sense to have a lightweight database backend SQLite let's say and and the important part of the utility which is the Hugging Face dataset push is what I'm using for the classification model would actually be a job whereby locally it will create the dataset from the local backend.
In other words, rather than having this sit in place as files, it's going to be constructed periodically. Basically when I say okay I've uploaded another batch, let's push, would that be easier and more logical to integrate with the front end?
\ No newline at end of file
+Okay, I'd like to create an app with Gemini. It's going to do the following. It will be called MyEQCreator. Here's how it works.
The user will, there will be a microphone recording interface, or the user can upload a file. Either way, the user should aim to upload a three minute audio sample. Audio Sample goes to Gemini and Gemini will parse the submitted audio to determine speaker characteristics, namely their vocal range, frequency distribution. And when it does this its goal way to provide an EQ preset for the user.
I use Audacity for lightweight audio editing and if I had a Daniel voice preset that had these EQ settings built in or that could even use via a CLI I would use it but that would require maybe a second pass Gemini would generate it according to that file spec.
What would be very useful and impressive in addition would be after the analysis a five second audio sample might be visualized and the frequencies highlighted to illustrate to the user where the frequency distribution falls for their particular voice.
\ No newline at end of file
diff --git a/transcripts/uncorrected/55.txt b/transcripts/uncorrected/55.txt
index 8eb532b0a713565b3b2fae20960656ec0d9e6e2f..b2de03d17424a2fed8639d2dfa09c98e84d864d7 100644
--- a/transcripts/uncorrected/55.txt
+++ b/transcripts/uncorrected/55.txt
@@ -1 +1 @@
-Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.
They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.
Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for.
\ No newline at end of file
+It would be great to run the demo. I'm opening, creating a .env. And it would be useful so people can see straight up how it works to have a page that just says demo.
And it'll have so we'll need to run the audio data through the pipeline just as if we were using it capture the results into the repo here and just display that on the front end I've just provided the Gemini API key so let's try to do that I I also deleted, I think we just need one readme and the instructions for the app can be attached.
\ No newline at end of file
diff --git a/transcripts/uncorrected/56.txt b/transcripts/uncorrected/56.txt
index 492695d3c04244eba8ee90b40f4d0ed8cbb6793b..f2066bdff489a0e7af0c17fa8ccf736412194aad 100644
--- a/transcripts/uncorrected/56.txt
+++ b/transcripts/uncorrected/56.txt
@@ -1 +1 @@
-Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.
And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.
So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.
What this means is you can plug your voice keyboard, which I think is obvious name, into anything. You can have it bound to your desktop for most of the time, you go away for traveling for a while, you pack your box. So it's really analogous to a keyboard.
Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it.
\ No newline at end of file
+Hello, yeah, I'm looking for, okay, I'm trying to find a phone case for the Nord 3 5G from OnePlus. I want something which has MagSafe, a magnet built into the case itself, and something good quality and that's just a good protective case for the phone.
Do you know of any recommendations? Any ones on AliExpress or if Otterbox makes a case for this phone or anyone else? It's a slightly older OnePlus, so it's tricky to find a compatible case for it.
So if you happen to know, you should know of any products on AliExpress and product numbers, list them please.
\ No newline at end of file
diff --git a/transcripts/uncorrected/57.txt b/transcripts/uncorrected/57.txt
index acadef7c73d2b38c88ec7b03751c008a67eca4fc..73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c 100644
--- a/transcripts/uncorrected/57.txt
+++ b/transcripts/uncorrected/57.txt
@@ -1 +1 @@
-Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.
Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe.
\ No newline at end of file
+I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.
I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations.
\ No newline at end of file
diff --git a/transcripts/uncorrected/58.txt b/transcripts/uncorrected/58.txt
index 48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45..24994713fc006cf39dff6433f341d9e5b812c141 100644
--- a/transcripts/uncorrected/58.txt
+++ b/transcripts/uncorrected/58.txt
@@ -1 +1 @@
-Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.
Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends.
\ No newline at end of file
+So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.
What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.
A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner.
\ No newline at end of file
diff --git a/transcripts/uncorrected/59.txt b/transcripts/uncorrected/59.txt
index 353b380ddee0d6134e7cfc905de9171524ef566e..5eac1414e49e1b8618ce1ba2193d7d10b91f431a 100644
--- a/transcripts/uncorrected/59.txt
+++ b/transcripts/uncorrected/59.txt
@@ -1 +1 @@
-I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.
And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.
And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that.
\ No newline at end of file
+I'd like to consider a wee factor and then just give me your thoughts about this so currently it's a file based backend what I was wondering is would it make more sense to have a lightweight database backend SQLite let's say and and the important part of the utility which is the Hugging Face dataset push is what I'm using for the classification model would actually be a job whereby locally it will create the dataset from the local backend.
In other words, rather than having this sit in place as files, it's going to be constructed periodically. Basically when I say okay I've uploaded another batch, let's push, would that be easier and more logical to integrate with the front end?
\ No newline at end of file
diff --git a/transcripts/uncorrected/6.txt b/transcripts/uncorrected/6.txt
index 7ade92ea48527be48c9ed28805bb0153509bb3a1..a6c4aa0dea473932cb03dfed9978e4ce2702e4a1 100644
--- a/transcripts/uncorrected/6.txt
+++ b/transcripts/uncorrected/6.txt
@@ -1 +1 @@
-Here's my idea for an AI podcast workflow. I think if it's just questions summarized by AI and people know that the whole thing is text to speech, it's a little bit off-putting because people think I don't want to listen to just a robot speaking the whole time.
I think if the podcast format was that my voice prompt actually makes it into the final output so it starts with me recording a voice prompt as I'm doing now, then that gets transcribed. Then the rest of the workflow is the same, but what I do for the actual episode render is I combine my voice prompt with the AI response. So that you really get the feeling that it's me actually asking something that's definitely not AI. That I'm an identifiable person speaking. And then the podcast goes from there.
I think it would be more effective and more impressive and more enjoyable to listen to.
\ No newline at end of file
+For Frigate Plus, what I want to do is as follows. I'm looking into getting a new, getting a server. And I'm conscious that you want empty labeling and identifying labeling. Both of them. So I'm going to curate those or gather those on the cameras.
And now, and if slash when I do the server upgrade, the home server upgrade, and then I would move over to Freigate and then actually start using the train models.
Worst case scenario it's just $50 and I never actually end up using the stuff but I'm hoping that I will at some point.
\ No newline at end of file
diff --git a/transcripts/uncorrected/60.txt b/transcripts/uncorrected/60.txt
index da218ad130c3c5a5f3ca672509c6c517f4fa87f2..8eb532b0a713565b3b2fae20960656ec0d9e6e2f 100644
--- a/transcripts/uncorrected/60.txt
+++ b/transcripts/uncorrected/60.txt
@@ -1 +1 @@
-I'd like to create an app that does the following. The user will paste a screenshot from their calendar or there's a text field for calendar entries for a certain time period. Below that there is a voice recorder. The voice recorder will let out the user to record a voice message, record, pause, stop, and or retake.
When the user is instructed to narrate their timesheet for the week, and the user can also select a date for week commencing, just to validate when the first date that they're referring to in this timesheet is. When those three fields are provided by the user they get sent to Gemini and Gemini will then generate a timesheet based upon the user description with activities per day.
The meeting information that was received will be added. So I might diarize specific meetings that were referenced. So combining the two sets of data. And finally based the user might if the user includes a time spent estimate how many hours were spent per day on a certain project or task it will then calculate the estimated total hours spent and then a summary section.
This will be provided as a document which is created in markdown with the user it's rendered in rich text on the screen and the user can click download and if they do that it'll download the timesheet as a markdown file with the title automatically file name timesheet for week commencing in machine readable case.
\ No newline at end of file
+Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.
They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.
Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for.
\ No newline at end of file
diff --git a/transcripts/uncorrected/61.txt b/transcripts/uncorrected/61.txt
index 0ec335394a72e80887a3672f290bc5828d8227e0..492695d3c04244eba8ee90b40f4d0ed8cbb6793b 100644
--- a/transcripts/uncorrected/61.txt
+++ b/transcripts/uncorrected/61.txt
@@ -1 +1 @@
-I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.
And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda.
\ No newline at end of file
+Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.
And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.
So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.
What this means is you can plug your voice keyboard, which I think is obvious name, into anything. You can have it bound to your desktop for most of the time, you go away for traveling for a while, you pack your box. So it's really analogous to a keyboard.
Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it.
\ No newline at end of file
diff --git a/transcripts/uncorrected/62.txt b/transcripts/uncorrected/62.txt
index 243f36cf36c052964af7ebe83a792dae9e67d205..acadef7c73d2b38c88ec7b03751c008a67eca4fc 100644
--- a/transcripts/uncorrected/62.txt
+++ b/transcripts/uncorrected/62.txt
@@ -1 +1 @@
-I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.
But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.
Come up with an imaginative name for this use case.
\ No newline at end of file
+Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.
Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe.
\ No newline at end of file
diff --git a/transcripts/uncorrected/63.txt b/transcripts/uncorrected/63.txt
index 35a55fa10abb62fbf49bc2c38d73e8cc53fca620..48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45 100644
--- a/transcripts/uncorrected/63.txt
+++ b/transcripts/uncorrected/63.txt
@@ -1 +1 @@
-This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.
So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links.
\ No newline at end of file
+Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.
Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends.
\ No newline at end of file
diff --git a/transcripts/uncorrected/64.txt b/transcripts/uncorrected/64.txt
index e3960e6d457375f71a0aa63d07c4c8ad4af74fc2..353b380ddee0d6134e7cfc905de9171524ef566e 100644
--- a/transcripts/uncorrected/64.txt
+++ b/transcripts/uncorrected/64.txt
@@ -1 +1 @@
-Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.
Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.
And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on.
\ No newline at end of file
+I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.
And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.
And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that.
\ No newline at end of file
diff --git a/transcripts/uncorrected/65.txt b/transcripts/uncorrected/65.txt
index 73fdefbd1c2ebcfad9ad59e23523ae1b8526edf2..da218ad130c3c5a5f3ca672509c6c517f4fa87f2 100644
--- a/transcripts/uncorrected/65.txt
+++ b/transcripts/uncorrected/65.txt
@@ -1 +1 @@
-Okay, so I'd like to add to the VoiceNote dataset manager. So I have really annotations, there's two main objectives for this project as I currently conceive of it. And I think on the front end it would be useful to, when I'm uploading stuff and annotating, to have two separate sections for it, a little bit more clearly delineated. and so on.
So, if we have delineated, for example, where we have upload new voice note, that can firstly just be called maybe upload, next section transcripts, next section, and by next section I'm defining the headers, next section classification, next section annotations.
So in classification, I'll just add a few more recurrent ones that we should have. Prompt General, Development Prompt, Read Me Dictation, Social Media Post, and then in Annotations.
So content issues call that Audio defects and let add one for a significant background noise In audio quality issues, what I'd like to have actually maybe is, and again, we're going to, I mean, in the process of defining the annotations and might have to sort of work backwards initially, but most of them haven't been annotated yet. I'm not going to start annotating until the schema is defined so it would actually be a lagging annotation process.
The ones that are missing currently are background music. You have background noise but I think background music is actually very important because from a copyright standpoint that could be an issue. and for multi-language don't actually even have English Hebrew I'd have to keep it open-ended as to what other languages are present and I'd like to have one for background conversations actually and tagging by language so English Hebrew Arabic Russian French I'm hard these would be the ones that encounter my local environments a lot
\ No newline at end of file
+I'd like to create an app that does the following. The user will paste a screenshot from their calendar or there's a text field for calendar entries for a certain time period. Below that there is a voice recorder. The voice recorder will let out the user to record a voice message, record, pause, stop, and or retake.
When the user is instructed to narrate their timesheet for the week, and the user can also select a date for week commencing, just to validate when the first date that they're referring to in this timesheet is. When those three fields are provided by the user they get sent to Gemini and Gemini will then generate a timesheet based upon the user description with activities per day.
The meeting information that was received will be added. So I might diarize specific meetings that were referenced. So combining the two sets of data. And finally based the user might if the user includes a time spent estimate how many hours were spent per day on a certain project or task it will then calculate the estimated total hours spent and then a summary section.
This will be provided as a document which is created in markdown with the user it's rendered in rich text on the screen and the user can click download and if they do that it'll download the timesheet as a markdown file with the title automatically file name timesheet for week commencing in machine readable case.
\ No newline at end of file
diff --git a/transcripts/uncorrected/66.txt b/transcripts/uncorrected/66.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0ec335394a72e80887a3672f290bc5828d8227e0
--- /dev/null
+++ b/transcripts/uncorrected/66.txt
@@ -0,0 +1 @@
+I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.
And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda.
\ No newline at end of file
diff --git a/transcripts/uncorrected/67.txt b/transcripts/uncorrected/67.txt
new file mode 100644
index 0000000000000000000000000000000000000000..243f36cf36c052964af7ebe83a792dae9e67d205
--- /dev/null
+++ b/transcripts/uncorrected/67.txt
@@ -0,0 +1 @@
+I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.
But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.
Come up with an imaginative name for this use case.
\ No newline at end of file
diff --git a/transcripts/uncorrected/68.txt b/transcripts/uncorrected/68.txt
new file mode 100644
index 0000000000000000000000000000000000000000..35a55fa10abb62fbf49bc2c38d73e8cc53fca620
--- /dev/null
+++ b/transcripts/uncorrected/68.txt
@@ -0,0 +1 @@
+This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.
So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links.
\ No newline at end of file
diff --git a/transcripts/uncorrected/69.txt b/transcripts/uncorrected/69.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e3960e6d457375f71a0aa63d07c4c8ad4af74fc2
--- /dev/null
+++ b/transcripts/uncorrected/69.txt
@@ -0,0 +1 @@
+Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.
Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.
And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on.
\ No newline at end of file
diff --git a/transcripts/uncorrected/7.txt b/transcripts/uncorrected/7.txt
index ed50ed359bc55372aab37746585a31f7525ccc9a..57ee9e7328b60a23b4d9d39ea97021e9d3ff8e2d 100644
--- a/transcripts/uncorrected/7.txt
+++ b/transcripts/uncorrected/7.txt
@@ -1 +1 @@
-I will try to build. What I want to build is this: I don't know, is there a name for this kind of workflow? So let's say I go out taking B-roll. Now, right now I'm using a lot of it for populating my own library, and sometimes I share it with stock libraries. And usually, they strip the sound. I like to have a workflow in which, well, my ideal workflow would probably be something like this.
Let's say I have a folder full of media and P4 files. I can usually end up with a few mistakes, unintentional takes, and those usually would be like kind of less than five seconds duration. Usually, I just eyeball and I look for the ones with a small file size that's too small. Next thing I like to do would be stripping out the audio, batching, putting the video into its own folder, and then maybe, because for stock I'm shooting it handheld, it should be stabilized. So, stabilization.
So it's basically a pipeline. And my question is this: can this be done? But if I want to build a few pipelines like this, this is, let's say, my stock video pipeline. I might have another pipeline for sorting, so I might have a few media pipelines, and I don't want to have to go every time into a repository and run it. But it does make sense that it's just a script, basically.
So what's the best way to have a few scripts? I'm basically asking what's a good GUI for this kind of workflow? I want to have my media folders, and then I want to say run this script within this folder, and that would take the TDM out of setting up and resetting up environments and Python and all the rest of it. So what would you recommend as a tool for doing that?
\ No newline at end of file
+So, I have a question. For image to video, it's currently expensive, very expensive actually. I'm trying to find a way. So I found the WAN models, which are by Alibaba. I find them to be very good, and they have a more affordable WAN model that I like using. And when I'm doing a video, I frequently gather up my images, gather up my prompts, and I move in towards a workflow by which I kind of do the storyboarding, gather the source material as I call it, the photos. Gather the prompt together, and then I will run it as a script, which is a very novel way for me of approaching content creation in the sense that it's programmatic and it's code first.
Which is a strange way to approach a creative process, but it works. And it seems to me at the moment to be the most effective way to do this because otherwise, before this, I was using a playground, running them one by one, importing them to a video editor, and it's just a lot slower that way. Now the issue is that image to video, as I mentioned, is expensive. And if I'm doing these projects for fun, I have a lot of ideas I want to do for fun. But even the cheaper WAN models are in the region of 10 to 15 cents per generation, which could easily, it's very easy to go through 20 or even 50 dollars, especially given the fact that frequently you need to generate the same prompt multiple times before you get a satisfactory result.
I really, really want to explore image to video, and I'm trying to find a way to have an affordable way to play around with it even if it's not the best model. And you know, so what I've been thinking of is I come across for a while providers like RunPod who do make GPU available either in serverless functions or they do per hour pricing on GPUs. And since I discovered Replicate and FAL, I've kind of wondered, well, if you can just make an API call, why go to the trouble of managing an instance of a machine? I'm thinking now that it might be the cost reason that if the machines are a certain price per hour, it might actually be a lot more cost-effective than using an API.
So my question is, firstly, is that the case? Is a frequent reason that people actually do these or use these services for cost mitigation? And so on. So that's the first thing. Secondly, serverless versus pods as RunPod calls them. I guess serverless almost makes more sense to me because you just pay for what you use and you don't need to worry about starting and stopping the pod and configuring auto shutdown policies. So what’s the reason that people go for pods over serverless?
And finally, if I want to do this, probably the objective would be, is there a way that you can have like your own API endpoint and that's running stuff on the serverless function in the backend? And what I get confused about for these things, the first time I did it, if I'm not mistaken, I did it with video generation. The video actually generated on my local, which seems almost like magic to me. So you're doing the actual inference rendering in the cloud. And is it just the case when that happens? And so on. And then just running my script and then I'm using on-demand compute.
\ No newline at end of file
diff --git a/transcripts/uncorrected/70.txt b/transcripts/uncorrected/70.txt
new file mode 100644
index 0000000000000000000000000000000000000000..73fdefbd1c2ebcfad9ad59e23523ae1b8526edf2
--- /dev/null
+++ b/transcripts/uncorrected/70.txt
@@ -0,0 +1 @@
+Okay, so I'd like to add to the VoiceNote dataset manager. So I have really annotations, there's two main objectives for this project as I currently conceive of it. And I think on the front end it would be useful to, when I'm uploading stuff and annotating, to have two separate sections for it, a little bit more clearly delineated. and so on.
So, if we have delineated, for example, where we have upload new voice note, that can firstly just be called maybe upload, next section transcripts, next section, and by next section I'm defining the headers, next section classification, next section annotations.
So in classification, I'll just add a few more recurrent ones that we should have. Prompt General, Development Prompt, Read Me Dictation, Social Media Post, and then in Annotations.
So content issues call that Audio defects and let add one for a significant background noise In audio quality issues, what I'd like to have actually maybe is, and again, we're going to, I mean, in the process of defining the annotations and might have to sort of work backwards initially, but most of them haven't been annotated yet. I'm not going to start annotating until the schema is defined so it would actually be a lagging annotation process.
The ones that are missing currently are background music. You have background noise but I think background music is actually very important because from a copyright standpoint that could be an issue. and for multi-language don't actually even have English Hebrew I'd have to keep it open-ended as to what other languages are present and I'd like to have one for background conversations actually and tagging by language so English Hebrew Arabic Russian French I'm hard these would be the ones that encounter my local environments a lot
\ No newline at end of file
diff --git a/transcripts/uncorrected/8.txt b/transcripts/uncorrected/8.txt
index acc8d62d6d5b71235676ccf824c7860bf8c12d53..a2ad0808542f04e9e26405fe883f5a3a95fa8ce7 100644
--- a/transcripts/uncorrected/8.txt
+++ b/transcripts/uncorrected/8.txt
@@ -1 +1 @@
-I have a question here. I was exploring lately, getting up earlier, and it always really appealed to me. The idea of getting in sync with the sun, like the natural diurnal cycle. Stricadian rhythm, when the sun goes down approximately that's when you get ready for bed. When the sun comes up, that's maybe when you get ready, that's when you get up. But that would require, in the winter time at least, here, where I live, going to bed as early as, I mean I guess it depends. Whether you'd want to go to bed immediately at sundown, I think that's probably not realistic, and a couple of hours later. But even if you did the latter, you'd be talking about going to bed at like 8 o'clock in the winter, maybe as early as 7.
Now my question is, my interest in this really comes from a question I've always wondered or thought about, which is that until relatively recently there was no such thing as artificial illumination that you could click on with a switch in your home at least, and even the concept of street lighting being totally reliable and totally every street in a developed city being covered in street lighting, that was also a foreign concept. So in the evolution of humans, it seems to me it must be the case that this is a very recent adaptation.
So my question is really, from the historical record, what do we know about the kind of sleep cycle that humans gravitate to naturally when there isn't alternative lighting? Artificial lighting. Thanks for watching!
\ No newline at end of file
+Yeah, I think I would look for... the truth is, I was initially... I have to try out my Cherry Red keyboard, the split one is a long term thing. But in the short term I have to say I've really warmed to MX Brown, and I think at this point I probably would use any MX Brown keyboard without noticing much of a difference from the AliExpress one, which is a brown imitation.
And this frankly one is it's a wired one and what I would like probably I'm thinking at the moment I wanted to set up a binding for cloud code and I think that rather than go down in the macro pad approach, which is one way, one approach certainly, it would be really nice to have a keyboard with built-in macro keys.
I think the MX Red one that I got has about five macro keys and I'm wondering if you can put about, you know, if you put up the entire top of the keyboard or the number pad, which I'm looking at the keyboard now. A lot of the keys that I rarely use are the sound controls, the number operators, pause, scroll lock, print screen. There's probably about 20% of the keyboard that I rarely touch.
Do you have any recommendations for a brown keyboard? Let's say I don't like compact keyboards, so I do like the full-size keyboard. The small keyboards feel cramped to me, but that has a full keyboard section and then maybe fills up some space on the right and along the top with macro keys, and so that rather than adding on micro pads you can just create some assignments on the keyboard itself.
\ No newline at end of file
diff --git a/transcripts/uncorrected/9.txt b/transcripts/uncorrected/9.txt
index 8a430e2e093e18c208dfeff13e05eabe999f06dd..e0ca9c5f871fe1db6ec60a09ae492e1cb1614512 100644
--- a/transcripts/uncorrected/9.txt
+++ b/transcripts/uncorrected/9.txt
@@ -1 +1 @@
-I have a Nord 3 5G and I'm looking for a power bank. It supports this fast charging protocol. I think it's called SuperVOOC. And I was looking for a power bank that could basically charge it as quickly as possible, deliver the fastest charging that it can support from a non-AC outlet.
I got one from Bezeus before. I don't know what it was, it was 65W, I don't know if that's relevant for mainly smartphones or if it's just for laptops. But in any case, I think I've lost that power bank, so I need a new one.
Now I guess what I would probably like is the biggest capacity that you can fit into a power bank form factor. By which I mean, at a certain point, we're not really mobile, they make these power stations I think they're called. So the biggest thing you can get, and not an exaggerated spec but a real credible spec in terms of the mAh.
And the quickest, the combination of the quickest charging and the biggest capacity for this particular phone. Anything you'd recommend from Mosaic or other, let's say more credible manufacturers?
\ No newline at end of file
+Okay, so for Kdenlive, I wanted to get a macro pad with three toggles for video editing, a control surface in other words. I know that people on use, there's a few macro paths or there's a large community of people who have adapted different things for use with Kdenlive as control panels or control surfaces as they're called.
I have a friend who is a photographer and he bought an off-the-shelf controller and used it as a control surface for something else. And it made me think, is there anything that people commonly use for Kdenlive? What would be really helpful would be the three wheels for color correction, which would probably be... Those are, I guess, kind of toggles, and then scroll wheels for three scroll wheels, and it's always in pairs of three for that. But yeah, those are the ones that people commonly use and like.
\ No newline at end of file