diff --git a/annotations/1.json b/annotations/1.json
index 3471d70512387ad9bfdbac7447da41d3fa596201..bad118fe4f7d573f949840238b2b875f3ddbaa6a 100644
--- a/annotations/1.json
+++ b/annotations/1.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:1ef573e41868438568b70cb756a76484f35624b9b1687ca691aa6fdd321fea83
-size 597
+oid sha256:a8154c04dd2f631f2fe09dc67fb987877220c46b09f888a2ba47edd8c6dc7752
+size 602
diff --git a/annotations/10.json b/annotations/10.json
index 3f91847a4af46307a917585c278bae419073111f..6aa3ac163853dc8da44c55337ffa6544faa590dc 100644
--- a/annotations/10.json
+++ b/annotations/10.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e9f299d5208dc8f29b5720551de52fbc502b6e5214d2fca4e1cd4c65012d15bd
-size 599
+oid sha256:b273dc900a7c974ac9f480230f3b5de55a63892b62076b0c9a5cf28731ad81c9
+size 597
diff --git a/annotations/11.json b/annotations/11.json
index df9d6461d80fb3265b7b8db50cac58abc103452e..4c6ede77336e08ef72e97d39fd6c48483a40f351 100644
--- a/annotations/11.json
+++ b/annotations/11.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:9f1c4f177ccfe50a3696c0c942f45438981346ec81c99464b80ba194b5338f3b
-size 603
+oid sha256:912db364a508766d91d294fde2eed9c474fdbe01bcc3d50fad1f9fb948ae455e
+size 599
diff --git a/annotations/12.json b/annotations/12.json
index 4affe1227e3c441d64766162a2e688f6f78445fd..db455c146ff26d35d583047ee49f0d75c586d7c7 100644
--- a/annotations/12.json
+++ b/annotations/12.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:23b04809b340ed4351ff5b213e95b16a96224053a74674a3a1ac372e4e562d25
-size 603
+oid sha256:a5335eaa4a92bae6d114c137c6a57cabc97353fd39b8caef0028efaa5f6933a3
+size 599
diff --git a/annotations/13.json b/annotations/13.json
index 244fe3b219be7a39279e71764ef4042847553248..ffdac65e94f760afa661eef4af18046a6edf9aef 100644
--- a/annotations/13.json
+++ b/annotations/13.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:4dd1d1128982fb4615867f14d828dd1e09b550fb6c220cddf57d0e8e66fc4588
-size 604
+oid sha256:fe2def254ffb6fdb21a45b9418bab1a9ecb9a33c39f432592524ff773ffc8504
+size 603
diff --git a/annotations/14.json b/annotations/14.json
index 76d5ae2b9e2f11f3774df97d571d6391920ab5dd..5756ec88668346923f179ea37df3f6cee9fcfad3 100644
--- a/annotations/14.json
+++ b/annotations/14.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:f33eb4953b7b482e32c591b6f10a065ca520b77ee8cd4d36b77af25f0f910d1e
-size 599
+oid sha256:98a66e6fa710252275ba0608ffc7de0580f05ed3a1b669acaf579d7e2d575c9a
+size 601
diff --git a/annotations/15.json b/annotations/15.json
index 254724e0bc3910bb84f081b3609f8d9805ef1bd9..1d406422227b3f523a88aa38a43b5d2dae8d90a9 100644
--- a/annotations/15.json
+++ b/annotations/15.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:bfbe3c38c4fe4fbe7cd336c5e3c86bb3f53fff082b0bb0fdab7a9dda85ffa1a2
+oid sha256:2c56cc2cc54b6bf8339074bb6d681eac27a25fe362c5a22ec5f418215de5d479
size 602
diff --git a/annotations/16.json b/annotations/16.json
index 0ba7c6c93a7666f98666add35ea506cf2106da01..e9c389e5f8bf0957633fb13fcb5fc424bad86066 100644
--- a/annotations/16.json
+++ b/annotations/16.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:f615f11f470c0e2538230b7ca93a9809a575b51d40ad9d1e8c3a13b46285c650
+oid sha256:bb6103b1cfb27a59d1147075ac129e55178d3c76d84bc19ceafd77ffc0b384eb
size 599
diff --git a/annotations/17.json b/annotations/17.json
index 696759c15f2c5ee6e429254fd4823b4ab98c77e1..c4d07afbd3e204a3e5b58bc37f5e2cb054292822 100644
--- a/annotations/17.json
+++ b/annotations/17.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:3751bb4cd150a363d1a335dfaa4f84500cd63a108b180343313b3679c015396e
-size 599
+oid sha256:3f6d3abfef2bc9f3f18790a7d33a7c814d94c59d560bd22c0880370ecb3c14ee
+size 603
diff --git a/annotations/18.json b/annotations/18.json
index 72ee8864498f7e6eca12eb21c9e9fb32b549398b..91a9eaf1b28a049bddd6017e23e4ea7df67a54de 100644
--- a/annotations/18.json
+++ b/annotations/18.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:dd3ae4cdd7180b1689f90371faf19e9e70db9f7c0c34ebdc3c852803cfc2bf9d
-size 604
+oid sha256:a689a4d3c6930f9b32bccb94974127a18cd2e3067b6fab49a15b04b14788f845
+size 603
diff --git a/annotations/19.json b/annotations/19.json
index f987f5005d208af71e318379c099bbfd6bbcf140..bb02642af6dca0baa0e213decbee25e67d3c98c7 100644
--- a/annotations/19.json
+++ b/annotations/19.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:bfef98966c657fe946f612142a5987f16ce3139213305b19f03bd5fcc0e607fc
-size 598
+oid sha256:6c89ba239d540fa8e2c537d510311580cea7b381b95d83568379a77b8588d1dc
+size 604
diff --git a/annotations/2.json b/annotations/2.json
index 0bcfff84dccda0203e3e17f9894c9ad126a12bc4..32a9603b9e169b4fd783225d50c0625e4585899b 100644
--- a/annotations/2.json
+++ b/annotations/2.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:6905bfed86dfec9c4b87c77eea691631a3f1fe81efec09692e06eca2489c32ed
-size 602
+oid sha256:c673215dd52b6f7f1c7147452c3ee2ddfc4e2084fd22140fafcf06d84ad5c49b
+size 600
diff --git a/annotations/20.json b/annotations/20.json
index 08abb78e02ff7f24720ee21a7958fc059b82e848..2fa439add3e605d063a5c2e611efb3e4ab8f1d56 100644
--- a/annotations/20.json
+++ b/annotations/20.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:8f21aa8fea1718711c927d7a6f3e3bf2b1a176b997c4e09d77e8983d1501239c
+oid sha256:409d6bdd91dfa0c0a70df0ed4061c5a6d9bc7a379d14a30b3dd79600eac287a8
size 599
diff --git a/annotations/21.json b/annotations/21.json
index 660a4d8f559dc589d72d3aa398048ed58a421205..2a57228fec99ba478dd3582cdaca8c3b6e9ffc28 100644
--- a/annotations/21.json
+++ b/annotations/21.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:55463bdd516496a7c512b8452cba5f761ed505abeef49ee3b4b3a9fa9df98f18
-size 603
+oid sha256:cdcfed19af2a58a4d1c078d498dcf83f91d81321f418c46d8232c12f34843002
+size 602
diff --git a/annotations/22.json b/annotations/22.json
index a7414d3aa1a0ac33de288534bcad553b3c701a84..799bdb3e7380230006240f2ac603064dcc589a88 100644
--- a/annotations/22.json
+++ b/annotations/22.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:45147b91b931b686dc1e6d395483a6ddb74482dd045b0f88fe8363bfd1527b53
-size 604
+oid sha256:6c81eb7acd67962b78b64273084cf430439ef4e5e16bb4a8bb1d0712c90ab6b2
+size 599
diff --git a/annotations/23.json b/annotations/23.json
index 038a3c4fa73a731358ce1273b9e1146fb3ab4a71..3354cf76595238a6c8ae4e5ce1a58a611161151e 100644
--- a/annotations/23.json
+++ b/annotations/23.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e74adfbe606649bcbbec8b7436a2cd5a7eba62609c84900f4b83b8dc8e83ed77
-size 596
+oid sha256:1ebbedc207d424e1894232b25e902eb133c89c3325c9f430b028c808f84ba332
+size 599
diff --git a/annotations/24.json b/annotations/24.json
index 6744fb89b6c47e0883f0c194e79276284290c6fb..470cc0fe01e64c3a55f577f195fca73b9b968f69 100644
--- a/annotations/24.json
+++ b/annotations/24.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:3c0f6dc940546068525917dc4821dda6535bd186e7df6d0c9d2498cd1dc48a70
-size 598
+oid sha256:ee9e88142c8713c91b72a06ddb4b849438991b340002d369d4d9323f6398741b
+size 604
diff --git a/annotations/25.json b/annotations/25.json
index 5959486353a9f011c55f162ee6e4657d2260a2f4..8ec71005555d18128286fed1b6964dd73786f127 100644
--- a/annotations/25.json
+++ b/annotations/25.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:adfb4bd4dc58ab10a139a92bab2550d6d29063260ecd5f906b078b7c60af0ae8
+oid sha256:f44c0f2d4b74ccaa163a41f59b43a57e51093a4fd51598350b086d192274669c
size 598
diff --git a/annotations/26.json b/annotations/26.json
index bdde05482d09b55340775277b9e4f66634807f85..1a733581dc81874a847dd3d112f763a714c8e237 100644
--- a/annotations/26.json
+++ b/annotations/26.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:4336e3a5f450ab3c528e606b5a02e39bcb1c4e7488e0acbbee7784cc444c93fe
-size 603
+oid sha256:861a8c0a0ce5a1556061139b57c7bf2d806a429ff9e46f17eeb531adc435b277
+size 599
diff --git a/annotations/27.json b/annotations/27.json
index 29a6c795b0f523ba4112feac518a8e49171aac8e..334172e2b7557a2a780f6cedbd94cc5cd8893351 100644
--- a/annotations/27.json
+++ b/annotations/27.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:fa16f92d2cfa0e80af3f2915abdb4d4e31c96d15c74576e3904fe1de72dee0e9
-size 602
+oid sha256:b35c3c4c8a6efd5738c244c8283646f3529a09a1c1d975fb258119acc8b0537c
+size 603
diff --git a/annotations/28.json b/annotations/28.json
index 178c404dbd9b9454213d00d6328b76d23e744659..83cf1fdd53d2b2a26175032b8bc2e81124a10a4c 100644
--- a/annotations/28.json
+++ b/annotations/28.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:5fe952254097a84d1720617b0d8b593872a667fa09901a847b9368aaf9b3f7f6
-size 600
+oid sha256:9f8d726fcbc438a499ce58d1ea5031280acbbf60bb70b6f5649397847655cd9f
+size 604
diff --git a/annotations/29.json b/annotations/29.json
index 60bc49a2b1325b7f5d149c2c6870c789d4deab96..31475f2470809ab729522b0a5e29f812af674f2c 100644
--- a/annotations/29.json
+++ b/annotations/29.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:837d7dea72fa1401b3b861d1282f52727c7955a98eb76db0b01eda5a54f08c25
-size 601
+oid sha256:1ca1915c6fe5fa0affaf6a748f3b8c15452c8dacf1d121d2b9db14dac78e9113
+size 596
diff --git a/annotations/3.json b/annotations/3.json
index ece07396a32791db4b5c2f4283613886af847ff7..4480257e00f7096eec8ab70467fe98c484838d87 100644
--- a/annotations/3.json
+++ b/annotations/3.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:9f38dee5fd65537ec99c28177860f73b8ea944e20a84ed036263b6978dbce7be
+oid sha256:0280356d5eea3df0575a597b328d5f8ede01bfec55f2f632abe84f4061176a57
size 603
diff --git a/annotations/30.json b/annotations/30.json
index fab3aec15bf2550b7466b5ee96881dba9b033744..3d041e4cea9b8c8f85b4ed49ee11d0941fa6c050 100644
--- a/annotations/30.json
+++ b/annotations/30.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:d5da54d91c446e68faa415b7012b9381cb9963aa2c53b83d82c977f3862b9a7e
+oid sha256:9142ee0c4d896bc35e708d141a16d2713ebac7466639bd7d4ecf23ac0e2f47b8
size 598
diff --git a/annotations/31.json b/annotations/31.json
index b3a641b3e56eaa387e5c0e28b0599a2393f3b092..3c5cebe5d7068ad189889a20e10e120dd8f30901 100644
--- a/annotations/31.json
+++ b/annotations/31.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:0c7432d6afb78d676eb0647f804f486661251f8d45f0c0dd862adc717704e3e7
-size 602
+oid sha256:b5a488a6db54453c56459f12b15f086dd7de99f5b1ac51aab76c6912548d9486
+size 598
diff --git a/annotations/32.json b/annotations/32.json
new file mode 100644
index 0000000000000000000000000000000000000000..36ca0a4132d6044f64e58fb70ae32fdab04b3f97
--- /dev/null
+++ b/annotations/32.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bb30a704a8c07d3b7285736cccfb474137dad9e91ccf2925b930ee645c6a380
+size 603
diff --git a/annotations/33.json b/annotations/33.json
new file mode 100644
index 0000000000000000000000000000000000000000..c9a5da6fd2140284f91ad91992e9eaf2fdcc236f
--- /dev/null
+++ b/annotations/33.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f786b3c0d45a110d9461e8708b2882569d51da303b9f279206498d937ad228b4
+size 602
diff --git a/annotations/34.json b/annotations/34.json
new file mode 100644
index 0000000000000000000000000000000000000000..33b2c34be3945b0eb4fd74a5112385c217de4278
--- /dev/null
+++ b/annotations/34.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:696c1f77a948872d28f750524e9442255827b12a4d951c60559aa8b21bfa9443
+size 600
diff --git a/annotations/35.json b/annotations/35.json
new file mode 100644
index 0000000000000000000000000000000000000000..b8d09b12f87a3f1b88a2880721bb723ea881d161
--- /dev/null
+++ b/annotations/35.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a83f5561710628c65e4e0001888af0bf14cf7671a38bbcef7699cf718501caae
+size 601
diff --git a/annotations/36.json b/annotations/36.json
new file mode 100644
index 0000000000000000000000000000000000000000..d5473f9cc53c212e9efcb4f9a3a822e1582b213c
--- /dev/null
+++ b/annotations/36.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7dfcccfaf72c4b575f3bd646171dd021953f8572900abf845f64e14d94282d3a
+size 598
diff --git a/annotations/37.json b/annotations/37.json
new file mode 100644
index 0000000000000000000000000000000000000000..0ae098919755ae049ca9526bd4eab951cdc9485e
--- /dev/null
+++ b/annotations/37.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cafe80f10939e217489bc9b355cbeb9b24d3b64868b56a4486d746234eedbc1a
+size 602
diff --git a/annotations/4.json b/annotations/4.json
index 6b4b5c2dcca7b5a7de877801eae08b85d20a2898..253a9c9857598affefe8ad6e162a1b291e97daac 100644
--- a/annotations/4.json
+++ b/annotations/4.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:cbe75f4d6a4d8dc4816b249f4ca538aaa878690db5cc0950a824bd9093bc7000
-size 596
+oid sha256:b366e4db751775d4eb22ee2cd01e6351915be30d0b934aef8dc0496ee182935f
+size 597
diff --git a/annotations/5.json b/annotations/5.json
index fc59cfeb6d65c8926dc1da2dc3ff3113dbfdc8b4..d8ee1f9c178fa9b1b53c0345a09a8175e8666696 100644
--- a/annotations/5.json
+++ b/annotations/5.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:ff95dbe981ebb9e13133c021bff8e3442cc0168387c2d8d91c3a5d2819f54edd
-size 598
+oid sha256:b1da0db979299d0c94660c86a4033415cc18f14de32a9d1e3c0059ca408b3548
+size 599
diff --git a/annotations/6.json b/annotations/6.json
index 8fe0dcb02ef18a160a3d621e11af6484114ea366..b252ac635f1a4a7064c27ebe965cc66968803a5a 100644
--- a/annotations/6.json
+++ b/annotations/6.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:6d80c741cd54ac37f0d1a5a88733f86e4ef511f88c56fd7db5647ec604177c5b
-size 598
+oid sha256:7afbf644af4ec714e43560a8e1a0768b0aaf5971c51955ee0d3a95334a433ea3
+size 603
diff --git a/annotations/7.json b/annotations/7.json
index 842225809d58d1fb38540e07d14fe93858aff3ec..1cc4d40d679a0cdd97916517bfc4c8abb153d9f2 100644
--- a/annotations/7.json
+++ b/annotations/7.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:61ce307495371bfcfadae60591dc9b3dd1d20a41540c1f88bbaf164264c4f5d1
-size 602
+oid sha256:42d7872e64d9f19f7506a86d1b77c3e39a1ba07b7f92fef84b1b971fd291b961
+size 597
diff --git a/annotations/8.json b/annotations/8.json
index e29a6a1748772c09dbe25aa3d52442f1adc8967e..8936e5458dae5f5a891e916765de08ebcce90ca8 100644
--- a/annotations/8.json
+++ b/annotations/8.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:b1c32219ebb9ecf47672ba4d7a1fc64b94e9d7f3d6f6c85b1c3982c47330894e
-size 600
+oid sha256:5b059c3d22b4385d037fdbd5a8052c6db0b98b1db50a6cfa6333bf572cb84c54
+size 602
diff --git a/annotations/9.json b/annotations/9.json
index fc0d4ca2fc959f6ecf317de4032b7d951c5bcbcb..1fc2790abc26d66d6bcaeaf5d8e39560ae81443d 100644
--- a/annotations/9.json
+++ b/annotations/9.json
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:76971585acea228e62831c3c086691f002d652aabdc2fdd36b0865fb4f6d896b
-size 601
+oid sha256:dbe32e8d570cda11f55f819df7505bab62338a98991734121d15232a885f0ef9
+size 603
diff --git a/audio/1.mp3 b/audio/1.mp3
index 5e27bedfea3c81a479d3f828a25a3d068a880445..262b049e2ae103339728c76ee6b436f31158b5a1 100644
--- a/audio/1.mp3
+++ b/audio/1.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:fcc72030e7b00e1fe1cc439adc827178a773a020a0a87e41077bc42ccf42126f
-size 2587436
+oid sha256:d4061565fdb2cb529f501326e9b63826412eb2bc67320c3c57a3a30216247b3f
+size 3313196
diff --git a/audio/10.mp3 b/audio/10.mp3
index a42b02acca82456e4c67bb0a1e5e945357a4e720..f9e8f7a5846a7ac0f5cebad7ee4214b58282fd8a 100644
--- a/audio/10.mp3
+++ b/audio/10.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:4abd166ccade3f644d84f0b394d7c64f90c5b8adee074b6b0bcfb53b95b8e07d
-size 2814956
+oid sha256:1c914d6e441095bd6a40aa5a558418ae5a321ccf83238b073b8f4f16c6154f39
+size 1672842
diff --git a/audio/11.mp3 b/audio/11.mp3
index 7e502ec47ad069dfe0de20c460c45c3b101b16cc..90557fc63594f7799e1923114ab958ead7e8fb88 100644
--- a/audio/11.mp3
+++ b/audio/11.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e946b6ca2eb04be83bb174d3e358090f03e670c0a9133da4a06406a24d96da31
-size 2668076
+oid sha256:736e83bcc12261e6d83c46b915a26d9c4fc4fbffdf441a4b6a1bc896300acf83
+size 649051
diff --git a/audio/12.mp3 b/audio/12.mp3
index 5e27bedfea3c81a479d3f828a25a3d068a880445..6a753d53c2f383c479cb9973787491507692384c 100644
--- a/audio/12.mp3
+++ b/audio/12.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:fcc72030e7b00e1fe1cc439adc827178a773a020a0a87e41077bc42ccf42126f
-size 2587436
+oid sha256:5df94fb343c121e973b335952996dfee728aa804f5adfae688651654e30a1c1b
+size 2566124
diff --git a/audio/13.mp3 b/audio/13.mp3
index 1bf9f337fbdc8be5726c5ff504a51444456d59ae..6e2783e42ecbe6946a752ead990ac8b528f8752d 100644
--- a/audio/13.mp3
+++ b/audio/13.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:2ea80e81bd6d3a5fd0addc1a52eca7496af6a382ff83a711675817d274b92300
-size 3605804
+oid sha256:cc2e20543f4f7eada7275e0c7b9fc256023320eb7f067edc41e84ff81c5f633c
+size 3235436
diff --git a/audio/14.mp3 b/audio/14.mp3
index 1e3e602a76377523a560ac7b7c4165900e7152a5..c26a9908e7546cbac92686b02d7f46eeade3f494 100644
--- a/audio/14.mp3
+++ b/audio/14.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:a851078868d37b85ca594db6811fa7a9410764bf5e55f8de9ff57d0724843d98
-size 5389962
+oid sha256:1a692a5efca0e78afd528ef4edcff7d69e64cbb5989ae20f31ad01cf2faeb271
+size 2577644
diff --git a/audio/15.mp3 b/audio/15.mp3
index f9e8f7a5846a7ac0f5cebad7ee4214b58282fd8a..71e19cb0acdc1d576535c3cdd5b6db814c495a7b 100644
--- a/audio/15.mp3
+++ b/audio/15.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:1c914d6e441095bd6a40aa5a558418ae5a321ccf83238b073b8f4f16c6154f39
-size 1672842
+oid sha256:55a4246d9bf9bfdfc1b28b4add8b4a8c746b7473f10e28ff3f909709017b04eb
+size 2070764
diff --git a/audio/16.mp3 b/audio/16.mp3
index 90557fc63594f7799e1923114ab958ead7e8fb88..a42b02acca82456e4c67bb0a1e5e945357a4e720 100644
--- a/audio/16.mp3
+++ b/audio/16.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:736e83bcc12261e6d83c46b915a26d9c4fc4fbffdf441a4b6a1bc896300acf83
-size 649051
+oid sha256:4abd166ccade3f644d84f0b394d7c64f90c5b8adee074b6b0bcfb53b95b8e07d
+size 2814956
diff --git a/audio/17.mp3 b/audio/17.mp3
index 6a753d53c2f383c479cb9973787491507692384c..7e502ec47ad069dfe0de20c460c45c3b101b16cc 100644
--- a/audio/17.mp3
+++ b/audio/17.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:5df94fb343c121e973b335952996dfee728aa804f5adfae688651654e30a1c1b
-size 2566124
+oid sha256:e946b6ca2eb04be83bb174d3e358090f03e670c0a9133da4a06406a24d96da31
+size 2668076
diff --git a/audio/18.mp3 b/audio/18.mp3
index 41a901c1c0525914a2e5b0920e60a968ae0c312f..5e27bedfea3c81a479d3f828a25a3d068a880445 100644
--- a/audio/18.mp3
+++ b/audio/18.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:4249ca4f032cf3a438c3f004a48ed2da00c563e603ef284c892302689999bb96
-size 2980844
+oid sha256:fcc72030e7b00e1fe1cc439adc827178a773a020a0a87e41077bc42ccf42126f
+size 2587436
diff --git a/audio/19.mp3 b/audio/19.mp3
index 6e2783e42ecbe6946a752ead990ac8b528f8752d..1bf9f337fbdc8be5726c5ff504a51444456d59ae 100644
--- a/audio/19.mp3
+++ b/audio/19.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:cc2e20543f4f7eada7275e0c7b9fc256023320eb7f067edc41e84ff81c5f633c
-size 3235436
+oid sha256:2ea80e81bd6d3a5fd0addc1a52eca7496af6a382ff83a711675817d274b92300
+size 3605804
diff --git a/audio/2.mp3 b/audio/2.mp3
index 1bf9f337fbdc8be5726c5ff504a51444456d59ae..f1159238bfc01428950bb4924e0443d3e6ac9ee8 100644
--- a/audio/2.mp3
+++ b/audio/2.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:2ea80e81bd6d3a5fd0addc1a52eca7496af6a382ff83a711675817d274b92300
-size 3605804
+oid sha256:a2ef12a7de0780156446249ff4ece8225b803aa3dadaf54ec4f61efdba7e8e9a
+size 1720556
diff --git a/audio/20.mp3 b/audio/20.mp3
index c26a9908e7546cbac92686b02d7f46eeade3f494..1e3e602a76377523a560ac7b7c4165900e7152a5 100644
--- a/audio/20.mp3
+++ b/audio/20.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:1a692a5efca0e78afd528ef4edcff7d69e64cbb5989ae20f31ad01cf2faeb271
-size 2577644
+oid sha256:a851078868d37b85ca594db6811fa7a9410764bf5e55f8de9ff57d0724843d98
+size 5389962
diff --git a/audio/21.mp3 b/audio/21.mp3
index 71e19cb0acdc1d576535c3cdd5b6db814c495a7b..f9e8f7a5846a7ac0f5cebad7ee4214b58282fd8a 100644
--- a/audio/21.mp3
+++ b/audio/21.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:55a4246d9bf9bfdfc1b28b4add8b4a8c746b7473f10e28ff3f909709017b04eb
-size 2070764
+oid sha256:1c914d6e441095bd6a40aa5a558418ae5a321ccf83238b073b8f4f16c6154f39
+size 1672842
diff --git a/audio/22.mp3 b/audio/22.mp3
index a42b02acca82456e4c67bb0a1e5e945357a4e720..90557fc63594f7799e1923114ab958ead7e8fb88 100644
--- a/audio/22.mp3
+++ b/audio/22.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:4abd166ccade3f644d84f0b394d7c64f90c5b8adee074b6b0bcfb53b95b8e07d
-size 2814956
+oid sha256:736e83bcc12261e6d83c46b915a26d9c4fc4fbffdf441a4b6a1bc896300acf83
+size 649051
diff --git a/audio/23.mp3 b/audio/23.mp3
index 7f60b880f0f7e7695a157e9c4c9c588efdffeea4..6a753d53c2f383c479cb9973787491507692384c 100644
--- a/audio/23.mp3
+++ b/audio/23.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:706082dc880f42aa397f9aee429f2f8a4d62fa19417e106e646d6031f91e4f11
-size 7845164
+oid sha256:5df94fb343c121e973b335952996dfee728aa804f5adfae688651654e30a1c1b
+size 2566124
diff --git a/audio/24.mp3 b/audio/24.mp3
index 181b74283416b8efbea4fdc0be68c66a3bd13f2b..41a901c1c0525914a2e5b0920e60a968ae0c312f 100644
--- a/audio/24.mp3
+++ b/audio/24.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:de35c8502abe369ee5876eea1d354a292d135675764ea7606e835a146c7b191c
-size 8816183
+oid sha256:4249ca4f032cf3a438c3f004a48ed2da00c563e603ef284c892302689999bb96
+size 2980844
diff --git a/audio/25.mp3 b/audio/25.mp3
index 4a16df73f1dfc033e4c949b7db5e9eb94d953887..6e2783e42ecbe6946a752ead990ac8b528f8752d 100644
--- a/audio/25.mp3
+++ b/audio/25.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e831b32171884e6ac24dd23e3e54973da861f48f44a47a5e8fbecf2bc6720438
-size 4359902
+oid sha256:cc2e20543f4f7eada7275e0c7b9fc256023320eb7f067edc41e84ff81c5f633c
+size 3235436
diff --git a/audio/26.mp3 b/audio/26.mp3
index fd4876876248f9873fc1e3c60713825b13fde07b..c26a9908e7546cbac92686b02d7f46eeade3f494 100644
--- a/audio/26.mp3
+++ b/audio/26.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:e47a89f43f0de0b54b995ac790cf0041677f8944e208b009dc1caa6029fcf414
-size 888236
+oid sha256:1a692a5efca0e78afd528ef4edcff7d69e64cbb5989ae20f31ad01cf2faeb271
+size 2577644
diff --git a/audio/27.mp3 b/audio/27.mp3
index c7bf9a38a696b47ded9a2fb62a552b74dddfae4c..71e19cb0acdc1d576535c3cdd5b6db814c495a7b 100644
--- a/audio/27.mp3
+++ b/audio/27.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:0fe09dc5bf67af9a6f92fcf38b02508567fb8ce34984e744908386add67de18f
-size 3113324
+oid sha256:55a4246d9bf9bfdfc1b28b4add8b4a8c746b7473f10e28ff3f909709017b04eb
+size 2070764
diff --git a/audio/28.mp3 b/audio/28.mp3
index 25b62c47313ef75995abe35244594e65650334c3..a42b02acca82456e4c67bb0a1e5e945357a4e720 100644
--- a/audio/28.mp3
+++ b/audio/28.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:6a5c658f0ee134e0c31a9ca939ad7805a9a76a321f5e3728dd575ce734c250ae
-size 1484396
+oid sha256:4abd166ccade3f644d84f0b394d7c64f90c5b8adee074b6b0bcfb53b95b8e07d
+size 2814956
diff --git a/audio/29.mp3 b/audio/29.mp3
index 34e9c6880398dfe1d777ed32bd0b8c82b9802f0f..7f60b880f0f7e7695a157e9c4c9c588efdffeea4 100644
--- a/audio/29.mp3
+++ b/audio/29.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:73519e4c374f1b7aa73fafe009ab248ad470a0a17e9b522d265af6293a246021
-size 1006406
+oid sha256:706082dc880f42aa397f9aee429f2f8a4d62fa19417e106e646d6031f91e4f11
+size 7845164
diff --git a/audio/3.mp3 b/audio/3.mp3
index 1e3e602a76377523a560ac7b7c4165900e7152a5..43060c75571659f5d425f123e00767fddc7c82f4 100644
--- a/audio/3.mp3
+++ b/audio/3.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:a851078868d37b85ca594db6811fa7a9410764bf5e55f8de9ff57d0724843d98
-size 5389962
+oid sha256:7ab51a2fd16eed0ac819aa53ae397d0104ad8d803074b554b067f0a79e7b9682
+size 2454956
diff --git a/audio/30.mp3 b/audio/30.mp3
index 73bd073932fdb9455d990f341ce98282d850b363..181b74283416b8efbea4fdc0be68c66a3bd13f2b 100644
--- a/audio/30.mp3
+++ b/audio/30.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:a7ed9e2ba97d3231457b3e699f67130488af59df2827599cecbaa4f054e1ccf1
-size 1524716
+oid sha256:de35c8502abe369ee5876eea1d354a292d135675764ea7606e835a146c7b191c
+size 8816183
diff --git a/audio/31.mp3 b/audio/31.mp3
index 67d92439269479d26ceb87aa36b281d1a75a16c7..4a16df73f1dfc033e4c949b7db5e9eb94d953887 100644
--- a/audio/31.mp3
+++ b/audio/31.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:ff2b40d06add3f07ca26e609ca0fef0270b9f4e72bbfe33a31bf193bcee7e96b
-size 4384556
+oid sha256:e831b32171884e6ac24dd23e3e54973da861f48f44a47a5e8fbecf2bc6720438
+size 4359902
diff --git a/audio/32.mp3 b/audio/32.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..fd4876876248f9873fc1e3c60713825b13fde07b
--- /dev/null
+++ b/audio/32.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e47a89f43f0de0b54b995ac790cf0041677f8944e208b009dc1caa6029fcf414
+size 888236
diff --git a/audio/33.mp3 b/audio/33.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..c7bf9a38a696b47ded9a2fb62a552b74dddfae4c
--- /dev/null
+++ b/audio/33.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fe09dc5bf67af9a6f92fcf38b02508567fb8ce34984e744908386add67de18f
+size 3113324
diff --git a/audio/34.mp3 b/audio/34.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..25b62c47313ef75995abe35244594e65650334c3
--- /dev/null
+++ b/audio/34.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a5c658f0ee134e0c31a9ca939ad7805a9a76a321f5e3728dd575ce734c250ae
+size 1484396
diff --git a/audio/35.mp3 b/audio/35.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..34e9c6880398dfe1d777ed32bd0b8c82b9802f0f
--- /dev/null
+++ b/audio/35.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73519e4c374f1b7aa73fafe009ab248ad470a0a17e9b522d265af6293a246021
+size 1006406
diff --git a/audio/36.mp3 b/audio/36.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..73bd073932fdb9455d990f341ce98282d850b363
--- /dev/null
+++ b/audio/36.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7ed9e2ba97d3231457b3e699f67130488af59df2827599cecbaa4f054e1ccf1
+size 1524716
diff --git a/audio/37.mp3 b/audio/37.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..67d92439269479d26ceb87aa36b281d1a75a16c7
--- /dev/null
+++ b/audio/37.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff2b40d06add3f07ca26e609ca0fef0270b9f4e72bbfe33a31bf193bcee7e96b
+size 4384556
diff --git a/audio/4.mp3 b/audio/4.mp3
index f9e8f7a5846a7ac0f5cebad7ee4214b58282fd8a..68cc8a4e3cda505e23ab4ffb98c2b646cd58a437 100644
--- a/audio/4.mp3
+++ b/audio/4.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:1c914d6e441095bd6a40aa5a558418ae5a321ccf83238b073b8f4f16c6154f39
-size 1672842
+oid sha256:47b7ba73f546f58959ea56d8618464e480bb6f672b5bf5f70481d0b52dde1db5
+size 790316
diff --git a/audio/5.mp3 b/audio/5.mp3
index 90557fc63594f7799e1923114ab958ead7e8fb88..5e59eaa928ebf573ad57eaecaeb673fca66e5207 100644
--- a/audio/5.mp3
+++ b/audio/5.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:736e83bcc12261e6d83c46b915a26d9c4fc4fbffdf441a4b6a1bc896300acf83
-size 649051
+oid sha256:12f10cf027bdfcea3cd5b83232e7506cd80e10ca65d59ae652dc686644259c2a
+size 2307304
diff --git a/audio/6.mp3 b/audio/6.mp3
index 6a753d53c2f383c479cb9973787491507692384c..7e502ec47ad069dfe0de20c460c45c3b101b16cc 100644
--- a/audio/6.mp3
+++ b/audio/6.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:5df94fb343c121e973b335952996dfee728aa804f5adfae688651654e30a1c1b
-size 2566124
+oid sha256:e946b6ca2eb04be83bb174d3e358090f03e670c0a9133da4a06406a24d96da31
+size 2668076
diff --git a/audio/7.mp3 b/audio/7.mp3
index 6e2783e42ecbe6946a752ead990ac8b528f8752d..5e27bedfea3c81a479d3f828a25a3d068a880445 100644
--- a/audio/7.mp3
+++ b/audio/7.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:cc2e20543f4f7eada7275e0c7b9fc256023320eb7f067edc41e84ff81c5f633c
-size 3235436
+oid sha256:fcc72030e7b00e1fe1cc439adc827178a773a020a0a87e41077bc42ccf42126f
+size 2587436
diff --git a/audio/8.mp3 b/audio/8.mp3
index c26a9908e7546cbac92686b02d7f46eeade3f494..1bf9f337fbdc8be5726c5ff504a51444456d59ae 100644
--- a/audio/8.mp3
+++ b/audio/8.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:1a692a5efca0e78afd528ef4edcff7d69e64cbb5989ae20f31ad01cf2faeb271
-size 2577644
+oid sha256:2ea80e81bd6d3a5fd0addc1a52eca7496af6a382ff83a711675817d274b92300
+size 3605804
diff --git a/audio/9.mp3 b/audio/9.mp3
index 71e19cb0acdc1d576535c3cdd5b6db814c495a7b..1e3e602a76377523a560ac7b7c4165900e7152a5 100644
--- a/audio/9.mp3
+++ b/audio/9.mp3
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:55a4246d9bf9bfdfc1b28b4add8b4a8c746b7473f10e28ff3f909709017b04eb
-size 2070764
+oid sha256:a851078868d37b85ca594db6811fa7a9410764bf5e55f8de9ff57d0724843d98
+size 5389962
diff --git a/transcripts/uncorrected/1.txt b/transcripts/uncorrected/1.txt
index 24994713fc006cf39dff6433f341d9e5b812c141..ffc57e5992be591a97dbd7ee169ed839fe73e975 100644
--- a/transcripts/uncorrected/1.txt
+++ b/transcripts/uncorrected/1.txt
@@ -1 +1 @@
-So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.
What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.
A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner.
\ No newline at end of file
+I want to add an llms.txt to my DSR Holdings site. It's almost a pity I didn't talk about this with Shlomo, but it's a radical idea, and it actually appears to be working. I'm not sure where it reads from, whether it just parses my home page or reads the txt, but I asked Claude to pull some context data about me into the file and it seemed to work really well. So the thought I had, I mentioned Shlomo, and what I thought about for myself, is inbound LLM marketing, considering AI traffic.
It's a pity I didn't take some screenshots; in fact, I'll add a screenshots folder to the DAM, because a perfect example was the last sign-up form I saw, and I'm sure I see them almost every day: a sign-up form where what they didn't ask was whether an LLM was your referral source. I think it's absolute insanity that any company would not have LLMs at the top of their list of referral sources for traffic.
And this actually opens up a whole world of LLM analytics, where you see which LLMs are scraping your site; LLM optimization; and then basically the idea of the LLM as an inbound pipeline. If you did all this well, could you actually view large language models as an inbound traffic source, saying Google's dead, LLMs are where it's at?
I would have to try these approaches on my own site, but all I can do there is keep optimizing and see: if you typed into ChatGPT in a month and said, I need someone who's good with AI in Jerusalem, Israel, can you find any profiles? And if it worked, that would almost be the opposite of pursuing the outbound track for jobs. But as a complementary angle of attack, I think it would be very interesting to see, even as an experiment.
\ No newline at end of file
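For context, llms.txt is a plain Markdown file served from the site root that gives LLM crawlers a curated summary and link list. A minimal sketch of what the file mentioned above might contain; every heading, description and URL here is a hypothetical placeholder, not the site's actual content:

# DSR Holdings

> Independent consultant working on AI, automation and technical communication, based in Jerusalem, Israel.

## Profile

- [About](https://example.com/about): background, services and availability
- [Projects](https://example.com/projects): selected AI and data projects

## Contact

- [Contact page](https://example.com/contact): email and booking links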
diff --git a/transcripts/uncorrected/10.txt b/transcripts/uncorrected/10.txt
index e3960e6d457375f71a0aa63d07c4c8ad4af74fc2..acadef7c73d2b38c88ec7b03751c008a67eca4fc 100644
--- a/transcripts/uncorrected/10.txt
+++ b/transcripts/uncorrected/10.txt
@@ -1 +1 @@
-Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.
Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.
And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on.
\ No newline at end of file
+Another idea for a Gemini app: a recipe modifier. You give it a recipe, Gemini parses the recipe and structures the data. Then, using a nutritional database, it attempts to calculate the total fat per serving and the fat per ingredient.
This is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat target, as in under X grams of fat, or just make a general reduction within reasonable bounds, while still trying to keep the recipe the recipe.
\ No newline at end of file
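To make the arithmetic concrete, here is a minimal sketch in Python, assuming the nutritional database resolves each ingredient to grams of fat per 100 g; the ingredient names, quantities and the 5 g target are all hypothetical:

# Hypothetical parsed recipe: ingredient -> (quantity in g, fat per 100 g)
recipe = {
    "butter": (50.0, 81.0),
    "flour": (200.0, 1.0),
    "whole milk": (250.0, 3.6),
}
servings = 4

def fat_breakdown(recipe):
    """Fat in grams contributed by each ingredient."""
    return {name: qty * per100 / 100.0 for name, (qty, per100) in recipe.items()}

fat = fat_breakdown(recipe)
print(f"total fat per serving: {sum(fat.values()) / servings:.1f} g")

# "Remix" toward a target of under 5 g of fat per serving by trimming
# the fattiest ingredient 10% at a time, within reasonable bounds.
target_total = 5.0 * servings
while sum(fat.values()) > target_total:
    worst = max(fat, key=fat.get)
    qty, per100 = recipe[worst]
    recipe[worst] = (qty * 0.9, per100)
    fat = fat_breakdown(recipe)

Real remixing would substitute ingredients rather than just scaling them down, but the accounting stays the same.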
diff --git a/transcripts/uncorrected/11.txt b/transcripts/uncorrected/11.txt
index 4215c595a95e066a9ecda2a2ae08b9013686c002..48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45 100644
--- a/transcripts/uncorrected/11.txt
+++ b/transcripts/uncorrected/11.txt
@@ -1 +1 @@
-Okay, I'd like to create an app which does the following. The purpose of the app is to visualize how different countries, ideologies, systems approach common policy challenges. An example of a policy challenge that I'm just providing for explaining how I could see this working is second-hand smoke control. Some countries have very strict regulations, some countries have very lax enforcement. And probably there is not really much distinction by system of government but the user prompts it called policy visualizer and the user enters a policy challenge. So another example might be minimum alcohol purchasing laws.
Once Gemini receives this prompt, its task will be to research how different countries in the first instance approach this topic. And from that analysis, it can identify commonalities or clusters. The research process happens in the back end. And the user is shown some kind of progress indicators like researching what it's doing basically. Not a huge amount of verbosity but just a few cues so the user knows that it's not stuck or it's actually doing something.
Once Gemini concludes its first pass it will have grouped not necessarily every country in the world but based on the clusters it identifies it found groups. Each group is given a label. The label might be laissez-faire, permissive. These may be either recognized labels or what Gemini feels it's best to describe them as. And the countries are displayed with their national flags in alphabetical order.
The next functionality is that the user can click on the cluster and Gemini will describe what it is about this law that it considered them to be a cluster. In other words, the way in which they approach the challenge. That's a modal. Then the user can click on any country and it can see how that country approaches it. So I might click on the flag of Germany and either an accordion or a modal it show how Germany approaches in this case gun control and its cluster.
Country level is always a tab and only if there's other taxonomies. By taxonomy I mean that we think there's a very, Gemini says there's a very big difference and how different right-wing versus left-wing approaches we're going to do. We're going to create one more tab with that. But that should be kind of only if there's very compelling reason to do so. Or if it has significant data to share. So if it feels like there's enough data about how US states approach an issue at the state level, it might create a tab called US States and then follow the same pattern in which it groups them into clusters.
The objective is to, rather than searching through Google to see how different countries do different things, to start with your question and then get this visualisation. And I think the icing on the cake would be an analysis. So this is a visual presentation and then there may be analysis showing significant differences, some similarities. So there's like a report, a textual report, but the main tab, because I think it's the most interesting one, is the visualization, the policy visualizer.
\ No newline at end of file
+A Google idea to try would be one of the apps that connects with the Google Workspace services. Which, I don't know, maybe they've circumvented their general cautiousness.
Like voice-to-email: you record a voice memo, it transcribes it, checks your contacts, generates an email, shows you a draft, asks if that's okay, and then it sends.
\ No newline at end of file
diff --git a/transcripts/uncorrected/12.txt b/transcripts/uncorrected/12.txt
index 145fac41057e67a2489a588fef1f5d5a4b0df965..353b380ddee0d6134e7cfc905de9171524ef566e 100644
--- a/transcripts/uncorrected/12.txt
+++ b/transcripts/uncorrected/12.txt
@@ -1 +1 @@
-Alright, so the plan is for this repository, I want to create an audio media streaming interface for my home network. And there's a few things I want to roll into this one too.
Number 1 is media playback. So I have a volume on the NAS called AudioShare. The NAS is 10.0.0.50. So connect to the NAS, you'll find the AudioShare volume and let's mount that as the media library. It'll have a lot of tracks already populated.
Second thing is a soundboard. So I'll create a folder within that audio share volume called soundboard. And in the soundboard I just upload some stupid sound effects I do one to start it off Like laughing sound.
And then I also want to create a intercom system. and the functionality for the intercom is that from this computer, sorry from the interface which will be audio.residence.jlm.com I'd like to have the push to talk and the start and stop. PUSH TO TALK
So for the speaker networking this is where I would like you to give me your thoughts on what makes the most sense So I've used before MPD. I've installed MPD clients on... So the devices are, there is a device called Nursery Pi in SSH. Bedroom Pi, R-Pi and Smart TV. Each one is connected to a speaker. That's the network.
I tried MPD, putting an MPD client on each device. MPD has been the most reliable But it seems kind of a pity to use this when there are protocols like SnapServer that are designed specifically for this use case. However, using Home Assistant, I found SnapServer to be very buggy. I could never really get it to work and many more and the system that's reliable.
I find with MPD, because you need to select the speaker on the client devices, those bindings frequently broke. So I'd like to have something that kind of, the speakers are really never going to change. In the sense that I'm going to, I have a sound card for the Raspberry Pi. That's the speaker. and for as long as I use this system that's gonna be the configuration. So I want to set up something that once it's in place it's pretty much just gonna work.
So I leave that call up to you and please create a... Create a folder in the repository providing your recommendations just before you begin and what you suggest as the best implementation for the multi-speaker network whether it is broadcasting to a bunch of MCD clients from the Web UI or whether it's creating a single Snap server or something else that manages the networking I don't envision much of a need to select individual speakers by which I mean, I think that for the most part the occasions I'm using this I'll just play media to the pool but of course it would be nice to be able to select that !
\ No newline at end of file
+I'd like to create an app that does the following. The user will paste an image, or multiple images, into the image upload feature. It'll run them through Gemini, which will attempt to extract the following fields: serial number, model number, manufacturer, any readable text (OCRed into a text field), and country of manufacture.
And then, based upon the detected product, the manufacturer, the part number and the serial number, it will provide a one-line description, a multi-line description, and a spec sheet. It will provide the year it was first released on the market, and an age in years based on the current time minus the first release, correct to one decimal place.
And a deprecation level, from almost deprecated to fully deprecated, plus RRP and still-on-market, that last one a checkbox. So it'll basically take an image, extract all these fields based on the initial OCR, and then complement that with a web search.
\ No newline at end of file
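A minimal sketch of the extraction step, assuming the google-generativeai Python SDK; the model name is a placeholder and the field list simply mirrors the description above:

import json
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # assumed to come from app config
model = genai.GenerativeModel("gemini-1.5-flash")  # placeholder model name

FIELDS = ["serial_number", "model_number", "manufacturer",
          "ocr_text", "country_of_manufacture"]

def extract_fields(image_bytes: bytes) -> dict:
    """First pass: OCR-style field extraction from one product photo."""
    prompt = ("From this product photo, extract the following fields as JSON, "
              "using null for anything unreadable: " + ", ".join(FIELDS))
    resp = model.generate_content(
        [prompt, {"mime_type": "image/jpeg", "data": image_bytes}],
        generation_config={"response_mime_type": "application/json"},
    )
    return json.loads(resp.text)

The second pass (descriptions, spec sheet, release year, age to one decimal place, deprecation level) would be a follow-up call grounded in web search, keyed off the fields returned here.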
diff --git a/transcripts/uncorrected/13.txt b/transcripts/uncorrected/13.txt
index b314f3f74074ca02c2a47132cea688da6abb56d9..0ec335394a72e80887a3672f290bc5828d8227e0 100644
--- a/transcripts/uncorrected/13.txt
+++ b/transcripts/uncorrected/13.txt
@@ -1 +1 @@
-Building a Reporting Disclosure. I have a few thoughts. One, I can create a model. A model is actually quite feasible. It would be, but it's a data annotation project. It's saying, here's a PDF, here are the actual variables. In other words, here's the scope 3, scope 2, scope 1, here are the units, train it like that.
Second thought is if I did want to put together a dataset of sustainability disclosure reports, I think you could argue a public fair use clause for the PDFs being there.
And then the one I did with Gemini the other day which was basically a parsing AI tool seemed to work and could probably be used in production and which works even maybe as a way of trying to get in touch with Google is they have They have definitely an AI for good division who may let's say provide Gemini credits for the actual deployment of it on Cloud Run. Because from my first run of it, it was very, very promising for the task of parsing the reports.
And that would greatly the feature would be when it extracts the data human human in the loop is done by seeing what it is matching it to a company in the database or to a known company Let's take Google itself as an example. Detects its stock ticker, detects its stock exchange. And then you click like add to database meaning that you're adding the validated data and it could even pull out the metadata from the document pull out the source and that would be a great way of building up a human validated database in other words you take the reports you say either everything everything looks good to me or this is wrong either way you add it then of course you've got the missing financials and the rest of the world.
But that would probably be because there is thousands of sustainability disclosures, especially when you consider I think beyond the US globally, and it's beyond. So certainly it's a task for a model, but it's also human in the loop. The ultimate question is if Gemini stock performs 99% sufficiently well in the task of extracting this data from the sustainability reports. A model might actually not even be necessary because out of the box it's almost perfect. That is, I suspect, what the case would be.
\ No newline at end of file
+I'd like to create an app that is a meeting documentation assistant, and it can provide two outputs from a voice input. There's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate meeting minutes or an agenda for an upcoming meeting, so a meeting agenda; just those two, actually.
And if they choose meeting agenda, it'll also generate a short version that can fit in a calendar description, and a suggested meeting title. Upon receiving this from the user, it gets sent to Gemini, which analyzes and parses the audio and then generates the minutes or agenda, according to what the user selected, with an automatically generated title and a body that is formatted in Markdown but renders as rich text. The user can download the resulting file; for a re-run, the user would just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants and action items, and it will put those in organized fields in the output; maybe the user can edit those to rectify any mistakes. Then, when they click download, it will combine the corrected or uncorrected versions, as the case may be, to generate the actual document for the minutes or the agenda.
\ No newline at end of file
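A sketch of the organized fields the output could carry, as a Python dataclass; the names are hypothetical, and the Markdown body is whatever Gemini generates:

from dataclasses import dataclass, field

@dataclass
class MeetingDoc:
    kind: str                        # "minutes" or "agenda"
    title: str                       # automatically generated
    body_markdown: str               # rendered as rich text in the UI
    start_time: str | None = None    # auto-detected, user-editable
    end_time: str | None = None      # auto-detected, user-editable
    participants: list[str] = field(default_factory=list)
    action_items: list[str] = field(default_factory=list)
    calendar_description: str | None = None     # short version, agendas only
    suggested_meeting_title: str | None = None  # agendas only

On download, the edited field values would be merged back into the body to produce the final document.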
diff --git a/transcripts/uncorrected/14.txt b/transcripts/uncorrected/14.txt
index 8d2caf72445f7704d8455a3c2b790fdf76026b9e..243f36cf36c052964af7ebe83a792dae9e67d205 100644
--- a/transcripts/uncorrected/14.txt
+++ b/transcripts/uncorrected/14.txt
@@ -1 +1 @@
-The purpose of the repository basically is to model or suggest the idea of using AI agents to scope out gap filling and extending multi-agent networks based on their inferred understanding of the purpose of a multi-agent network.
I think iterative workflow is the best. It suggests to the user what about this agent the user says yes or no, rather than the batch system. Although it could do both, but let's make the defaults the kind of individual review system.
\ No newline at end of file
+I'd like to create an app which will do the following. It's a voice-to-voice app. The user records a voice message in the app, and the voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the voice message, as short as possible, essentially cleaning it up. This stage is not shown to the user.
What happens next is that it goes to text-to-speech and gets synthesized; the user can choose between a male or a female voice. Once the generated audio is created, it's presented to the user, who can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then offering the download.
Come up with an imaginative name for this use case.
\ No newline at end of file
diff --git a/transcripts/uncorrected/15.txt b/transcripts/uncorrected/15.txt
index 2acd54bd254b2cdcc6a5457142eb4e0e917685f0..35a55fa10abb62fbf49bc2c38d73e8cc53fca620 100644
--- a/transcripts/uncorrected/15.txt
+++ b/transcripts/uncorrected/15.txt
@@ -1 +1 @@
-Okay, I'd like to create an app with Gemini. It's going to do the following. It will be called MyEQCreator. Here's how it works.
The user will, there will be a microphone recording interface, or the user can upload a file. Either way, the user should aim to upload a three minute audio sample. Audio Sample goes to Gemini and Gemini will parse the submitted audio to determine speaker characteristics, namely their vocal range, frequency distribution. And when it does this its goal way to provide an EQ preset for the user.
I use Audacity for lightweight audio editing and if I had a Daniel voice preset that had these EQ settings built in or that could even use via a CLI I would use it but that would require maybe a second pass Gemini would generate it according to that file spec.
What would be very useful and impressive in addition would be after the analysis a five second audio sample might be visualized and the frequencies highlighted to illustrate to the user where the frequency distribution falls for their particular voice.
\ No newline at end of file
+This is called Impact Report Finder. The objective is that the user provides the name of a company, and the AI tool, Gemini, will attempt to find from the internet any voluntary sustainability disclosures or impact disclosures they've written, and it will list them by year. If they include data about their GHG emissions there will be a tick symbol, and there will be a link to the result and a direct link to the PDF.
So after the user provides the name of the company, if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify, and then the user can hit submit again. Otherwise, rather than an interactive chat app, it just provides those search results in that specific format, with the reports chronologically by year and, if there are multiple in a year, by date of release; and then, if they have GHG data, a link to the data sheet if it's separate, or just the PDF. Basically, an annotated table of links.
\ No newline at end of file
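A sketch of one row of that annotated table of links; all field names and URLs are hypothetical:

report_row = {
    "year": 2023,
    "release_date": "2024-03-15",  # orders multiple reports within a year
    "title": "2023 Impact Report",
    "has_ghg_data": True,          # rendered as the tick symbol
    "result_url": "https://example.com/sustainability",
    "pdf_url": "https://example.com/impact-2023.pdf",
    "data_sheet_url": None,        # separate GHG data sheet, if one exists
}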
diff --git a/transcripts/uncorrected/16.txt b/transcripts/uncorrected/16.txt
index b2de03d17424a2fed8639d2dfa09c98e84d864d7..e3960e6d457375f71a0aa63d07c4c8ad4af74fc2 100644
--- a/transcripts/uncorrected/16.txt
+++ b/transcripts/uncorrected/16.txt
@@ -1 +1 @@
-It would be great to run the demo. I'm opening, creating a .env. And it would be useful so people can see straight up how it works to have a page that just says demo.
And it'll have so we'll need to run the audio data through the pipeline just as if we were using it capture the results into the repo here and just display that on the front end I've just provided the Gemini API key so let's try to do that I I also deleted, I think we just need one readme and the instructions for the app can be attached.
\ No newline at end of file
+Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or, better, they will upload a PDF. That's the expectation.
Upon receiving the PDF from the user, the app will load the PDF in a frame. Gemini will identify on which page the disclosure data for Scope 3, 2 and 1 emissions is reported, and the PDF will load up in the frame, the viewer, skipped ahead to that page, with the data highlighted with a slight yellow overlay.
And beneath it, Gemini will output the top-level table, in other words the summary of the Scope 3, 2 and 1 emissions, with a short text description of what they were in summary, the units detected, Scope 3, 2 and 1 itemized, and then a disclaimer under that: that this detection is based on automated processing, may be incorrect, and so on.
\ No newline at end of file
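One way the page-identification step could be grounded before (or alongside) the Gemini call is a keyword pre-filter over the PDF text; a sketch using pypdf, with illustrative keywords:

from pypdf import PdfReader

KEYWORDS = ("scope 1", "scope 2", "scope 3", "tco2e", "emissions")

def candidate_pages(pdf_path: str) -> list[int]:
    """Return 1-based page numbers likely to contain the emissions table."""
    reader = PdfReader(pdf_path)
    hits = []
    for number, page in enumerate(reader.pages, start=1):
        text = (page.extract_text() or "").lower()
        if sum(keyword in text for keyword in KEYWORDS) >= 2:
            hits.append(number)
    return hits

The viewer would then jump to the first candidate page and draw the yellow overlay from whatever bounding regions the model reports.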
diff --git a/transcripts/uncorrected/17.txt b/transcripts/uncorrected/17.txt
index f2066bdff489a0e7af0c17fa8ccf736412194aad..4215c595a95e066a9ecda2a2ae08b9013686c002 100644
--- a/transcripts/uncorrected/17.txt
+++ b/transcripts/uncorrected/17.txt
@@ -1 +1 @@
-Hello, yeah, I'm looking for, okay, I'm trying to find a phone case for the Nord 3 5G from OnePlus. I want something which has MagSafe, a magnet built into the case itself, and something good quality and that's just a good protective case for the phone.
Do you know of any recommendations? Any ones on AliExpress or if Otterbox makes a case for this phone or anyone else? It's a slightly older OnePlus, so it's tricky to find a compatible case for it.
So if you happen to know, you should know of any products on AliExpress and product numbers, list them please.
\ No newline at end of file
+Okay, I'd like to create an app which does the following. The purpose of the app is to visualize how different countries, ideologies and systems approach common policy challenges. An example of a policy challenge, which I'm providing just to explain how I could see this working, is second-hand smoke control: some countries have very strict regulations, some countries have very lax enforcement, and probably there is not really much distinction by system of government. The user prompts it (it's called Policy Visualizer) and enters a policy challenge. Another example might be minimum alcohol purchasing laws.
Once Gemini receives this prompt, its task will be to research how different countries, in the first instance, approach this topic, and from that analysis it can identify commonalities, or clusters. The research process happens in the back end, and the user is shown some kind of progress indicator describing what it's doing. Not a huge amount of verbosity, just a few cues so the user knows that it's not stuck and is actually doing something.
Once Gemini concludes its first pass, it will have grouped countries (not necessarily every country in the world) into the clusters it identified. Each group is given a label; the label might be laissez-faire or permissive. These may be either recognized labels or whatever Gemini feels best describes them. And the countries are displayed with their national flags in alphabetical order.
The next piece of functionality is that the user can click on a cluster, and Gemini will describe what it is about these countries' laws that made it consider them a cluster; in other words, the way in which they approach the challenge. That's a modal. Then the user can click on any country and see how that country approaches it. So I might click on the flag of Germany and, in either an accordion or a modal, it shows how Germany approaches, in this case, gun control, and its cluster.
The country level is always a tab, and further tabs appear only if there are other taxonomies. By taxonomy I mean that if Gemini says there's a very big difference in how, say, right-wing versus left-wing governments approach it, we're going to create one more tab for that. But that should happen only if there's a very compelling reason to do so, or if it has significant data to share. So if it feels like there's enough data about how US states approach an issue at the state level, it might create a tab called US States and then follow the same pattern in which it groups them into clusters.
The objective is, rather than searching through Google to see how different countries do different things, to start with your question and then get this visualisation. And I think the icing on the cake would be an analysis. So this is a visual presentation, and then there may be an analysis showing significant differences and some similarities. So there's a report, a textual report, but the main tab, because I think it's the most interesting one, is the visualization, the policy visualizer.
\ No newline at end of file
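A sketch of the cluster payload the research pass could return to drive the flag grid, the cluster modal and the per-country modal; the labels and text are illustrative:

clusters = [
    {
        "label": "Strict regulation",   # recognized or Gemini-chosen label
        "rationale": "Comprehensive indoor bans with active enforcement.",
        "countries": [
            {"name": "Australia", "flag": "🇦🇺",
             "approach": "Nationwide indoor smoking bans, plain packaging."},
            {"name": "Ireland", "flag": "🇮🇪",
             "approach": "First national workplace smoking ban (2004)."},
        ],
    },
    # ... one entry per cluster; extra taxonomies become extra tabs
]

Countries within each cluster would be sorted alphabetically before display.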
diff --git a/transcripts/uncorrected/18.txt b/transcripts/uncorrected/18.txt
index 73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c..145fac41057e67a2489a588fef1f5d5a4b0df965 100644
--- a/transcripts/uncorrected/18.txt
+++ b/transcripts/uncorrected/18.txt
@@ -1 +1 @@
-I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.
I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations.
\ No newline at end of file
+Alright, so the plan for this repository is to create an audio media streaming interface for my home network. There are a few things I want to roll into this one.
Number 1 is media playback. I have a volume on the NAS called AudioShare; the NAS is at 10.0.0.50. Connect to the NAS, you'll find the AudioShare volume, and let's mount that as the media library. It already has a lot of tracks populated.
Second thing is a soundboard. I'll create a folder within that AudioShare volume called soundboard, and in it I'll just upload some silly sound effects. I'll do one to start it off, like a laughing sound.
Then I also want to create an intercom system. The functionality for the intercom is that from the interface, which will be audio.residence.jlm.com, I'd like to have push to talk, with start and stop.
For the speaker networking, this is where I'd like you to give me your thoughts on what makes the most sense. I've used MPD before, installing MPD clients on the devices. The devices are: a device called Nursery Pi (that's its SSH name), plus Bedroom Pi, R-Pi and a Smart TV. Each one is connected to a speaker. That's the network.
I tried MPD, putting an MPD client on each device, and MPD has been the most reliable. But it seems kind of a pity to use it when there are protocols like Snapcast (Snapserver) that are designed specifically for this use case. However, using Home Assistant, I found Snapserver very buggy; I could never really get it working reliably.
I find with MPD that, because you need to select the speaker on the client devices, those bindings frequently broke. The speakers are really never going to change: I have a sound card for each Raspberry Pi, that's the speaker, and for as long as I use this system that's going to be the configuration. So I want to set up something that, once it's in place, is pretty much just going to work.
So I leave that call up to you. Just before you begin, please create a folder in the repository providing your recommendations and what you suggest as the best implementation for the multi-speaker network: whether it's broadcasting to a bunch of MPD clients from the web UI, or creating a single Snapserver, or something else that manages the networking. I don't envision much need to select individual speakers; by that I mean that on most occasions I'll just play media to the whole pool, but of course it would be nice to be able to select one!
\ No newline at end of file
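For the "broadcast to a bunch of MPD clients" option mentioned above, a rough sketch using the python-mpd2 library; the hostnames are guesses at how the devices named in this note would resolve on the LAN:

```python
# Sketch: queue and play one track on every MPD endpoint in the pool.
# Hostnames and the AudioShare path are assumptions, not known config.
from mpd import MPDClient

SPEAKERS = ["nursery-pi.local", "bedroom-pi.local", "rpi.local"]

def play_everywhere(uri: str) -> None:
    for host in SPEAKERS:
        client = MPDClient()
        client.connect(host, 6600)  # default MPD port
        client.clear()
        client.add(uri)             # e.g. a path under the AudioShare mount
        client.play()
        client.close()
        client.disconnect()
```

Worth noting for the recommendations folder: looping over independent MPD daemons like this gives no sample-accurate sync between rooms, which is exactly the gap Snapcast (Snapserver plus snapclients) is designed to fill.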
diff --git a/transcripts/uncorrected/19.txt b/transcripts/uncorrected/19.txt
index 24994713fc006cf39dff6433f341d9e5b812c141..b314f3f74074ca02c2a47132cea688da6abb56d9 100644
--- a/transcripts/uncorrected/19.txt
+++ b/transcripts/uncorrected/19.txt
@@ -1 +1 @@
-So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.
What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.
A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner.
\ No newline at end of file
+On building a reporting disclosure parser, I have a few thoughts. One: I could train a model. A model is actually quite feasible, but it's a data annotation project. It means saying: here's a PDF, here are the actual variables; in other words, here's the scope 3, scope 2, scope 1, here are the units, and training it like that.
Second thought: if I did want to put together a dataset of sustainability disclosure reports, I think you could argue fair use for hosting the PDFs publicly.
Then there's the one I did with Gemini the other day, basically an AI parsing tool, which seemed to work and could probably be used in production. It might even be a way of getting in touch with Google: they definitely have an AI for Good division who might, say, provide Gemini credits for actually deploying it on Cloud Run. From my first run of it, it was very, very promising for the task of parsing the reports.
The feature would be that when it extracts the data, the human-in-the-loop step is done by reviewing what it found and matching it to a company in the database, or to a known company. Take Google itself as an example: it detects its stock ticker, detects its stock exchange, and then you click something like 'add to database', meaning you're adding the validated data. It could even pull out the metadata from the document and pull out the source. That would be a great way of building up a human-validated database: you take the reports, you say either 'everything looks good to me' or 'this is wrong', and either way you add it. Then, of course, you've still got the missing financials and the rest of the world.
There are thousands of sustainability disclosures, especially when you look beyond the US globally, so it's certainly a task for a model, but also for a human in the loop. The ultimate question is whether stock Gemini performs sufficiently well, say 99%, at extracting this data from the sustainability reports. A custom model might not even be necessary, because out of the box it's almost perfect. That, I suspect, is what the case would be.
\ No newline at end of file
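As a sense of what the "stock Gemini" pass above amounts to, a minimal sketch assuming the google-generativeai SDK and its File API; the prompt and field list are illustrative:

```python
# Hypothetical extraction pass over one sustainability report PDF.
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")
model = genai.GenerativeModel("gemini-1.5-flash")

def extract_emissions(pdf_path: str) -> str:
    report = genai.upload_file(pdf_path)  # the File API accepts PDFs
    prompt = (
        "From this sustainability report, extract scope 1, scope 2 and "
        "scope 3 emissions with their units and reporting year, as JSON."
    )
    return model.generate_content([report, prompt]).text
```

The human-in-the-loop step would then sit between this output and the "add to database" click.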
diff --git a/transcripts/uncorrected/2.txt b/transcripts/uncorrected/2.txt
index 8eb532b0a713565b3b2fae20960656ec0d9e6e2f..e9383aa5db79a22c214793ffdd4a93fc6ed49a60 100644
--- a/transcripts/uncorrected/2.txt
+++ b/transcripts/uncorrected/2.txt
@@ -1 +1 @@
-Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.
They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.
Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for.
\ No newline at end of file
+Can I just make a suggestion? Before we proceed in this direction: I think this is definitely the right conda environment, but the reason I've created these is so that we have them ready for recurrent use. LlamaIndex is very, very good and would be used for a lot of things; it's very versatile.
So before we start, let's update the conda environment to install all the different utilities we might need: tokenizing text, processing markdown, markdown to PDF, PDF splitting, all these different text utilities, even ImageMagick and typesetting utilities. Once we have that ready, then we can begin. But let's get that environment right first, and if we can, use a conda.yaml to define it.
In other words, take the existing environment, make a few edits and then install that. Just remember there's an AMD GPU, so that will affect the choice of packages.
\ No newline at end of file
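A sketch of what that conda.yaml might start out as, written from Python for convenience; every package below is a guess at the utilities mentioned here and would need checking (and, given the AMD GPU note, anything GPU-accelerated should be a ROCm rather than CUDA build):

```python
# Hypothetical starter environment spec; the package list is an
# assumption about the "text utilities" meant in the note above.
ENV_YAML = """\
name: textlab
channels: [conda-forge]
dependencies:
  - python=3.11
  - pandoc        # markdown -> PDF
  - pypdf         # PDF splitting
  - imagemagick
  - pip
  - pip:
      - llama-index
      - tiktoken  # tokenizing text
      - markdown
"""

with open("conda.yaml", "w") as f:
    f.write(ENV_YAML)
# then: conda env update -f conda.yaml
```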
diff --git a/transcripts/uncorrected/20.txt b/transcripts/uncorrected/20.txt
index 5eac1414e49e1b8618ce1ba2193d7d10b91f431a..8d2caf72445f7704d8455a3c2b790fdf76026b9e 100644
--- a/transcripts/uncorrected/20.txt
+++ b/transcripts/uncorrected/20.txt
@@ -1 +1 @@
-I'd like to consider a wee factor and then just give me your thoughts about this so currently it's a file based backend what I was wondering is would it make more sense to have a lightweight database backend SQLite let's say and and the important part of the utility which is the Hugging Face dataset push is what I'm using for the classification model would actually be a job whereby locally it will create the dataset from the local backend.
In other words, rather than having this sit in place as files, it's going to be constructed periodically. Basically when I say okay I've uploaded another batch, let's push, would that be easier and more logical to integrate with the front end?
\ No newline at end of file
+The purpose of the repository, basically, is to model or suggest the idea of using AI agents to scope out gap-filling and extension of multi-agent networks, based on an inferred understanding of the purpose of a given multi-agent network.
I think an iterative workflow is best: it suggests to the user "what about this agent?" and the user says yes or no, rather than a batch system. It could do both, but let's make the default the individual review system, as in the sketch below.
\ No newline at end of file
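A toy sketch of that default one-at-a-time review loop; propose_agent and accept are hypothetical callables standing in for the model's suggestion step and the user's yes/no:

```python
# Iterative review: suggest one gap-filling agent at a time, ask the
# user, and stop when the proposer has nothing left to suggest.
def review_suggestions(propose_agent, accept, max_rounds: int = 10) -> list:
    accepted = []
    for _ in range(max_rounds):
        suggestion = propose_agent(accepted)  # next proposed agent, or None
        if suggestion is None:
            break
        if accept(suggestion):                # user says yes or no
            accepted.append(suggestion)
    return accepted
```

A batch mode would just be the same proposer called once for a whole list, with the accept step applied per item.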
diff --git a/transcripts/uncorrected/21.txt b/transcripts/uncorrected/21.txt
index 8eb532b0a713565b3b2fae20960656ec0d9e6e2f..2acd54bd254b2cdcc6a5457142eb4e0e917685f0 100644
--- a/transcripts/uncorrected/21.txt
+++ b/transcripts/uncorrected/21.txt
@@ -1 +1 @@
-Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.
They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.
Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for.
\ No newline at end of file
+Okay, I'd like to create an app with Gemini. It's going to do the following. It will be called MyEQCreator. Here's how it works.
There will be a microphone recording interface, or the user can upload a file. Either way, the user should aim to provide a three-minute audio sample. The audio sample goes to Gemini, and Gemini will parse the submitted audio to determine speaker characteristics, namely vocal range and frequency distribution. Its goal in doing this is to provide an EQ preset for the user.
I use Audacity for lightweight audio editing, and if I had a "Daniel voice" preset with these EQ settings built in, or that I could even use via a CLI, I would use it. That would maybe require a second pass, in which Gemini generates the preset according to that file spec.
What would also be very useful and impressive is that, after the analysis, a five-second audio sample might be visualized with the frequencies highlighted, to illustrate to the user where the frequency distribution falls for their particular voice.
\ No newline at end of file
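A minimal sketch of the frequency-distribution step, assuming a mono WAV input; the band edges are illustrative, not a claim about what the real preset should use:

```python
# Share of signal energy per frequency band, via an FFT of the sample.
import numpy as np
from scipy.io import wavfile

def band_energy(path: str) -> dict[str, float]:
    rate, samples = wavfile.read(path)  # assumes mono audio
    samples = samples.astype(np.float64)
    spectrum = np.abs(np.fft.rfft(samples)) ** 2
    freqs = np.fft.rfftfreq(len(samples), d=1.0 / rate)
    bands = {
        "low (<250 Hz)": (0, 250),
        "mid (250 Hz - 4 kHz)": (250, 4000),
        "high (>4 kHz)": (4000, rate / 2),
    }
    total = spectrum.sum()
    return {
        name: float(spectrum[(freqs >= lo) & (freqs < hi)].sum() / total)
        for name, (lo, hi) in bands.items()
    }
```

The same per-band shares could drive both the EQ preset and the five-second highlighted visualization.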
diff --git a/transcripts/uncorrected/22.txt b/transcripts/uncorrected/22.txt
index 492695d3c04244eba8ee90b40f4d0ed8cbb6793b..b2de03d17424a2fed8639d2dfa09c98e84d864d7 100644
--- a/transcripts/uncorrected/22.txt
+++ b/transcripts/uncorrected/22.txt
@@ -1 +1 @@
-Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.
And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.
So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.
What this means is you can plug your voice keyboard, which I think is obvious name, into anything. You can have it bound to your desktop for most of the time, you go away for traveling for a while, you pack your box. So it's really analogous to a keyboard.
Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it.
\ No newline at end of file
+It would be great to run the demo. I'm creating a .env. And so people can see straight away how it works, it would be useful to have a page that just says demo.
For that we'll need to run the audio data through the pipeline just as if we were using it, capture the results into the repo here, and display them on the front end. I've just provided the Gemini API key, so let's try to do that. I also deleted the extras; I think we just need one readme, and the instructions for the app can be attached to it.
\ No newline at end of file
diff --git a/transcripts/uncorrected/23.txt b/transcripts/uncorrected/23.txt
index acadef7c73d2b38c88ec7b03751c008a67eca4fc..f2066bdff489a0e7af0c17fa8ccf736412194aad 100644
--- a/transcripts/uncorrected/23.txt
+++ b/transcripts/uncorrected/23.txt
@@ -1 +1 @@
-Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.
Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe.
\ No newline at end of file
+Hello, I'm trying to find a phone case for the OnePlus Nord 3 5G. I want something with MagSafe, a magnet built into the case itself, something good quality that's just a good protective case for the phone.
Do you have any recommendations? Any on AliExpress, or does OtterBox or anyone else make a case for this phone? It's a slightly older OnePlus, so it's tricky to find a compatible case.
If you happen to know of any products on AliExpress, please list them with their product numbers.
\ No newline at end of file
diff --git a/transcripts/uncorrected/24.txt b/transcripts/uncorrected/24.txt
index 48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45..73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c 100644
--- a/transcripts/uncorrected/24.txt
+++ b/transcripts/uncorrected/24.txt
@@ -1 +1 @@
-Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.
Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends.
\ No newline at end of file
+I'd like to create a content recommendation app. I'd like to get recommendations for movies to watch, things on Netflix and YouTube, that are up to date. I'm based in Israel. I like watching things that are based on a true story, and I prefer to watch things that are recent, so it has to be up to date. The pitfall with these apps is that they'll recommend stuff you've already seen or don't want to watch, so it would have to have some memory. It makes recommendations, preferably one at a time, and I can say "add to watch list" or "add to recommendation list", "not interested" or "I've seen it", and the app would need to remember these responses so it doesn't serve the same thing over and over again.
I know there's the TMDB API, which is great for getting movies; I have an API key I can provide. And I'd like to be able to say "recommend across all categories" or "just recommend movies". As for the Netflix side, it's very hard to get recommendations that are geo-sensitive for Netflix, but that would probably be the ideal: I'm based in Israel, and if stuff isn't available here, that should be considered in the recommendations.
\ No newline at end of file
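A hedged sketch of what the TMDB side could look like; the watch-provider ID for Netflix and the "based on true story" keyword ID are assumptions to verify against the TMDB docs, not confirmed values:

```python
# Hypothetical TMDB discover query: recent, true-story, watchable in IL.
import requests

def recent_true_story_movies(api_key: str) -> list[dict]:
    resp = requests.get(
        "https://api.themoviedb.org/3/discover/movie",
        params={
            "api_key": api_key,
            "watch_region": "IL",         # availability in Israel
            "with_watch_providers": "8",  # Netflix (assumed provider ID)
            "with_keywords": "9672",      # "based on true story" (assumed ID)
            "sort_by": "primary_release_date.desc",
        },
        timeout=10,
    )
    resp.raise_for_status()
    return resp.json()["results"]
```

The seen/not-interested memory would then just be a local store of TMDB IDs filtered out of these results before showing the next single recommendation.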
diff --git a/transcripts/uncorrected/25.txt b/transcripts/uncorrected/25.txt
index 353b380ddee0d6134e7cfc905de9171524ef566e..24994713fc006cf39dff6433f341d9e5b812c141 100644
--- a/transcripts/uncorrected/25.txt
+++ b/transcripts/uncorrected/25.txt
@@ -1 +1 @@
-I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.
And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.
And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that.
\ No newline at end of file
+So what I'd like to do here is create an app, really for the purpose of demonstrating the capabilities of audio input as a modality, because I think it's overlooked and it enables a lot of really interesting use cases.
As one facet of it, the user uploads a recording, which should be of just one speaker. Upon receipt, the recording is ingested by Gemini, which analyses it for the following: it will try to categorise the speaker's accent, it will estimate the words per minute at which they speak, and then it will provide a phonetic analysis, basically a linguistic analysis of their speech, such as how they pronounce certain sounds.
So: a voice clip goes in, Gemini processes it, and it produces a detailed analysis, nicely displayed.
\ No newline at end of file
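Of the analyses listed above, the words-per-minute estimate is the one that reduces to plain arithmetic; a trivial sketch, noting that real speech has pauses, so this is a rough figure rather than a measurement:

```python
# Rough words-per-minute estimate from a transcript and clip duration.
def words_per_minute(transcript: str, duration_seconds: float) -> float:
    return len(transcript.split()) / (duration_seconds / 60.0)
```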
diff --git a/transcripts/uncorrected/26.txt b/transcripts/uncorrected/26.txt
index da218ad130c3c5a5f3ca672509c6c517f4fa87f2..5eac1414e49e1b8618ce1ba2193d7d10b91f431a 100644
--- a/transcripts/uncorrected/26.txt
+++ b/transcripts/uncorrected/26.txt
@@ -1 +1 @@
-I'd like to create an app that does the following. The user will paste a screenshot from their calendar or there's a text field for calendar entries for a certain time period. Below that there is a voice recorder. The voice recorder will let out the user to record a voice message, record, pause, stop, and or retake.
When the user is instructed to narrate their timesheet for the week, and the user can also select a date for week commencing, just to validate when the first date that they're referring to in this timesheet is. When those three fields are provided by the user they get sent to Gemini and Gemini will then generate a timesheet based upon the user description with activities per day.
The meeting information that was received will be added. So I might diarize specific meetings that were referenced. So combining the two sets of data. And finally based the user might if the user includes a time spent estimate how many hours were spent per day on a certain project or task it will then calculate the estimated total hours spent and then a summary section.
This will be provided as a document which is created in markdown with the user it's rendered in rich text on the screen and the user can click download and if they do that it'll download the timesheet as a markdown file with the title automatically file name timesheet for week commencing in machine readable case.
\ No newline at end of file
+I'd like to consider a refactor; just give me your thoughts about this. Currently it's a file-based backend. What I was wondering is: would it make more sense to have a lightweight database backend, SQLite let's say? The important part of the utility, the Hugging Face dataset push (which is what I'm using for the classification model), would then be a job whereby it creates the dataset locally from the local backend.
In other words, rather than having the dataset sit in place as files, it would be constructed periodically: basically, whenever I say "okay, I've uploaded another batch, let's push". Would that be easier and more logical to integrate with the front end?
\ No newline at end of file
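A minimal sketch of that push-as-a-job idea, assuming the Hugging Face datasets library (whose Dataset.from_sql can read from a SQLite connection string, with sqlalchemy installed); the table, columns and repo ID are made-up names:

```python
# Hypothetical job: rebuild the dataset from the SQLite backend, then push.
from datasets import Dataset

def push_batch() -> None:
    ds = Dataset.from_sql(
        "SELECT audio_path, transcript, label FROM voicenotes",  # assumed table
        "sqlite:///backend.db",                                  # assumed DB file
    )
    ds.push_to_hub("your-username/voicenotes")  # hypothetical repo id
```

Wiring this to an "I've uploaded another batch, let's push" button is then a single endpoint that calls push_batch().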
diff --git a/transcripts/uncorrected/27.txt b/transcripts/uncorrected/27.txt
index 0ec335394a72e80887a3672f290bc5828d8227e0..8eb532b0a713565b3b2fae20960656ec0d9e6e2f 100644
--- a/transcripts/uncorrected/27.txt
+++ b/transcripts/uncorrected/27.txt
@@ -1 +1 @@
-I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.
And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda.
\ No newline at end of file
+Okay, what I'd like to do is create an application with Gemini. The user will upload their resume, and upon receiving the resume, the purpose of this application is to ideate jobs: positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slight pivots or bigger pivots.
It'll frame its suggestions with a job title, as in: if the user uploads their resume, it'll say "oh, you could be an AI product manager", with a salary range for this position. Maybe the user should also provide where they're based, though that should be obvious from the CV, so it can contextualize by their area: demand, who hires for it, and an analysis of why this could be a cool job for you. Knowledge gaps / upskilling: how you might want to upskill to qualify yourself for this job. Keywords: terms you might find opportunities under. And certifications you might want to pursue.
Then there's a kind of Tinder interface: thumbs up, thumbs down, with those recorded in memory so the user can go back through the suggestions they liked. So it's really a career ideation tool, a career pivot ideation tool, for the user to explore alternative directions if they feel they might not be thinking sufficiently widely about what they could be using their skills for.
\ No newline at end of file
diff --git a/transcripts/uncorrected/28.txt b/transcripts/uncorrected/28.txt
index 243f36cf36c052964af7ebe83a792dae9e67d205..492695d3c04244eba8ee90b40f4d0ed8cbb6793b 100644
--- a/transcripts/uncorrected/28.txt
+++ b/transcripts/uncorrected/28.txt
@@ -1 +1 @@
-I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.
But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.
Come up with an imaginative name for this use case.
\ No newline at end of file
+Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever asking "does this tool have voice support?". And you end up having, like what I have now, three or four Whisper subscriptions.
And once you free yourself from the keyboard, you begin to want to use it on all your computers. My desktop can run a Whisper model; my laptop really can't. And you don't want to spend a bunch of time provisioning separate environments.
So my idea is a mini PC, think something like the Raspberry Pi or Orange Pi, but presented not as an enthusiast product so much as a little edge device. A box, for all intents and purposes, which runs a very efficient speech model like Whisper on-device, doing local inference on its own hardware. Everything is optimized for this one workload. It has a USB out, over which it functions as a HID device and sends the transcribed text: inference on the device, and straight out over USB.
What this means is you can plug your voice keyboard, which I think is the obvious name, into anything. You can have it attached to your desktop most of the time; if you go away travelling for a while, you pack your box. So it's really analogous to a keyboard.
Now, the reasons I was thinking to myself this is a stupid idea: yes, you could do this stuff on-device, you could use the cloud, maybe it's too niche. But it could be quite compelling for people who are really into voice typing. And if it had Bluetooth support, your little box, your voice typing centerpiece, could also work with your tablets and your phone, and you could sort of extend around it.
\ No newline at end of file
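On the HID point above: on a Pi configured in USB gadget mode, "typing" the transcription is just writing 8-byte HID reports to the gadget device. A sketch with a deliberately minimal letters-plus-space keymap, assuming /dev/hidg0 already exists:

```python
# Emit transcribed text as USB HID keystrokes from a gadget-mode device.
# HID usage IDs: 'a' is 0x04 through 'z' at 0x1D; space is 0x2C.
KEYMAP = {c: 0x04 + i for i, c in enumerate("abcdefghijklmnopqrstuvwxyz")}
KEYMAP[" "] = 0x2C

def type_text(text: str, device: str = "/dev/hidg0") -> None:
    with open(device, "wb") as hid:
        for ch in text.lower():
            code = KEYMAP.get(ch)
            if code is None:
                continue  # unmapped character; a real keymap would cover it
            hid.write(bytes([0, 0, code, 0, 0, 0, 0, 0]))  # key down
            hid.write(bytes([0] * 8))                      # key release
            hid.flush()
```

Everything upstream of this (microphone capture, Whisper inference) never has to know what it's plugged into, which is the whole appeal.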
diff --git a/transcripts/uncorrected/29.txt b/transcripts/uncorrected/29.txt
index 35a55fa10abb62fbf49bc2c38d73e8cc53fca620..acadef7c73d2b38c88ec7b03751c008a67eca4fc 100644
--- a/transcripts/uncorrected/29.txt
+++ b/transcripts/uncorrected/29.txt
@@ -1 +1 @@
-This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.
So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links.
\ No newline at end of file
+Another idea for a Gemini app: a recipe modifier. You give it a recipe; Gemini parses the recipe and structures the data. Then, using a nutritional database, it attempts to calculate the total fat per serving and the fat per ingredient.
This is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe either to achieve a certain fat amount, as in under X grams of fat, or just to make a general reduction within reasonable bounds, while still trying to keep the recipe the recipe.
\ No newline at end of file
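The core arithmetic the remix step has to satisfy is small; a sketch with illustrative structures (an ingredient here is just a dict carrying a fat_grams value from the nutritional lookup):

```python
# Fat per serving from parsed ingredients, and the target check.
def fat_per_serving(ingredients: list[dict], servings: int) -> float:
    return sum(item["fat_grams"] for item in ingredients) / servings

def within_target(ingredients: list[dict], servings: int,
                  max_fat_grams: float) -> bool:
    return fat_per_serving(ingredients, servings) <= max_fat_grams
```

The interesting part, substituting ingredients while "keeping the recipe the recipe", is what Gemini would iterate on until within_target holds.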
diff --git a/transcripts/uncorrected/3.txt b/transcripts/uncorrected/3.txt
index 492695d3c04244eba8ee90b40f4d0ed8cbb6793b..68f0272363ffede253054f91243a4d0b8203d19b 100644
--- a/transcripts/uncorrected/3.txt
+++ b/transcripts/uncorrected/3.txt
@@ -1 +1 @@
-Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.
And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.
So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.
What this means is you can plug your voice keyboard, which I think is obvious name, into anything. You can have it bound to your desktop for most of the time, you go away for traveling for a while, you pack your box. So it's really analogous to a keyboard.
Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it.
\ No newline at end of file
+Okay, here are just a few more specific things I want to include. I see you mentioning hydration drinks, which is very important. Electrolyte tablets get very expensive, so there are a few things I'd like to explore: more cost-effective ways of making them. One idea is that I think you can buy them as a dry powder; the second is a homemade recipe.
The next set of ideas: I really, really need to always have some kind of foodstuff at home ready to eat. So, a few things in that regard. A kind of basic pantry shopping list, obviously optimized for all the dietary recommendations we've discussed here. And suggestions for homemade protein bars, for the same reason that they get very expensive bought individually, though I think protein bars alone aren't really enough; there needs to be carbohydrate as well.
The key thing I'm looking for at the moment is to always have the ingredients on hand, and ideally a kind of backup layer: I make these protein bars as the fallback, but ideally, obviously, I'd prefer to eat properly.
\ No newline at end of file
diff --git a/transcripts/uncorrected/30.txt b/transcripts/uncorrected/30.txt
index e3960e6d457375f71a0aa63d07c4c8ad4af74fc2..48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45 100644
--- a/transcripts/uncorrected/30.txt
+++ b/transcripts/uncorrected/30.txt
@@ -1 +1 @@
-Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.
Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.
And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on.
\ No newline at end of file
+Google ID8 to Try would be one of the apps that connects with the Google Workspace services, though, I don't know, maybe they've circumvented their general cautiousness there.
Like voice-to-email: to send an email, you record a voice memo; it transcribes it, checks your contacts, generates an email, shows you a draft, asks "is that okay?", and then it sends.
\ No newline at end of file
diff --git a/transcripts/uncorrected/31.txt b/transcripts/uncorrected/31.txt
index 73fdefbd1c2ebcfad9ad59e23523ae1b8526edf2..353b380ddee0d6134e7cfc905de9171524ef566e 100644
--- a/transcripts/uncorrected/31.txt
+++ b/transcripts/uncorrected/31.txt
@@ -1 +1 @@
-Okay, so I'd like to add to the VoiceNote dataset manager. So I have really annotations, there's two main objectives for this project as I currently conceive of it. And I think on the front end it would be useful to, when I'm uploading stuff and annotating, to have two separate sections for it, a little bit more clearly delineated. and so on.
So, if we have delineated, for example, where we have upload new voice note, that can firstly just be called maybe upload, next section transcripts, next section, and by next section I'm defining the headers, next section classification, next section annotations.
So in classification, I'll just add a few more recurrent ones that we should have. Prompt General, Development Prompt, Read Me Dictation, Social Media Post, and then in Annotations.
So content issues call that Audio defects and let add one for a significant background noise In audio quality issues, what I'd like to have actually maybe is, and again, we're going to, I mean, in the process of defining the annotations and might have to sort of work backwards initially, but most of them haven't been annotated yet. I'm not going to start annotating until the schema is defined so it would actually be a lagging annotation process.
The ones that are missing currently are background music. You have background noise but I think background music is actually very important because from a copyright standpoint that could be an issue. and for multi-language don't actually even have English Hebrew I'd have to keep it open-ended as to what other languages are present and I'd like to have one for background conversations actually and tagging by language so English Hebrew Arabic Russian French I'm hard these would be the ones that encounter my local environments a lot
\ No newline at end of file
+I'd like to create an app that does the following. The user will paste an image, or multiple images, into the image upload feature. The app will run them through Gemini, which will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, OCR'd readable text (in a text field), and Country of Manufacture.
Then, based upon the detected product, the manufacturer, the part number and the serial number, it will provide a one-line description, a multi-line description, and a spec sheet. It will provide the year the product was first released on the market, and its age in years based on the current time minus the first release, correct to the nearest 0.1, one decimal place.
And a deprecation level, from almost deprecated to fully deprecated, plus RRP, with "still on market" as the last one, a checkbox. So it will basically take an image, extract all these fields based on the initial OCR, and then complement that with a web search.
\ No newline at end of file
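A sketch of the "age in years, correct to one decimal place" field, assuming only the first-release year is detected (so January 1 of that year is taken as the release date):

```python
# Age in years from the detected first-release year, to one decimal place.
from datetime import date

def age_years(first_release_year: int) -> float:
    days = (date.today() - date(first_release_year, 1, 1)).days
    return round(days / 365.25, 1)
```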
diff --git a/transcripts/uncorrected/32.txt b/transcripts/uncorrected/32.txt
new file mode 100644
index 0000000000000000000000000000000000000000..da218ad130c3c5a5f3ca672509c6c517f4fa87f2
--- /dev/null
+++ b/transcripts/uncorrected/32.txt
@@ -0,0 +1 @@
+I'd like to create an app that does the following. The user will paste a screenshot from their calendar, or use a text field for calendar entries, covering a certain time period. Below that there is a voice recorder, which lets the user record a voice message: record, pause, stop, and/or retake.
The user is instructed to narrate their timesheet for the week, and can also select a "week commencing" date, just to pin down the first date the timesheet refers to. When those three fields are provided by the user, they get sent to Gemini, which will then generate a timesheet based upon the user's description, with activities per day.
The meeting information that was received will be added in, so it might diarize specific meetings that were referenced, combining the two sets of data. And finally, if the user includes a time-spent estimate (how many hours were spent per day on a certain project or task), it will calculate the estimated total hours spent, followed by a summary section.
This will be provided as a document created in markdown, rendered in rich text on screen. The user can click download, and if they do, it will download the timesheet as a markdown file with an automatic file name: "timesheet for week commencing ..." in a machine-readable case.
\ No newline at end of file
diff --git a/transcripts/uncorrected/33.txt b/transcripts/uncorrected/33.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0ec335394a72e80887a3672f290bc5828d8227e0
--- /dev/null
+++ b/transcripts/uncorrected/33.txt
@@ -0,0 +1 @@
+I'd like to create an app that is a meeting documentation assistant; it can provide two outputs from a voice input. There's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate meeting minutes or an agenda for an upcoming meeting, a meeting agenda: just those two, actually.
If they choose meeting agenda, it'll also generate a short version that can fit in a calendar description, plus a suggested meeting title. Upon receiving this from the user, it gets sent to Gemini, which analyzes and parses the audio, then generates the minutes or agenda according to what the user selected, with an automatically generated title and a body that is formatted in Markdown but renders in rich text. The user can download the resulting file, or, for a retake, just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants and action items, and it will put those in organized fields in the output; maybe the user can edit those to rectify any mistakes. Then, when they click download, it will combine the corrected or uncorrected version, as the case may be, to generate the actual document for the minutes or the agenda.
\ No newline at end of file
diff --git a/transcripts/uncorrected/34.txt b/transcripts/uncorrected/34.txt
new file mode 100644
index 0000000000000000000000000000000000000000..243f36cf36c052964af7ebe83a792dae9e67d205
--- /dev/null
+++ b/transcripts/uncorrected/34.txt
@@ -0,0 +1 @@
+I'd like to create an app which will do the following. It's a voice-to-voice app. The user records a voice message in the app, and the recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the voice message, as short as possible, essentially cleaning it up. This stage is not shown to the user.
What happens next is that it goes to text-to-speech and gets synthesized; the user can choose between a male or a female voice. Once the generated audio is created, it's presented to the user, who can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then offering the download.
Come up with an imaginative name for this use case.
\ No newline at end of file
diff --git a/transcripts/uncorrected/35.txt b/transcripts/uncorrected/35.txt
new file mode 100644
index 0000000000000000000000000000000000000000..35a55fa10abb62fbf49bc2c38d73e8cc53fca620
--- /dev/null
+++ b/transcripts/uncorrected/35.txt
@@ -0,0 +1 @@
+This is called Impact Report Finder. The objective is that the user provides the name of a company, and the AI tool, Gemini, attempts to find from the internet any voluntary sustainability disclosures or impact disclosures they've written, and lists them by year. If they include data about their GHG emissions, there will be a tick symbol, and there will be a link to the result and a direct link to the PDF.
After the user provides the name of the company, if Gemini needs to disambiguate, it will ask the user in a text box below ("can you clarify?") and the user can hit submit again. Otherwise it's not really an interactive chat app: it just provides those search results in that specific format, with the reports chronological by year (if there are multiple in a year, by date of release), and then, if they have GHG data, a link to the data sheet if it's separate, or just the PDF. Basically an annotated table of links.
\ No newline at end of file
diff --git a/transcripts/uncorrected/36.txt b/transcripts/uncorrected/36.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e3960e6d457375f71a0aa63d07c4c8ad4af74fc2
--- /dev/null
+++ b/transcripts/uncorrected/36.txt
@@ -0,0 +1 @@
+Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or, better, upload a PDF; that's the expectation.
Upon receiving the PDF from the user, the app will load the PDF in a frame. Gemini will identify on which page the disclosure data for Scope 3, 2 and 1 emissions is reported, and the PDF viewer will load skipped ahead to that page, with the data highlighted with a yellow overlay, a slight highlight.
Beneath it, Gemini will output the top-level table, in other words the summary of the Scope 3, 2 and 1 emissions, with a short text description of what they were in summary, the units detected, and Scope 3, 2 and 1 itemized; then a disclaimer under that, saying that this detection is based on automated processing, may be incorrect, and so on.
\ No newline at end of file
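One way the yellow overlay could be rendered server-side is with PyMuPDF; the page index and rectangle are assumed to come from Gemini's detection step in some agreed format:

```python
# Highlight the detected emissions table region and save a copy.
import fitz  # PyMuPDF

def highlight_emissions(pdf_path: str, page_index: int,
                        rect: tuple[float, float, float, float]) -> str:
    doc = fitz.open(pdf_path)
    page = doc[page_index]
    annot = page.add_highlight_annot(fitz.Rect(*rect))  # yellow by default
    annot.update()
    out_path = "highlighted.pdf"
    doc.save(out_path)
    return out_path
```

The in-app viewer would then open highlighted.pdf scrolled to page_index, matching the "skipped to that page" behaviour described above.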
diff --git a/transcripts/uncorrected/37.txt b/transcripts/uncorrected/37.txt
new file mode 100644
index 0000000000000000000000000000000000000000..73fdefbd1c2ebcfad9ad59e23523ae1b8526edf2
--- /dev/null
+++ b/transcripts/uncorrected/37.txt
@@ -0,0 +1 @@
+Okay, so I'd like to add to the VoiceNote dataset manager. There are two main objectives for this project as I currently conceive of it, really: uploads and annotations. I think on the front end it would be useful, when I'm uploading stuff and annotating, to have two separate sections for these, a little more clearly delineated.
For example, where we have "upload new voice note", that can firstly just be called "upload"; the next section "transcripts"; the next "classification"; the next "annotations" (by "next section" I'm defining the headers).
In classification, I'll just add a few more recurrent ones that we should have: Prompt General, Development Prompt, Readme Dictation, Social Media Post. And then on to annotations.
For content issues, call that "audio defects", and let's add one for significant background noise. In the process of defining the annotations we might have to sort of work backwards initially, but most of the notes haven't been annotated yet; I'm not going to start annotating until the schema is defined, so it would actually be a lagging annotation process.
The ones that are missing currently: background music. We have background noise, but I think background music is actually very important, because from a copyright standpoint it could be an issue. For multi-language we don't actually even have English and Hebrew; I'd have to keep it open-ended as to what other languages are present. I'd also like one for background conversations, and tagging by language: English, Hebrew, Arabic, Russian, French. These would be the ones I encounter in my local environment a lot.
\ No newline at end of file
diff --git a/transcripts/uncorrected/4.txt b/transcripts/uncorrected/4.txt
index acadef7c73d2b38c88ec7b03751c008a67eca4fc..b373213f419ec9b2e4b9ca165f42170441577ed2 100644
--- a/transcripts/uncorrected/4.txt
+++ b/transcripts/uncorrected/4.txt
@@ -1 +1 @@
-Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.
Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe.
\ No newline at end of file
+Okay, there's a bunch of memory layer projects now to explore later. There's actually no longer a separation between vector storage and memory, which makes sense because it's basically the same server, offered by API: mem0, Supermemory, Remember API, memories.api. That's a good starter list, and they can all be integrated and used; they'll do the vector backend. I'm testing it out on the documentary-finding one, just to see the concept and how it works with agents.
\ No newline at end of file
diff --git a/transcripts/uncorrected/5.txt b/transcripts/uncorrected/5.txt
index 48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45..847a19b97210af5a0d79cb54c259b54cbe8103aa 100644
--- a/transcripts/uncorrected/5.txt
+++ b/transcripts/uncorrected/5.txt
@@ -1 +1 @@
-Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.
Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends.
\ No newline at end of file
+Create now a meetings taker, a meeting minutes producer. It will have the following functionality. The user uploads a recording of a meeting that took place; that's the audio upload functionality. The next section is meeting participants: the user provides the names and identifying characteristics of the people audible in the recording, with fields for Name and Description, for example "Daniel, male voice in the recording; Hannah, female voice in the recording".
Upon receiving both of these things, it sends them to Gemini multimodal in order to produce two things. One is a transcript, a slightly cleaned-up diarized transcript; the second is minutes, automatically generated and formatted with decisions and action items for each participant.
It should also be integrated with Google Drive, so the user can connect their Google Drive, save the outputs to a folder after they've been generated, and view them in the app.
\ No newline at end of file
diff --git a/transcripts/uncorrected/6.txt b/transcripts/uncorrected/6.txt
index 353b380ddee0d6134e7cfc905de9171524ef566e..73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c 100644
--- a/transcripts/uncorrected/6.txt
+++ b/transcripts/uncorrected/6.txt
@@ -1 +1 @@
-I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.
And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.
And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that.
\ No newline at end of file
+I'd like to create a content recommendation app. I'd like to get recommendations for movies to watch, things on Netflix and YouTube, that are up to date. I'm based in Israel. I like watching things that are based on a true story, and I prefer to watch things that are recent, so it has to be up to date. The pitfall with these apps is that they'll recommend stuff you've already seen or don't want to watch, so it would have to have some memory. It makes recommendations, preferably one at a time, and I can say "add to watch list" or "add to recommendation list", "not interested" or "I've seen it", and the app would need to remember these responses so it doesn't serve the same thing over and over again.
I know there's the TMDB API, which is great for getting movies; I have an API key I can provide. And I'd like to be able to say "recommend across all categories" or "just recommend movies". As for the Netflix side, it's very hard to get recommendations that are geo-sensitive for Netflix, but that would probably be the ideal: I'm based in Israel, and if stuff isn't available here, that should be considered in the recommendations.
\ No newline at end of file
diff --git a/transcripts/uncorrected/7.txt b/transcripts/uncorrected/7.txt
index 0ec335394a72e80887a3672f290bc5828d8227e0..24994713fc006cf39dff6433f341d9e5b812c141 100644
--- a/transcripts/uncorrected/7.txt
+++ b/transcripts/uncorrected/7.txt
@@ -1 +1 @@
-I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.
And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.
It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda.
\ No newline at end of file
+So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.
What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested by Gemini, and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech: how they pronounce certain sounds, and so on.
A voice clip goes in, Gemini processes it, and then it produces a detailed analysis in a nicely displayed manner.
\ No newline at end of file
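The words-per-minute estimate named above is simple arithmetic; a small sketch follows, with the result fields as assumptions drawn from the transcript.

    # Container for the analysis Gemini would return, plus the WPM calculation.
    from dataclasses import dataclass

    @dataclass
    class SpeechAnalysis:
        accent_category: str        # Gemini's best guess at the accent
        words_per_minute: float
        phonetic_notes: list[str]   # observations on how certain sounds are pronounced

    def estimate_wpm(transcript: str, duration_seconds: float) -> float:
        # WPM = word count divided by minutes of audio.
        return len(transcript.split()) / (duration_seconds / 60)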
diff --git a/transcripts/uncorrected/8.txt b/transcripts/uncorrected/8.txt
index 243f36cf36c052964af7ebe83a792dae9e67d205..8eb532b0a713565b3b2fae20960656ec0d9e6e2f 100644
--- a/transcripts/uncorrected/8.txt
+++ b/transcripts/uncorrected/8.txt
@@ -1 +1 @@
-I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the voice message, as short as possible, essentially cleaning it up. This stage is not shown to the user.
But what happens next is that it gets text-to-speech: it gets synthesized, and the user can choose between a male or a female voice. And once the generated audio is created, it's presented to the user, and the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then offering a download.
Come up with an imaginative name for this use case.
\ No newline at end of file
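A sketch of the pipeline as described, assuming the google-generativeai SDK for the hidden condensing step; synthesize_speech() is a hypothetical stand-in for whatever TTS service the app would use.

    # Condense the user's voice message, then synthesize it in the chosen voice.
    import google.generativeai as genai

    def condense(audio_path: str) -> str:
        # Hidden intermediate step: Gemini rewrites the message as short as possible.
        audio = genai.upload_file(audio_path)
        model = genai.GenerativeModel("gemini-1.5-flash")
        return model.generate_content(
            ["Rewrite this voice message as briefly as possible, cleaned up.", audio]
        ).text

    def synthesize_speech(text: str, voice: str) -> bytes:
        # Hypothetical TTS hook; a real app would call a text-to-speech API here.
        raise NotImplementedError

    def voice_to_voice(audio_path: str, voice: str) -> bytes:
        text = condense(audio_path)                  # never shown to the user
        return synthesize_speech(text, voice=voice)  # voice is "male" or "female"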
+Okay, what I'd like to do is create an application with Gemini. The user will upload their resume, and upon receiving the resume, the purpose of this application is to ideate jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slight pivots or big pivots.
It'll frame its suggestions with a job title, as in, if the user uploads their resume, it'll say: oh, you could be an AI product manager; a salary range for this position. Maybe the user should also provide where they're based, though that should be obvious from the CV, so it can try to contextualize that by their area: demand, who hires for it, an analysis of why this could be a cool job for you. Knowledge gaps slash upskilling: how you might want to upskill to qualify yourself for this job. Keywords: keywords under which you might find opportunities for this job. And certifications that you might want to pursue.
Then a kind of Tinder interface: thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions they liked. So it's really a career ideation tool, a career pivot ideation tool, for the user to explore alternative directions if they're feeling like they might not be thinking sufficiently widely about what they could be using their skills for.
\ No newline at end of file
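A sketch of one suggestion card and the swipe memory, assuming a Python app; all field names are assumptions drawn from the fields listed in the transcript.

    # One career suggestion "card" plus the thumbs-up/down memory.
    from dataclasses import dataclass, field

    @dataclass
    class JobSuggestion:
        job_title: str              # e.g. "AI product manager"
        salary_range: str
        local_demand: str           # contextualized by where the user is based
        who_hires: list[str]
        why_cool: str               # why this could be a cool job for you
        knowledge_gaps: list[str]   # upskilling needed to qualify
        search_keywords: list[str]  # keywords under which to find opportunities
        certifications: list[str]

    @dataclass
    class SwipeMemory:
        liked: list[JobSuggestion] = field(default_factory=list)
        passed: list[JobSuggestion] = field(default_factory=list)

        def record(self, suggestion: JobSuggestion, thumbs_up: bool) -> None:
            # Recorded so the user can go back through the suggestions they liked.
            (self.liked if thumbs_up else self.passed).append(suggestion)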
diff --git a/transcripts/uncorrected/9.txt b/transcripts/uncorrected/9.txt
index 35a55fa10abb62fbf49bc2c38d73e8cc53fca620..492695d3c04244eba8ee90b40f4d0ed8cbb6793b 100644
--- a/transcripts/uncorrected/9.txt
+++ b/transcripts/uncorrected/9.txt
@@ -1 +1 @@
-This is called Impact Report Finder. The objective is that the user will provide the name of a company, and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures or impact disclosures that they've published on the internet, and it will list them by year. If they include data about their GHG emissions, there will be a tick symbol, and there will be a link to the result and a direct link to the PDF.
So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify, and then the user can hit submit again. Otherwise it's not an interactive chat app; it just provides those search results in that specific format, with the reports ordered chronologically by year (and if there are multiple in a year, by date of release), and then, if they have GHG data, a link to the data sheet if it's separate, or just the PDF. Basically an annotated table of links.
\ No newline at end of file
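A sketch of one row of the annotated table of links, assuming a Python app; field names are assumptions based on the format described above.

    # One entry in the chronological table of disclosure reports.
    from dataclasses import dataclass

    @dataclass
    class ReportRow:
        year: int
        title: str
        has_ghg_data: bool   # rendered as the tick symbol
        result_url: str      # link to the search result
        pdf_url: str         # direct link to the PDF

    def order_rows(rows: list[ReportRow]) -> list[ReportRow]:
        # Chronological by year; date of release would break ties within a year.
        return sorted(rows, key=lambda r: r.year)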
+Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing, and I've become very dependent on it for voice typing. Unfortunately, on Linux specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, but what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for: does this tool have voice support? And you end up having, like what I have now, three or four Whisper subscriptions.
And once you free yourself from the keyboard, literally, you begin to want to use it on all your computers. And some of them can't keep up: my desktop can run a Whisper model, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.
So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but presented not so much as an enthusiast product as a little edge device. A box, for all intents and purposes, which runs a very efficient speech model like Whisper on device and does local inference on its own hardware. Everything is optimized for this one workload. It has a USB out, and over that USB out it functions as an HID device and sends the transcribed text. Inference on the device, and straight out over USB.
What this means is you can plug your voice keyboard, which I think is the obvious name, into anything. You can have it attached to your desktop most of the time; if you go away travelling for a while, you pack your box. So it's really analogous to a keyboard.
Now, the reason I was thinking to myself that this is a stupid idea is: yes, you could do this stuff on device, you could use the cloud, and maybe it's too niche. But it could be quite appealing for people who are really into voice typing and want a dedicated way to do it. And if it had Bluetooth support, your little box, your voice typing centerpiece, could also work with your tablet and your phone, and you could sort of extend around it.
\ No newline at end of file
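The HID-keyboard trick described here is concrete enough to sketch: on a Linux board configured in USB gadget mode, /dev/hidg0 accepts standard 8-byte boot-keyboard reports. The tiny key map below is illustrative; a real device would cover the full character set.

    # "Type" transcribed text out of the box by writing HID keyboard reports.
    KEYMAP = {  # char -> (modifier byte, HID usage id)
        "a": (0x00, 0x04), "b": (0x00, 0x05), "c": (0x00, 0x06),
        " ": (0x00, 0x2C), "\n": (0x00, 0x28),
        "A": (0x02, 0x04),  # 0x02 = left shift held
    }

    def type_text(text: str, device: str = "/dev/hidg0") -> None:
        with open(device, "wb") as hid:
            for char in text:
                modifier, key = KEYMAP[char]
                hid.write(bytes([modifier, 0, key, 0, 0, 0, 0, 0]))  # key down
                hid.write(bytes([0] * 8))                            # all keys up
                hid.flush()

    # After local Whisper inference produces a transcript, the box just types it:
    # type_text(transcript)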