diff --git a/annotations/11.json b/annotations/11.json
new file mode 100644
index 0000000000000000000000000000000000000000..df9d6461d80fb3265b7b8db50cac58abc103452e
--- /dev/null
+++ b/annotations/11.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f1c4f177ccfe50a3696c0c942f45438981346ec81c99464b80ba194b5338f3b
+size 603
diff --git a/annotations/12.json b/annotations/12.json
new file mode 100644
index 0000000000000000000000000000000000000000..4affe1227e3c441d64766162a2e688f6f78445fd
--- /dev/null
+++ b/annotations/12.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23b04809b340ed4351ff5b213e95b16a96224053a74674a3a1ac372e4e562d25
+size 603
diff --git a/annotations/13.json b/annotations/13.json
new file mode 100644
index 0000000000000000000000000000000000000000..244fe3b219be7a39279e71764ef4042847553248
--- /dev/null
+++ b/annotations/13.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4dd1d1128982fb4615867f14d828dd1e09b550fb6c220cddf57d0e8e66fc4588
+size 604
diff --git a/annotations/14.json b/annotations/14.json
new file mode 100644
index 0000000000000000000000000000000000000000..76d5ae2b9e2f11f3774df97d571d6391920ab5dd
--- /dev/null
+++ b/annotations/14.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f33eb4953b7b482e32c591b6f10a065ca520b77ee8cd4d36b77af25f0f910d1e
+size 599
diff --git a/annotations/15.json b/annotations/15.json
new file mode 100644
index 0000000000000000000000000000000000000000..254724e0bc3910bb84f081b3609f8d9805ef1bd9
--- /dev/null
+++ b/annotations/15.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfbe3c38c4fe4fbe7cd336c5e3c86bb3f53fff082b0bb0fdab7a9dda85ffa1a2
+size 602
diff --git a/annotations/16.json b/annotations/16.json
new file mode 100644
index 0000000000000000000000000000000000000000..0ba7c6c93a7666f98666add35ea506cf2106da01
--- /dev/null
+++ b/annotations/16.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f615f11f470c0e2538230b7ca93a9809a575b51d40ad9d1e8c3a13b46285c650
+size 599
diff --git a/annotations/17.json b/annotations/17.json
new file mode 100644
index 0000000000000000000000000000000000000000..696759c15f2c5ee6e429254fd4823b4ab98c77e1
--- /dev/null
+++ b/annotations/17.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3751bb4cd150a363d1a335dfaa4f84500cd63a108b180343313b3679c015396e
+size 599
diff --git a/annotations/18.json b/annotations/18.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ee8864498f7e6eca12eb21c9e9fb32b549398b
--- /dev/null
+++ b/annotations/18.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd3ae4cdd7180b1689f90371faf19e9e70db9f7c0c34ebdc3c852803cfc2bf9d
+size 604
diff --git a/annotations/19.json b/annotations/19.json
new file mode 100644
index 0000000000000000000000000000000000000000..f987f5005d208af71e318379c099bbfd6bbcf140
--- /dev/null
+++ b/annotations/19.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfef98966c657fe946f612142a5987f16ce3139213305b19f03bd5fcc0e607fc
+size 598
diff --git a/annotations/20.json b/annotations/20.json
new file mode 100644
index 0000000000000000000000000000000000000000..08abb78e02ff7f24720ee21a7958fc059b82e848
--- /dev/null
+++ b/annotations/20.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f21aa8fea1718711c927d7a6f3e3bf2b1a176b997c4e09d77e8983d1501239c
+size 599
diff --git a/annotations/21.json b/annotations/21.json
new file mode 100644
index 0000000000000000000000000000000000000000..660a4d8f559dc589d72d3aa398048ed58a421205
--- /dev/null
+++ b/annotations/21.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55463bdd516496a7c512b8452cba5f761ed505abeef49ee3b4b3a9fa9df98f18
+size 603
diff --git a/annotations/22.json b/annotations/22.json
new file mode 100644
index 0000000000000000000000000000000000000000..a7414d3aa1a0ac33de288534bcad553b3c701a84
--- /dev/null
+++ b/annotations/22.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45147b91b931b686dc1e6d395483a6ddb74482dd045b0f88fe8363bfd1527b53
+size 604
diff --git a/annotations/23.json b/annotations/23.json
new file mode 100644
index 0000000000000000000000000000000000000000..038a3c4fa73a731358ce1273b9e1146fb3ab4a71
--- /dev/null
+++ b/annotations/23.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e74adfbe606649bcbbec8b7436a2cd5a7eba62609c84900f4b83b8dc8e83ed77
+size 596
diff --git a/annotations/24.json b/annotations/24.json
new file mode 100644
index 0000000000000000000000000000000000000000..6744fb89b6c47e0883f0c194e79276284290c6fb
--- /dev/null
+++ b/annotations/24.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c0f6dc940546068525917dc4821dda6535bd186e7df6d0c9d2498cd1dc48a70
+size 598
diff --git a/annotations/25.json b/annotations/25.json
new file mode 100644
index 0000000000000000000000000000000000000000..5959486353a9f011c55f162ee6e4657d2260a2f4
--- /dev/null
+++ b/annotations/25.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adfb4bd4dc58ab10a139a92bab2550d6d29063260ecd5f906b078b7c60af0ae8
+size 598
diff --git a/annotations/26.json b/annotations/26.json
new file mode 100644
index 0000000000000000000000000000000000000000..bdde05482d09b55340775277b9e4f66634807f85
--- /dev/null
+++ b/annotations/26.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4336e3a5f450ab3c528e606b5a02e39bcb1c4e7488e0acbbee7784cc444c93fe
+size 603
diff --git a/annotations/27.json b/annotations/27.json
new file mode 100644
index 0000000000000000000000000000000000000000..29a6c795b0f523ba4112feac518a8e49171aac8e
--- /dev/null
+++ b/annotations/27.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa16f92d2cfa0e80af3f2915abdb4d4e31c96d15c74576e3904fe1de72dee0e9
+size 602
diff --git a/annotations/28.json b/annotations/28.json
new file mode 100644
index 0000000000000000000000000000000000000000..178c404dbd9b9454213d00d6328b76d23e744659
--- /dev/null
+++ b/annotations/28.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fe952254097a84d1720617b0d8b593872a667fa09901a847b9368aaf9b3f7f6
+size 600
diff --git a/annotations/29.json b/annotations/29.json
new file mode 100644
index 0000000000000000000000000000000000000000..60bc49a2b1325b7f5d149c2c6870c789d4deab96
--- /dev/null
+++ b/annotations/29.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:837d7dea72fa1401b3b861d1282f52727c7955a98eb76db0b01eda5a54f08c25
+size 601
diff --git a/annotations/30.json b/annotations/30.json
new file mode 100644
index 0000000000000000000000000000000000000000..fab3aec15bf2550b7466b5ee96881dba9b033744
--- /dev/null
+++ b/annotations/30.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5da54d91c446e68faa415b7012b9381cb9963aa2c53b83d82c977f3862b9a7e
+size 598
diff --git a/annotations/31.json b/annotations/31.json
new file mode 100644
index 0000000000000000000000000000000000000000..b3a641b3e56eaa387e5c0e28b0599a2393f3b092
--- /dev/null
+++ b/annotations/31.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c7432d6afb78d676eb0647f804f486661251f8d45f0c0dd862adc717704e3e7
+size 602
diff --git a/audio/11.mp3 b/audio/11.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..7e502ec47ad069dfe0de20c460c45c3b101b16cc
--- /dev/null
+++ b/audio/11.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e946b6ca2eb04be83bb174d3e358090f03e670c0a9133da4a06406a24d96da31
+size 2668076
diff --git a/audio/12.mp3 b/audio/12.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..5e27bedfea3c81a479d3f828a25a3d068a880445
--- /dev/null
+++ b/audio/12.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcc72030e7b00e1fe1cc439adc827178a773a020a0a87e41077bc42ccf42126f
+size 2587436
diff --git a/audio/13.mp3 b/audio/13.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..1bf9f337fbdc8be5726c5ff504a51444456d59ae
--- /dev/null
+++ b/audio/13.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ea80e81bd6d3a5fd0addc1a52eca7496af6a382ff83a711675817d274b92300
+size 3605804
diff --git a/audio/14.mp3 b/audio/14.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..1e3e602a76377523a560ac7b7c4165900e7152a5
--- /dev/null
+++ b/audio/14.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a851078868d37b85ca594db6811fa7a9410764bf5e55f8de9ff57d0724843d98
+size 5389962
diff --git a/audio/15.mp3 b/audio/15.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..f9e8f7a5846a7ac0f5cebad7ee4214b58282fd8a
--- /dev/null
+++ b/audio/15.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c914d6e441095bd6a40aa5a558418ae5a321ccf83238b073b8f4f16c6154f39
+size 1672842
diff --git a/audio/16.mp3 b/audio/16.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..90557fc63594f7799e1923114ab958ead7e8fb88
--- /dev/null
+++ b/audio/16.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:736e83bcc12261e6d83c46b915a26d9c4fc4fbffdf441a4b6a1bc896300acf83
+size 649051
diff --git a/audio/17.mp3 b/audio/17.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..6a753d53c2f383c479cb9973787491507692384c
--- /dev/null
+++ b/audio/17.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5df94fb343c121e973b335952996dfee728aa804f5adfae688651654e30a1c1b
+size 2566124
diff --git a/audio/18.mp3 b/audio/18.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..41a901c1c0525914a2e5b0920e60a968ae0c312f
--- /dev/null
+++ b/audio/18.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4249ca4f032cf3a438c3f004a48ed2da00c563e603ef284c892302689999bb96
+size 2980844
diff --git a/audio/19.mp3 b/audio/19.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..6e2783e42ecbe6946a752ead990ac8b528f8752d
--- /dev/null
+++ b/audio/19.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc2e20543f4f7eada7275e0c7b9fc256023320eb7f067edc41e84ff81c5f633c
+size 3235436
diff --git a/audio/20.mp3 b/audio/20.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..c26a9908e7546cbac92686b02d7f46eeade3f494
--- /dev/null
+++ b/audio/20.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a692a5efca0e78afd528ef4edcff7d69e64cbb5989ae20f31ad01cf2faeb271
+size 2577644
diff --git a/audio/21.mp3 b/audio/21.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..71e19cb0acdc1d576535c3cdd5b6db814c495a7b
--- /dev/null
+++ b/audio/21.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55a4246d9bf9bfdfc1b28b4add8b4a8c746b7473f10e28ff3f909709017b04eb
+size 2070764
diff --git a/audio/22.mp3 b/audio/22.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..a42b02acca82456e4c67bb0a1e5e945357a4e720
--- /dev/null
+++ b/audio/22.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4abd166ccade3f644d84f0b394d7c64f90c5b8adee074b6b0bcfb53b95b8e07d
+size 2814956
diff --git a/audio/23.mp3 b/audio/23.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..7f60b880f0f7e7695a157e9c4c9c588efdffeea4
--- /dev/null
+++ b/audio/23.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:706082dc880f42aa397f9aee429f2f8a4d62fa19417e106e646d6031f91e4f11
+size 7845164
diff --git a/audio/24.mp3 b/audio/24.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..181b74283416b8efbea4fdc0be68c66a3bd13f2b
--- /dev/null
+++ b/audio/24.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de35c8502abe369ee5876eea1d354a292d135675764ea7606e835a146c7b191c
+size 8816183
diff --git a/audio/25.mp3 b/audio/25.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..4a16df73f1dfc033e4c949b7db5e9eb94d953887
--- /dev/null
+++ b/audio/25.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e831b32171884e6ac24dd23e3e54973da861f48f44a47a5e8fbecf2bc6720438
+size 4359902
diff --git a/audio/26.mp3 b/audio/26.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..fd4876876248f9873fc1e3c60713825b13fde07b
--- /dev/null
+++ b/audio/26.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e47a89f43f0de0b54b995ac790cf0041677f8944e208b009dc1caa6029fcf414
+size 888236
diff --git a/audio/27.mp3 b/audio/27.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..c7bf9a38a696b47ded9a2fb62a552b74dddfae4c
--- /dev/null
+++ b/audio/27.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0fe09dc5bf67af9a6f92fcf38b02508567fb8ce34984e744908386add67de18f
+size 3113324
diff --git a/audio/28.mp3 b/audio/28.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..25b62c47313ef75995abe35244594e65650334c3
--- /dev/null
+++ b/audio/28.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a5c658f0ee134e0c31a9ca939ad7805a9a76a321f5e3728dd575ce734c250ae
+size 1484396
diff --git a/audio/29.mp3 b/audio/29.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..34e9c6880398dfe1d777ed32bd0b8c82b9802f0f
--- /dev/null
+++ b/audio/29.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73519e4c374f1b7aa73fafe009ab248ad470a0a17e9b522d265af6293a246021
+size 1006406
diff --git a/audio/30.mp3 b/audio/30.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..73bd073932fdb9455d990f341ce98282d850b363
--- /dev/null
+++ b/audio/30.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7ed9e2ba97d3231457b3e699f67130488af59df2827599cecbaa4f054e1ccf1
+size 1524716
diff --git a/audio/31.mp3 b/audio/31.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..67d92439269479d26ceb87aa36b281d1a75a16c7
--- /dev/null
+++ b/audio/31.mp3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff2b40d06add3f07ca26e609ca0fef0270b9f4e72bbfe33a31bf193bcee7e96b
+size 4384556
diff --git a/transcripts/uncorrected/11.txt b/transcripts/uncorrected/11.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4215c595a95e066a9ecda2a2ae08b9013686c002
--- /dev/null
+++ b/transcripts/uncorrected/11.txt
@@ -0,0 +1 @@
+Okay, I'd like to create an app which does the following. The purpose of the app is to visualize how different countries, ideologies, and systems approach common policy challenges. An example of a policy challenge, which I'm providing just to explain how I could see this working, is second-hand smoke control: some countries have very strict regulations, some have very lax enforcement, and probably there is not much distinction by system of government. The app is called Policy Visualizer, and the user enters a policy challenge. Another example might be minimum alcohol purchasing laws.

Once Gemini receives this prompt, its task will be to research how different countries, in the first instance, approach this topic. From that analysis, it can identify commonalities, or clusters. The research process happens in the back end, and the user is shown some kind of progress indicator, like "researching...", describing what it's doing. Not a huge amount of verbosity, just a few cues so the user knows it's not stuck and is actually doing something.

Once Gemini concludes its first pass, it will have grouped countries, not necessarily every country in the world, into the clusters it identified. Each group is given a label; the label might be laissez-faire or permissive. These may be either recognized labels or whatever Gemini feels best describes them. Within each group, the countries are displayed with their national flags in alphabetical order.

The next piece of functionality is that the user can click on a cluster, and Gemini will describe what it is about these countries' laws that led it to consider them a cluster; in other words, the way in which they approach the challenge. That's a modal. Then the user can click on any country and see how that country approaches it. So I might click on the flag of Germany and, in either an accordion or a modal, it shows how Germany approaches, in this case, gun control, and its cluster.

The country level is always a tab, and additional tabs appear only if there are other taxonomies. By taxonomy I mean that if Gemini says there's a very big difference in how, say, right-wing versus left-wing governments approach the challenge, we're going to create one more tab for that. But it should do that only if there's a very compelling reason to, or if it has significant data to share. So if it feels there's enough data about how US states approach an issue at the state level, it might create a tab called US States and then follow the same pattern, in which it groups them into clusters.

The objective is, rather than searching through Google to see how different countries do different things, to start with your question and then get this visualisation. And I think the icing on the cake would be an analysis. So this is a visual presentation, and then there may be an analysis showing significant differences and some similarities. So there's a textual report as well, but the main tab, because I think it's the most interesting one, is the visualization: the policy visualizer.
\ No newline at end of file
diff --git a/transcripts/uncorrected/12.txt b/transcripts/uncorrected/12.txt
new file mode 100644
index 0000000000000000000000000000000000000000..145fac41057e67a2489a588fef1f5d5a4b0df965
--- /dev/null
+++ b/transcripts/uncorrected/12.txt
@@ -0,0 +1 @@
+Alright, so the plan for this repository is that I want to create an audio media streaming interface for my home network. And there are a few things I want to roll into this one too.

Number one is media playback. I have a volume on the NAS called AudioShare; the NAS is at 10.0.0.50. So connect to the NAS, you'll find the AudioShare volume, and let's mount that as the media library. It will already be populated with a lot of tracks.

The second thing is a soundboard. I'll create a folder within that AudioShare volume called soundboard, and into it I'll just upload some stupid sound effects. I'll do one to start it off, like a laughing sound.

And then I also want to create an intercom system. The functionality for the intercom is that from this computer, sorry, from the interface, which will be audio.residence.jlm.com, I'd like to have push-to-talk, with start and stop.

So for the speaker networking, this is where I would like you to give me your thoughts on what makes the most sense. I've used MPD before; I've installed MPD clients on the devices. The devices are: a device called Nursery Pi (reachable over SSH), Bedroom Pi, R-Pi, and a Smart TV. Each one is connected to a speaker. That's the network.

I tried MPD, putting an MPD client on each device. MPD has been the most reliable, but it seems kind of a pity to use it when there are protocols like Snapserver that are designed specifically for this use case. However, using Home Assistant, I found Snapserver to be very buggy; I could never really get it to work, and I want a system that's reliable.

I find with MPD, because you need to select the speaker on the client devices, those bindings frequently broke. The speakers are really never going to change: I have a sound card for the Raspberry Pi, that's the speaker, and for as long as I use this system, that's going to be the configuration. So I want to set up something that, once it's in place, is pretty much just going to work.

So I leave that call up to you. Just before you begin, please create a folder in the repository providing your recommendations and what you suggest as the best implementation for the multi-speaker network: whether it's broadcasting to a bunch of MPD clients from the web UI, or creating a single Snapserver, or something else that manages the networking. I don't envision much of a need to select individual speakers, by which I mean that for the most part, on the occasions I'm using this, I'll just play media to the whole pool; but of course it would be nice to be able to select them!
\ No newline at end of file
diff --git a/transcripts/uncorrected/13.txt b/transcripts/uncorrected/13.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b314f3f74074ca02c2a47132cea688da6abb56d9
--- /dev/null
+++ b/transcripts/uncorrected/13.txt
@@ -0,0 +1 @@
+Building a reporting disclosure parser. I have a few thoughts. One, I can create a model; a model is actually quite feasible, but it's a data annotation project. It's saying: here's a PDF, here are the actual variables. In other words, here's the Scope 3, Scope 2, Scope 1, here are the units; train it like that.

My second thought is that if I did want to put together a dataset of sustainability disclosure reports, I think you could argue a public fair-use case for the PDFs being there.

And then there's the one I did with Gemini the other day, which was basically an AI parsing tool. It seemed to work, could probably be used in production, and might even work as a way of trying to get in touch with Google: they definitely have an AI for Good division who might, let's say, provide Gemini credits for actually deploying it on Cloud Run. Because from my first run of it, it was very, very promising for the task of parsing the reports.

The great feature would be that when it extracts the data, the human-in-the-loop step is done by seeing what it extracted and matching it to a company in the database, or to a known company. Let's take Google itself as an example: it detects its stock ticker, detects its stock exchange. And then you click "add to database", meaning that you're adding the validated data. It could even pull out the metadata from the document, pull out the source, and that would be a great way of building up a human-validated database. In other words, you take the reports and you say either "everything looks good to me" or "this is wrong"; either way, you add it. Then of course you've still got the missing financials and the rest of the world.

But there are thousands of sustainability disclosures, especially when you look beyond the US to the rest of the world. So certainly it's a task for a model, but it's also human-in-the-loop. The ultimate question is whether stock Gemini performs sufficiently well, say 99%, at the task of extracting this data from the sustainability reports. A custom model might actually not even be necessary, because out of the box it's almost perfect. That, I suspect, is what the case would be.
\ No newline at end of file
diff --git a/transcripts/uncorrected/14.txt b/transcripts/uncorrected/14.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8d2caf72445f7704d8455a3c2b790fdf76026b9e
--- /dev/null
+++ b/transcripts/uncorrected/14.txt
@@ -0,0 +1 @@
+The purpose of the repository, basically, is to model or suggest the idea of using AI agents to scope out gap-filling and extending multi-agent networks, based on their inferred understanding of the purpose of a multi-agent network.

I think an iterative workflow is best: it suggests to the user, "what about this agent?", and the user says yes or no, rather than a batch system. It could do both, but let's make the individual review system the default.
\ No newline at end of file
diff --git a/transcripts/uncorrected/15.txt b/transcripts/uncorrected/15.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2acd54bd254b2cdcc6a5457142eb4e0e917685f0
--- /dev/null
+++ b/transcripts/uncorrected/15.txt
@@ -0,0 +1 @@
+Okay, I'd like to create an app with Gemini. It's going to do the following. It will be called MyEQCreator. Here's how it works.

There will be a microphone recording interface, or the user can upload a file. Either way, the user should aim to provide a three-minute audio sample. The audio sample goes to Gemini, and Gemini will parse the submitted audio to determine speaker characteristics, namely their vocal range and frequency distribution. And when it does this, its goal is to provide an EQ preset for the user.

I use Audacity for lightweight audio editing, and if I had a "Daniel voice" preset that had these EQ settings built in, or that I could even use via a CLI, I would use it. But that would require maybe a second pass: Gemini would generate it according to that file spec.

What would be very useful and impressive, in addition, would be if after the analysis a five-second audio sample were visualized with the frequencies highlighted, to illustrate to the user where the frequency distribution falls for their particular voice.
\ No newline at end of file
diff --git a/transcripts/uncorrected/16.txt b/transcripts/uncorrected/16.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b2de03d17424a2fed8639d2dfa09c98e84d864d7
--- /dev/null
+++ b/transcripts/uncorrected/16.txt
@@ -0,0 +1 @@
+It would be great to run the demo. I'm creating a .env. And it would be useful, so people can see straight up how it works, to have a page that just says Demo.

We'll need to run the audio data through the pipeline just as if we were using it, capture the results into the repo here, and just display that on the front end. I've just provided the Gemini API key, so let's try to do that. I also did some deleting; I think we just need one README, and the instructions for the app can be attached.
\ No newline at end of file
diff --git a/transcripts/uncorrected/17.txt b/transcripts/uncorrected/17.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f2066bdff489a0e7af0c17fa8ccf736412194aad
--- /dev/null
+++ b/transcripts/uncorrected/17.txt
@@ -0,0 +1 @@
+Hello, yeah, I'm trying to find a phone case for the Nord 3 5G from OnePlus. I want something which has MagSafe, a magnet built into the case itself, and something good quality that's just a good protective case for the phone.

Do you have any recommendations? Any on AliExpress, or does Otterbox make a case for this phone, or anyone else? It's a slightly older OnePlus, so it's tricky to find a compatible case for it.

So if you happen to know of any products on AliExpress, with product numbers, please list them.
\ No newline at end of file
diff --git a/transcripts/uncorrected/18.txt b/transcripts/uncorrected/18.txt
new file mode 100644
index 0000000000000000000000000000000000000000..73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c
--- /dev/null
+++ b/transcripts/uncorrected/18.txt
@@ -0,0 +1 @@
+I'd like to create a content recommendation app. I'd like to get recommendations for movies to watch, things on Netflix and YouTube, that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent, so it has to be up to date. And the pitfall with these apps is that they'll recommend stuff that you've already seen or don't want to watch, so it would have to have some memory. It makes recommendations, preferably one at a time, and I can say "add to watch list" (or "add to recommendation list"), "not interested", or "I've seen it", and the app would need to remember these responses so that it doesn't suggest the same thing over and over again.

I know there's the TMDB API, which is great for getting movies; I have an API key I can provide. And I'd like to maybe say recommend across all categories, or just recommend movies. As for the Netflix thing, it's very hard to get recommendations that are geo-sensitive for Netflix, but that would probably be the ideal: meaning that I'm based in Israel, and if stuff isn't available here, that should be taken into account in the recommendations.
\ No newline at end of file
diff --git a/transcripts/uncorrected/19.txt b/transcripts/uncorrected/19.txt
new file mode 100644
index 0000000000000000000000000000000000000000..24994713fc006cf39dff6433f341d9e5b812c141
--- /dev/null
+++ b/transcripts/uncorrected/19.txt
@@ -0,0 +1 @@
+So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality, because I think it's overlooked, and it brings a lot of really interesting use cases.

What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. Upon receiving the recording, it'll be ingested by Gemini, and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech: how they pronounce certain sounds, and many other things.

So: a voice clip, Gemini processes it, and then it produces a detailed analysis, displayed nicely.
\ No newline at end of file
diff --git a/transcripts/uncorrected/20.txt b/transcripts/uncorrected/20.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5eac1414e49e1b8618ce1ba2193d7d10b91f431a
--- /dev/null
+++ b/transcripts/uncorrected/20.txt
@@ -0,0 +1 @@
+I'd like to consider a refactor; just give me your thoughts about this. Currently it's a file-based backend. What I was wondering is: would it make more sense to have a lightweight database backend, SQLite let's say? And the important part of the utility, the Hugging Face dataset push, which is what I'm using for the classification model, would actually be a job whereby, locally, it creates the dataset from the local backend.

In other words, rather than having this sit in place as files, the dataset would be constructed periodically, basically when I say, okay, I've uploaded another batch, let's push. Would that be easier and more logical to integrate with the front end?
\ No newline at end of file
diff --git a/transcripts/uncorrected/21.txt b/transcripts/uncorrected/21.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8eb532b0a713565b3b2fae20960656ec0d9e6e2f
--- /dev/null
+++ b/transcripts/uncorrected/21.txt
@@ -0,0 +1 @@
+Okay, what I'd like to do is create an application with Gemini. The user will upload their resume, and upon receiving the resume, the purpose of this application is to ideate: to suggest jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slight pivots or bigger pivots.

It'll frame its suggestions with a job title; as in, if the user uploads their resume, it'll say, oh, you could be an AI product manager, with a salary range for this position. Maybe the user should also provide where they're based, though that should be obvious from the CV, so it can contextualize that by demand in their area and who hires for it. Then: an analysis of why this could be a cool job for you; knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job; keywords with which you might find opportunities; and a certification or certifications that you might want to pursue.

Then a kind of Tinder interface: thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions they liked. So it's kind of a career ideation tool really, a career pivot ideation tool, for the user to explore alternative directions if they feel they might not be thinking sufficiently widely about what they could be using their skills for.
\ No newline at end of file
diff --git a/transcripts/uncorrected/22.txt b/transcripts/uncorrected/22.txt
new file mode 100644
index 0000000000000000000000000000000000000000..492695d3c04244eba8ee90b40f4d0ed8cbb6793b
--- /dev/null
+++ b/transcripts/uncorrected/22.txt
@@ -0,0 +1 @@
+Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing, and I've become very dependent on it for voice typing. Unfortunately, on Linux specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever asking: does this tool have voice support? And you end up having, like what I have now, three or four Whisper subscriptions.

And once you free yourself from the keyboard, literally, you begin to want to use it on all your computers. And some of them can't handle it: my desktop can run Whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.

So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but presented not as an enthusiast product so much as a little edge device: a box, for all intents and purposes, which runs a very efficient speech model like Whisper on device, doing local inference on the hardware. Everything is optimized for this one workload. It has a USB out, and over USB it functions as a HID device, sending out the transcribed text. Inference on the device, and straight out over USB.

What this means is you can plug your voice keyboard, which I think is the obvious name, into anything. You can have it bound to your desktop most of the time; if you go away travelling for a while, you pack your box. So it's really analogous to a keyboard.

Now, what I was thinking to myself about why it might be a stupid idea: yes, you could do this stuff on device, you could use the cloud, and maybe it's too niche. But it could be quite attractive for people who are really into voice typing and want a dedicated way to do it. And if it had Bluetooth support, your little box, your voice typing centerpiece, could also work with your tablets and your phone, and you could sort of extend around it.
\ No newline at end of file
diff --git a/transcripts/uncorrected/23.txt b/transcripts/uncorrected/23.txt
new file mode 100644
index 0000000000000000000000000000000000000000..acadef7c73d2b38c88ec7b03751c008a67eca4fc
--- /dev/null
+++ b/transcripts/uncorrected/23.txt
@@ -0,0 +1 @@
+Another idea for a Gemini app: a recipe modifier. You give it a recipe; Gemini parses the recipe and structures the data. Then, using a nutritional database, it attempts to calculate the total fat per serving and the fat per ingredient.

Then, this being an app for people like me who are trying to adhere to a low-fat diet, it remixes the recipe to either achieve a certain fat amount, as in under X grams of fat, or just make a general reduction within reasonable bounds, while still trying to keep the recipe the recipe.
\ No newline at end of file
diff --git a/transcripts/uncorrected/24.txt b/transcripts/uncorrected/24.txt
new file mode 100644
index 0000000000000000000000000000000000000000..48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45
--- /dev/null
+++ b/transcripts/uncorrected/24.txt
@@ -0,0 +1 @@
+A Google idea to try would be one of the apps that connects with the Google Workspace services. Which, I don't know, maybe they've circumvented their general cautiousness.

Like voice-to-email: to send an email, you record a voice memo; it transcribes it, checks your contacts, generates an email, shows you a draft, asks if that's okay, and then it sends.
\ No newline at end of file
diff --git a/transcripts/uncorrected/25.txt b/transcripts/uncorrected/25.txt
new file mode 100644
index 0000000000000000000000000000000000000000..353b380ddee0d6134e7cfc905de9171524ef566e
--- /dev/null
+++ b/transcripts/uncorrected/25.txt
@@ -0,0 +1 @@
+I'd like to create an app that does the following. The user will paste an image, or multiple images, into the image upload feature. It'll run them through Gemini, which will attempt to extract the following fields: serial number, model number, manufacturer, readable text (OCR'd into a text field), and country of manufacture.

And then, based upon the detected product, the manufacturer, the part number, and the serial number, it will provide a one-line description, a multi-line description, and a spec sheet. It will provide the year it was first released on the market, and its age in years, based on the current time minus the first release, correct to the nearest 0.1, one decimal place.

And a deprecation level, from almost deprecated to fully deprecated, plus RRP and still-on-market, the last as a checkbox. So it'll basically take an image and then extract all these fields, based on the initial OCR and then on web search complementing that.
\ No newline at end of file
diff --git a/transcripts/uncorrected/26.txt b/transcripts/uncorrected/26.txt
new file mode 100644
index 0000000000000000000000000000000000000000..da218ad130c3c5a5f3ca672509c6c517f4fa87f2
--- /dev/null
+++ b/transcripts/uncorrected/26.txt
@@ -0,0 +1 @@
+I'd like to create an app that does the following. The user will paste a screenshot from their calendar, or there's a text field for calendar entries for a certain time period. Below that there is a voice recorder. The voice recorder will allow the user to record a voice message: record, pause, stop, and/or retake.

The user is instructed to narrate their timesheet for the week, and the user can also select a date for week commencing, just to validate what the first date they're referring to in this timesheet is. When those three fields are provided by the user, they get sent to Gemini, and Gemini will then generate a timesheet based upon the user's description, with activities per day.

The meeting information that was received will be added in, so it might diarize specific meetings that were referenced, combining the two sets of data. And finally, if the user includes a time-spent estimate, how many hours were spent per day on a certain project or task, it will calculate the estimated total hours spent, followed by a summary section.

This will be provided as a document created in Markdown; it's rendered in rich text on screen, and the user can click download. If they do, it'll download the timesheet as a Markdown file with an automatically generated file name, "timesheet for week commencing ..." in machine-readable case.
\ No newline at end of file
diff --git a/transcripts/uncorrected/27.txt b/transcripts/uncorrected/27.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0ec335394a72e80887a3672f290bc5828d8227e0
--- /dev/null
+++ b/transcripts/uncorrected/27.txt
@@ -0,0 +1 @@
+I'd like to create an app that is a meeting documentation assistant; it can provide a couple of outputs from a voice input. So there's a voice recorder: the user can record a voice note, pause, stop, and retake, and then send. Once the voice note is sent, the user selects whether they want to generate meeting minutes or an agenda for an upcoming meeting, so a meeting agenda; just those two, actually.

And if they choose meeting agenda, it'll also generate a short version that can fit in a calendar description, and a suggested meeting title. Upon receiving this from the user, it gets sent to Gemini, which analyzes and parses the audio, and then generates the minutes or agenda, according to what the user selects, with an automatically generated title and a body that is formatted in Markdown but renders in rich text. The user can download the original file, and to redo a run, the user would just clear the recording and start again.

It should also be able to automatically detect start time, end time, participants, and action items, and it will put those in organized fields in the output; maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected versions, as the case may be, to generate the actual document for the minutes or the agenda.
\ No newline at end of file
diff --git a/transcripts/uncorrected/28.txt b/transcripts/uncorrected/28.txt
new file mode 100644
index 0000000000000000000000000000000000000000..243f36cf36c052964af7ebe83a792dae9e67d205
--- /dev/null
+++ b/transcripts/uncorrected/28.txt
@@ -0,0 +1 @@
+I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message, the voice recording happening in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the voice message, as short as possible, essentially cleaning it up. This stage is not shown to the user.

What happens next is that it gets text-to-speech synthesized; the user can choose between a male or a female voice. And once the generated audio is created, it's presented to the user, and the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then offering the download.

Come up with an imaginative name for this use case.
\ No newline at end of file
diff --git a/transcripts/uncorrected/29.txt b/transcripts/uncorrected/29.txt
new file mode 100644
index 0000000000000000000000000000000000000000..35a55fa10abb62fbf49bc2c38d73e8cc53fca620
--- /dev/null
+++ b/transcripts/uncorrected/29.txt
@@ -0,0 +1 @@
+This is called Impact Report Finder. The objective is that the user will provide the name of a company, and the AI tool, Gemini, will attempt to find on the internet any voluntary sustainability disclosures, impact disclosures, that they've written, and it will sort them by year. If they include data about their GHG emissions, there will be a tick symbol, and there will be a link to the result and a direct link to the PDF.

So after the user provides the name of the company, if Gemini needs to disambiguate, it will ask the user in a text box below, "can you clarify?", and then the user can hit submit again. Otherwise, rather than an interactive chat app, it just provides those search results in that specific format, with the reports chronological by year (and if there are multiple in a year, by date of release), and then, if they have GHG data, a link to the data sheet if it's separate, or just the PDF. Basically, an annotated table of links.
\ No newline at end of file
diff --git a/transcripts/uncorrected/30.txt b/transcripts/uncorrected/30.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e3960e6d457375f71a0aa63d07c4c8ad4af74fc2
--- /dev/null
+++ b/transcripts/uncorrected/30.txt
@@ -0,0 +1 @@
+Okay, I'd like to create a sustainability report parser, which will operate as follows. The user will provide a link to a sustainability disclosure, or, better, they will upload a PDF. That's the expectation.

Upon receiving the PDF from the user, the app will load the PDF in a frame. Gemini will identify on which page the disclosure data for Scope 3, 2, and 1 emissions is reported. And the PDF will load up in the frame, the viewer, skipped ahead to that page, with the data highlighted with a yellow overlay, a slight highlight.

And beneath it, Gemini will output the table for the top level, in other words the summary of the Scope 3, 2, and 1 emissions: a short text description of what they were in summary, the units detected, Scope 3, 2, and 1 itemized, and then a disclaimer under that, that this detection is based on automated processing and may be incorrect, and so on.
\ No newline at end of file
diff --git a/transcripts/uncorrected/31.txt b/transcripts/uncorrected/31.txt
new file mode 100644
index 0000000000000000000000000000000000000000..73fdefbd1c2ebcfad9ad59e23523ae1b8526edf2
--- /dev/null
+++ b/transcripts/uncorrected/31.txt
@@ -0,0 +1 @@
+Okay, so I'd like to add to the VoiceNote dataset manager. There are two main objectives for this project as I currently conceive of it, and I think on the front end it would be useful, when I'm uploading stuff and annotating, to have two separate sections for those, a little bit more clearly delineated.

So, delineated as follows. For example, where we have "upload new voice note", that can firstly just be called maybe Upload; the next section, Transcripts; and by "next section" I mean I'm defining the headers; the next section, Classification; the next section, Annotations.

So in Classification, I'll just add a few more recurrent ones that we should have: Prompt (General), Development Prompt, Readme Dictation, Social Media Post. And then on to Annotations.

For content issues, call that "Audio Defects", and let's add one for significant background noise. On audio quality issues, what I'd like to have, actually... and again, in the process of defining the annotations, we might have to sort of work backwards initially, but most of them haven't been annotated yet. I'm not going to start annotating until the schema is defined, so it would actually be a lagging annotation process.

The ones that are missing currently: background music. We have background noise, but I think background music is actually very important, because from a copyright standpoint that could be an issue. And for multi-language, we don't actually even have English or Hebrew; I'd have to keep it open-ended as to what other languages are present. I'd also like to have one for background conversations, actually. And tagging by language: English, Hebrew, Arabic, Russian, French. These would be the ones I encounter in my local environments a lot.
\ No newline at end of file