diff --git a/annotations/167.json b/annotations/167.json new file mode 100644 index 0000000000000000000000000000000000000000..54e7d56e5726eb86bb024273fd6580aaaa451d9a --- /dev/null +++ b/annotations/167.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6bf0802a11fea8d48fcb20cef2e757b3de6ea46d6821cedfea2a7b3b2dece4cc +size 932 diff --git a/annotations/168.json b/annotations/168.json new file mode 100644 index 0000000000000000000000000000000000000000..8bc92bc6c5f07e21a449066aecadf02de4461fe2 --- /dev/null +++ b/annotations/168.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11977717133a41a5807880402bd2267a729dd4d32c39be52b0b9dc67259f632f +size 812 diff --git a/annotations/169.json b/annotations/169.json new file mode 100644 index 0000000000000000000000000000000000000000..68924d24793257a51b81d0e6836e442ba7728689 --- /dev/null +++ b/annotations/169.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4874781cbb02c348ae94ed13898e3666031f1ab74e09805adeefd215fc8c540 +size 799 diff --git a/annotations/170.json b/annotations/170.json new file mode 100644 index 0000000000000000000000000000000000000000..57b8af138ee14639465af85f675c246a98a4e75a --- /dev/null +++ b/annotations/170.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:369d19b68ed80c164a11333552dd9948b1d08ea93b6060d3a3fc6f568c3463a4 +size 792 diff --git a/annotations/171.json b/annotations/171.json new file mode 100644 index 0000000000000000000000000000000000000000..42b043c4f9ec144de9d7bce0edb163e8ef4f5226 --- /dev/null +++ b/annotations/171.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62479fccec01a482e5325d4c0935c5e82aac07c0c0741a1fd0d62099a5c724b5 +size 801 diff --git a/annotations/172.json b/annotations/172.json new file mode 100644 index 0000000000000000000000000000000000000000..c99a43fa2e6b0302f9ba83dab31ebfb7b0992f87 --- /dev/null +++ b/annotations/172.json @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:e5bfa4699bc624522e74ff292c56f2106dff9b2dada2f9a72387c4a142ff1318 +size 822 diff --git a/annotations/173.json b/annotations/173.json new file mode 100644 index 0000000000000000000000000000000000000000..cecc83ca40bdf73524a73c9ac12380b3457a02ff --- /dev/null +++ b/annotations/173.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2834671168f89f741dec155b43cd013803b766a318cb6ff356d47f0d9389bf6d +size 801 diff --git a/annotations/174.json b/annotations/174.json new file mode 100644 index 0000000000000000000000000000000000000000..57f35501b5182d9b90e0933b2d3732f80d9ef327 --- /dev/null +++ b/annotations/174.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc8a92f5c93c0dccc77d83e5667d08dc179838171182db0536d4a03763948d6a +size 804 diff --git a/annotations/175.json b/annotations/175.json new file mode 100644 index 0000000000000000000000000000000000000000..dca6743ba5b6bc9b4d0529ed291d4becbbef2b73 --- /dev/null +++ b/annotations/175.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec6c8a0f1e3cef6d1b84415f9192a6ae24e83b1d7059bcab639aeca149b93c5d +size 789 diff --git a/annotations/176.json b/annotations/176.json new file mode 100644 index 0000000000000000000000000000000000000000..d3d1a7bc9425a28a324cd206edcf2b5b3ceb21da --- /dev/null +++ b/annotations/176.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25817316075d9c51a129bafc23e9236e921402bae3fff964364912bf4b4f447a +size 810 diff --git a/annotations/177.json b/annotations/177.json new file mode 100644 index 0000000000000000000000000000000000000000..759cb573fef5011614365852c3141682d714188f --- /dev/null +++ b/annotations/177.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24703e3c7f25b30ca6e7a43f56c6d7bf28b1473892c7ad55cd4a512861a2d47b +size 804 diff --git a/annotations/178.json b/annotations/178.json new file mode 100644 index 
0000000000000000000000000000000000000000..9315010ff76af94a2a20ea2e04974232e3cabbfb --- /dev/null +++ b/annotations/178.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bda811f72ad2836f7c752270f2ebc3903e2f879e8a759cccc5a64a31ee53069 +size 810 diff --git a/annotations/179.json b/annotations/179.json new file mode 100644 index 0000000000000000000000000000000000000000..71409639aadfbe3cf1e341090eb825c09313f6f0 --- /dev/null +++ b/annotations/179.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f680c18a79f7809c25416554ca7acbc9d724ef7d9458e264e59fd08ce1cd484 +size 801 diff --git a/annotations/180.json b/annotations/180.json new file mode 100644 index 0000000000000000000000000000000000000000..9ced85aa991933ca3b955521edf5ec305b080f2f --- /dev/null +++ b/annotations/180.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c746536c56fbcfa188b3cf31fdfdeb2dfe3deb5f9d6e6f4bf0faabe7ac53c60 +size 802 diff --git a/annotations/181.json b/annotations/181.json new file mode 100644 index 0000000000000000000000000000000000000000..f4756082282ded6205add1af7f8392595461ab01 --- /dev/null +++ b/annotations/181.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d61751614e0fd949a36ec3d4155f380a1bc7a0ae3fa36d3c982fa25da50c0343 +size 806 diff --git a/annotations/182.json b/annotations/182.json new file mode 100644 index 0000000000000000000000000000000000000000..48385668dd76e0475dbedb303afca36a1cfb238c --- /dev/null +++ b/annotations/182.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:750f76dadfb1879500ec6425eca7a2d8f05e70e399365662402e23cd60a9981f +size 809 diff --git a/annotations/183.json b/annotations/183.json new file mode 100644 index 0000000000000000000000000000000000000000..afe6c0bf14701dc6fbd0334a3199e9c15d4e1cb2 --- /dev/null +++ b/annotations/183.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:dd0595b3c42a33923bb3ddf105cc16dd2161f6c4e42825de91055ecd7c45497f +size 802 diff --git a/annotations/184.json b/annotations/184.json new file mode 100644 index 0000000000000000000000000000000000000000..093951e1cc051f7aaa2b9346fbacec5118994f36 --- /dev/null +++ b/annotations/184.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75215c15ccd0003c21f0896a2b5cd4314058affce222f589768339a2fc3f06c2 +size 795 diff --git a/audio/167.mp3 b/audio/167.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..9dfb47390f2d75bf7346cf2fc041c77709400f3f --- /dev/null +++ b/audio/167.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c8e8df866e77ea3f601a14a164d7308c7b7b817402993634f56f6aad3b54a11 +size 927404 diff --git a/audio/168.mp3 b/audio/168.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..73bd073932fdb9455d990f341ce98282d850b363 --- /dev/null +++ b/audio/168.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7ed9e2ba97d3231457b3e699f67130488af59df2827599cecbaa4f054e1ccf1 +size 1524716 diff --git a/audio/169.mp3 b/audio/169.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..4e6118e562291cc4e323becdc0785ec1511c5754 --- /dev/null +++ b/audio/169.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf26ac73ec8ab43d38c87570b6b4b31dd11f583fba15221f524a6e7917ccaace +size 887084 diff --git a/audio/170.mp3 b/audio/170.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..fd4876876248f9873fc1e3c60713825b13fde07b --- /dev/null +++ b/audio/170.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e47a89f43f0de0b54b995ac790cf0041677f8944e208b009dc1caa6029fcf414 +size 888236 diff --git a/audio/171.mp3 b/audio/171.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..fe107dee6d1026fc26dbb36a1592199f96235ae3 --- /dev/null +++ b/audio/171.mp3 @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:9142439d4fafb1423c518b6aaca61ae77e40a257f7f61be2c4167585a5f02f72 +size 3235436 diff --git a/audio/172.mp3 b/audio/172.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..4a16df73f1dfc033e4c949b7db5e9eb94d953887 --- /dev/null +++ b/audio/172.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e831b32171884e6ac24dd23e3e54973da861f48f44a47a5e8fbecf2bc6720438 +size 4359902 diff --git a/audio/173.mp3 b/audio/173.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..8da71db7b099ef696caa2f8771fc236cf73208a2 --- /dev/null +++ b/audio/173.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4a345b9c3eca6edbb9d3c84f69910b206b6a93f453daf1309b41c7ec56fb2f7 +size 1121763 diff --git a/audio/174.mp3 b/audio/174.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..181b74283416b8efbea4fdc0be68c66a3bd13f2b --- /dev/null +++ b/audio/174.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de35c8502abe369ee5876eea1d354a292d135675764ea7606e835a146c7b191c +size 8816183 diff --git a/audio/175.mp3 b/audio/175.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..d3634dbc389f480fea407b0734ed3b54cd00b840 --- /dev/null +++ b/audio/175.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:def7f2c6b46dcd8c9283c0b0bf7a9d992780a2dca4a1e67177d9f153c8fe3599 +size 845283 diff --git a/audio/176.mp3 b/audio/176.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7412b113752b680c479ba0a7906cbca9e73ea9e9 --- /dev/null +++ b/audio/176.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11f1df546ed67b3505a8c3b2df20009b897062b7aea869bb01ccc8b873583bdf +size 1950956 diff --git a/audio/177.mp3 b/audio/177.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7f60b880f0f7e7695a157e9c4c9c588efdffeea4 --- /dev/null +++ 
b/audio/177.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:706082dc880f42aa397f9aee429f2f8a4d62fa19417e106e646d6031f91e4f11 +size 7845164 diff --git a/audio/178.mp3 b/audio/178.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..a42b02acca82456e4c67bb0a1e5e945357a4e720 --- /dev/null +++ b/audio/178.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4abd166ccade3f644d84f0b394d7c64f90c5b8adee074b6b0bcfb53b95b8e07d +size 2814956 diff --git a/audio/179.mp3 b/audio/179.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..71e19cb0acdc1d576535c3cdd5b6db814c495a7b --- /dev/null +++ b/audio/179.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55a4246d9bf9bfdfc1b28b4add8b4a8c746b7473f10e28ff3f909709017b04eb +size 2070764 diff --git a/audio/180.mp3 b/audio/180.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..c26a9908e7546cbac92686b02d7f46eeade3f494 --- /dev/null +++ b/audio/180.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a692a5efca0e78afd528ef4edcff7d69e64cbb5989ae20f31ad01cf2faeb271 +size 2577644 diff --git a/audio/181.mp3 b/audio/181.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..6e2783e42ecbe6946a752ead990ac8b528f8752d --- /dev/null +++ b/audio/181.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc2e20543f4f7eada7275e0c7b9fc256023320eb7f067edc41e84ff81c5f633c +size 3235436 diff --git a/audio/182.mp3 b/audio/182.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..41a901c1c0525914a2e5b0920e60a968ae0c312f --- /dev/null +++ b/audio/182.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4249ca4f032cf3a438c3f004a48ed2da00c563e603ef284c892302689999bb96 +size 2980844 diff --git a/audio/183.mp3 b/audio/183.mp3 new file mode 100644 index 
0000000000000000000000000000000000000000..6a753d53c2f383c479cb9973787491507692384c --- /dev/null +++ b/audio/183.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5df94fb343c121e973b335952996dfee728aa804f5adfae688651654e30a1c1b +size 2566124 diff --git a/audio/184.mp3 b/audio/184.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..90557fc63594f7799e1923114ab958ead7e8fb88 --- /dev/null +++ b/audio/184.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:736e83bcc12261e6d83c46b915a26d9c4fc4fbffdf441a4b6a1bc896300acf83 +size 649051 diff --git a/transcripts/uncorrected/167.txt b/transcripts/uncorrected/167.txt new file mode 100644 index 0000000000000000000000000000000000000000..efca153b18880e9095db4f8d42a34fbf0a514984 --- /dev/null +++ b/transcripts/uncorrected/167.txt @@ -0,0 +1 @@ +I'm on to the docs index page, I've just updated it now. And our last night I added it, and I think the microsite, or the website, the docs repository site, actually still works. I'm not sure if it was migrated to Vercel yet.

In any case, I can do so if it wasn't. And then I can add basically update it with all the sub modules updated sub modules based on the ones that I have and then link to it from my main website. \ No newline at end of file diff --git a/transcripts/uncorrected/168.txt b/transcripts/uncorrected/168.txt new file mode 100644 index 0000000000000000000000000000000000000000..5eac1414e49e1b8618ce1ba2193d7d10b91f431a --- /dev/null +++ b/transcripts/uncorrected/168.txt @@ -0,0 +1 @@ +I'd like to consider a wee factor and then just give me your thoughts about this so currently it's a file based backend what I was wondering is would it make more sense to have a lightweight database backend SQLite let's say and and the important part of the utility which is the Hugging Face dataset push is what I'm using for the classification model would actually be a job whereby locally it will create the dataset from the local backend.

In other words, rather than having this sit in place as files, it's going to be constructed periodically. Basically when I say okay I've uploaded another batch, let's push, would that be easier and more logical to integrate with the front end? \ No newline at end of file diff --git a/transcripts/uncorrected/169.txt b/transcripts/uncorrected/169.txt new file mode 100644 index 0000000000000000000000000000000000000000..6d92e52e952b52fc6c71f821a91da516a96f1321 --- /dev/null +++ b/transcripts/uncorrected/169.txt @@ -0,0 +1 @@ +Okay, so just some changes that I'd like to make to the UI. When I upload a voice note, I'd like to capture as metadata the upload time and date, the original file name, the original file format, which in most cases will be MP3.

In addition to transcript, I'd like to have, so that should actually be called uncorrected transcript. I'd like to also have a text field for corrected transcript. And again this is captured as metadata. \ No newline at end of file diff --git a/transcripts/uncorrected/170.txt b/transcripts/uncorrected/170.txt new file mode 100644 index 0000000000000000000000000000000000000000..8d2caf72445f7704d8455a3c2b790fdf76026b9e --- /dev/null +++ b/transcripts/uncorrected/170.txt @@ -0,0 +1 @@ +The purpose of the repository basically is to model or suggest the idea of using AI agents to scope out gap filling and extending multi-agent networks based on their inferred understanding of the purpose of a multi-agent network.

I think iterative workflow is the best. It suggests to the user what about this agent the user says yes or no, rather than the batch system. Although it could do both, but let's make the defaults the kind of individual review system. \ No newline at end of file diff --git a/transcripts/uncorrected/171.txt b/transcripts/uncorrected/171.txt new file mode 100644 index 0000000000000000000000000000000000000000..b80fdbb0fe52236242e2ff1cf53819bba9468fdb --- /dev/null +++ b/transcripts/uncorrected/171.txt @@ -0,0 +1 @@ +The intended functionality of the user interface is that I'll upload the voice note and the automatically generated transcript, which came from the voice note transcription. I'm not going to manually correct the transcript but I would like to record some annotations and the UI would save these to a folder which will actually serve as the dataset itself and what I do want is that the link is preserved so that each either there a TXT file for the raw transcript and the audio or it's recorded at the metadata level.

It should be sequential. So starting with one. and what would be the most useful way for the UI would be that there's a drag and drop interface for uploading the audio because I'll be populating these from voice notes. So I'll be downloading an mp3 from their website and then the two things I do are upload to my UI and copy and paste in the text.

So what I'd like to happen on the backend is that the first file is renamed 1, saved as 1.mp3 let's say, and the text corresponding to it is 1.txt, or they're just linked at the metadata level. \ No newline at end of file diff --git a/transcripts/uncorrected/172.txt b/transcripts/uncorrected/172.txt new file mode 100644 index 0000000000000000000000000000000000000000..b314f3f74074ca02c2a47132cea688da6abb56d9 --- /dev/null +++ b/transcripts/uncorrected/172.txt @@ -0,0 +1 @@ +Building a Reporting Disclosure. I have a few thoughts. One, I can create a model. A model is actually quite feasible. It would be, but it's a data annotation project. It's saying, here's a PDF, here are the actual variables. In other words, here's the scope 3, scope 2, scope 1, here are the units, train it like that.

Second thought is if I did want to put together a dataset of sustainability disclosure reports, I think you could argue a public fair use clause for the PDFs being there.

And then the one I did with Gemini the other day which was basically a parsing AI tool seemed to work and could probably be used in production and which works even maybe as a way of trying to get in touch with Google is they have They have definitely an AI for good division who may let's say provide Gemini credits for the actual deployment of it on Cloud Run. Because from my first run of it, it was very, very promising for the task of parsing the reports.

And that would greatly the feature would be when it extracts the data human human in the loop is done by seeing what it is matching it to a company in the database or to a known company Let's take Google itself as an example. Detects its stock ticker, detects its stock exchange. And then you click like add to database meaning that you're adding the validated data and it could even pull out the metadata from the document pull out the source and that would be a great way of building up a human validated database in other words you take the reports you say either everything everything looks good to me or this is wrong either way you add it then of course you've got the missing financials and the rest of the world.

But that would probably be because there is thousands of sustainability disclosures, especially when you consider I think beyond the US globally, and it's beyond. So certainly it's a task for a model, but it's also human in the loop. The ultimate question is if Gemini stock performs 99% sufficiently well in the task of extracting this data from the sustainability reports. A model might actually not even be necessary because out of the box it's almost perfect. That is, I suspect, what the case would be. \ No newline at end of file diff --git a/transcripts/uncorrected/173.txt b/transcripts/uncorrected/173.txt new file mode 100644 index 0000000000000000000000000000000000000000..5caeda3e06fef9e0eac96f7de067d3551f906b81 --- /dev/null +++ b/transcripts/uncorrected/173.txt @@ -0,0 +1 @@ +Is there an app for two people on Android phones wanting to do karaoke? So what I'm thinking is, especially for Android TV, the karaoke track is up on the TV. And then the two people participating in the karaoke, their phones, the Android devices are the karaoke microphones.

And it's live so they're going on to the... so it's a karaoke experience but using your phones as karaoke microphones. I guess it would work with the website. Does this exist? \ No newline at end of file diff --git a/transcripts/uncorrected/174.txt b/transcripts/uncorrected/174.txt new file mode 100644 index 0000000000000000000000000000000000000000..145fac41057e67a2489a588fef1f5d5a4b0df965 --- /dev/null +++ b/transcripts/uncorrected/174.txt @@ -0,0 +1 @@ +Alright, so the plan is for this repository, I want to create an audio media streaming interface for my home network. And there's a few things I want to roll into this one too.

Number 1 is media playback. So I have a volume on the NAS called AudioShare. The NAS is 10.0.0.50. So connect to the NAS, you'll find the AudioShare volume and let's mount that as the media library. It'll have a lot of tracks already populated.

Second thing is a soundboard. So I'll create a folder within that audio share volume called soundboard. And in the soundboard I just upload some stupid sound effects I do one to start it off Like laughing sound.

And then I also want to create a intercom system. and the functionality for the intercom is that from this computer, sorry from the interface which will be audio.residence.jlm.com I'd like to have the push to talk and the start and stop. PUSH TO TALK

So for the speaker networking this is where I would like you to give me your thoughts on what makes the most sense So I've used before MPD. I've installed MPD clients on... So the devices are, there is a device called Nursery Pi in SSH. Bedroom Pi, R-Pi and Smart TV. Each one is connected to a speaker. That's the network.

I tried MPD, putting an MPD client on each device. MPD has been the most reliable But it seems kind of a pity to use this when there are protocols like SnapServer that are designed specifically for this use case. However, using Home Assistant, I found SnapServer to be very buggy. I could never really get it to work and many more and the system that's reliable.

I find with MPD, because you need to select the speaker on the client devices, those bindings frequently broke. So I'd like to have something that kind of, the speakers are really never going to change. In the sense that I'm going to, I have a sound card for the Raspberry Pi. That's the speaker. and for as long as I use this system that's gonna be the configuration. So I want to set up something that once it's in place it's pretty much just gonna work.

So I leave that call up to you and please create a... Create a folder in the repository providing your recommendations just before you begin and what you suggest as the best implementation for the multi-speaker network whether it is broadcasting to a bunch of MCD clients from the Web UI or whether it's creating a single Snap server or something else that manages the networking I don't envision much of a need to select individual speakers by which I mean, I think that for the most part the occasions I'm using this I'll just play media to the pool but of course it would be nice to be able to select that ! \ No newline at end of file diff --git a/transcripts/uncorrected/175.txt b/transcripts/uncorrected/175.txt new file mode 100644 index 0000000000000000000000000000000000000000..2a124ce7850e0ade65ab3adf9f0db6019eb34fb4 --- /dev/null +++ b/transcripts/uncorrected/175.txt @@ -0,0 +1 @@ +Okay, when I return to the byte, the DAC, I want to download the user manual or I'll just photograph the one. I want to connect the reference speakers with RCA.

And I also said to Hannah that I would reconnect her turntable because I took away the switch and the connection. So I will reconnect that. And then hopefully it will just work and that will be it.

And I want to also take down the serial number for the inventory. And that would be it added into the into the system. \ No newline at end of file diff --git a/transcripts/uncorrected/176.txt b/transcripts/uncorrected/176.txt new file mode 100644 index 0000000000000000000000000000000000000000..3b89d4d005b9193960db19f14130c6541a589b08 --- /dev/null +++ b/transcripts/uncorrected/176.txt @@ -0,0 +1 @@ +Okay, so this is a good start. In the dashboard, the developmental milestones and all this data. So currently, if I change tab, if I go into the measurement tab or the report tab, it loses the data in the dashboard. I'm guessing because it's running the prompts again. So that's a bad design. It should load it once per user session and then hold that data so that when the user, when they navigate across the app, they're not going to force the prompts to run all over again as they do now.

The second thing is that the design of the elements is a bit bad. It's quite bad, I would say. Let's have them as accordions that are nested by default or will only show the first paragraph of text and the user can click down to expand them. And maybe for each one like psychosocial development if the AI could generate a subtitle so that there's a headline under that and then the user can choose to go into it.

In measurements, when you add a measurement, sometimes we don't have either figures for height or weight, so you shouldn't be prevented from saving the measurement if you're lacking one of those data points. \ No newline at end of file diff --git a/transcripts/uncorrected/177.txt b/transcripts/uncorrected/177.txt new file mode 100644 index 0000000000000000000000000000000000000000..4215c595a95e066a9ecda2a2ae08b9013686c002 --- /dev/null +++ b/transcripts/uncorrected/177.txt @@ -0,0 +1 @@ +Okay, I'd like to create an app which does the following. The purpose of the app is to visualize how different countries, ideologies, systems approach common policy challenges. An example of a policy challenge that I'm just providing for explaining how I could see this working is second-hand smoke control. Some countries have very strict regulations, some countries have very lax enforcement. And probably there is not really much distinction by system of government but the user prompts it called policy visualizer and the user enters a policy challenge. So another example might be minimum alcohol purchasing laws.

Once Gemini receives this prompt, its task will be to research how different countries in the first instance approach this topic. And from that analysis, it can identify commonalities or clusters. The research process happens in the back end. And the user is shown some kind of progress indicators like researching what it's doing basically. Not a huge amount of verbosity but just a few cues so the user knows that it's not stuck or it's actually doing something.

Once Gemini concludes its first pass it will have grouped not necessarily every country in the world but based on the clusters it identifies it found groups. Each group is given a label. The label might be laissez-faire, permissive. These may be either recognized labels or what Gemini feels it's best to describe them as. And the countries are displayed with their national flags in alphabetical order.

The next functionality is that the user can click on the cluster and Gemini will describe what it is about this law that it considered them to be a cluster. In other words, the way in which they approach the challenge. That's a modal. Then the user can click on any country and it can see how that country approaches it. So I might click on the flag of Germany and either an accordion or a modal it show how Germany approaches in this case gun control and its cluster.

Country level is always a tab and only if there's other taxonomies. By taxonomy I mean that we think there's a very, Gemini says there's a very big difference and how different right-wing versus left-wing approaches we're going to do. We're going to create one more tab with that. But that should be kind of only if there's very compelling reason to do so. Or if it has significant data to share. So if it feels like there's enough data about how US states approach an issue at the state level, it might create a tab called US States and then follow the same pattern in which it groups them into clusters.

The objective is to, rather than searching through Google to see how different countries do different things, to start with your question and then get this visualisation. And I think the icing on the cake would be an analysis. So this is a visual presentation and then there may be analysis showing significant differences, some similarities. So there's like a report, a textual report, but the main tab, because I think it's the most interesting one, is the visualization, the policy visualizer. \ No newline at end of file diff --git a/transcripts/uncorrected/178.txt b/transcripts/uncorrected/178.txt new file mode 100644 index 0000000000000000000000000000000000000000..e3960e6d457375f71a0aa63d07c4c8ad4af74fc2 --- /dev/null +++ b/transcripts/uncorrected/178.txt @@ -0,0 +1 @@ +Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.

Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.

And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on. \ No newline at end of file diff --git a/transcripts/uncorrected/179.txt b/transcripts/uncorrected/179.txt new file mode 100644 index 0000000000000000000000000000000000000000..35a55fa10abb62fbf49bc2c38d73e8cc53fca620 --- /dev/null +++ b/transcripts/uncorrected/179.txt @@ -0,0 +1 @@ +This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.

So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links. \ No newline at end of file diff --git a/transcripts/uncorrected/180.txt b/transcripts/uncorrected/180.txt new file mode 100644 index 0000000000000000000000000000000000000000..243f36cf36c052964af7ebe83a792dae9e67d205 --- /dev/null +++ b/transcripts/uncorrected/180.txt @@ -0,0 +1 @@ +I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.

But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.

Come up with an imaginative name for this use case. \ No newline at end of file diff --git a/transcripts/uncorrected/181.txt b/transcripts/uncorrected/181.txt new file mode 100644 index 0000000000000000000000000000000000000000..0ec335394a72e80887a3672f290bc5828d8227e0 --- /dev/null +++ b/transcripts/uncorrected/181.txt @@ -0,0 +1 @@ +I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.

And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user, it gets sent to Gemini; it analyzes the audio, parses the audio, and then generates a, well, minutes or agenda according to what the user selects, with an automatically generated title and a body that is formatted in Markdown but renders in rich text. The user can download the original file, and to rerun, the user would just clear the recording and start again.

It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda. \ No newline at end of file diff --git a/transcripts/uncorrected/182.txt b/transcripts/uncorrected/182.txt new file mode 100644 index 0000000000000000000000000000000000000000..da218ad130c3c5a5f3ca672509c6c517f4fa87f2 --- /dev/null +++ b/transcripts/uncorrected/182.txt @@ -0,0 +1 @@ +I'd like to create an app that does the following. The user will paste a screenshot from their calendar or there's a text field for calendar entries for a certain time period. Below that there is a voice recorder. The voice recorder will allow the user to record a voice message, record, pause, stop, and or retake.

Then the user is instructed to narrate their timesheet for the week, and the user can also select a date for week commencing, just to validate when the first date that they're referring to in this timesheet is. When those three fields are provided by the user they get sent to Gemini and Gemini will then generate a timesheet based upon the user's description with activities per day.

The meeting information that was received will be added. So it might diarize specific meetings that were referenced. So combining the two sets of data. And finally, if the user includes a time spent estimate — how many hours were spent per day on a certain project or task — it will then calculate the estimated total hours spent and then a summary section.

This will be provided as a document which is created in markdown; for the user it's rendered in rich text on the screen and the user can click download and if they do that it'll download the timesheet as a markdown file with the file name automatically set to timesheet for week commencing in machine readable case. \ No newline at end of file diff --git a/transcripts/uncorrected/183.txt b/transcripts/uncorrected/183.txt new file mode 100644 index 0000000000000000000000000000000000000000..353b380ddee0d6134e7cfc905de9171524ef566e --- /dev/null +++ b/transcripts/uncorrected/183.txt @@ -0,0 +1 @@ +I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.

And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first release on the market, age in years based on first release minus the current time, correct to the nearest 0.1, one decimal place.

And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that. \ No newline at end of file diff --git a/transcripts/uncorrected/184.txt b/transcripts/uncorrected/184.txt new file mode 100644 index 0000000000000000000000000000000000000000..48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45 --- /dev/null +++ b/transcripts/uncorrected/184.txt @@ -0,0 +1 @@ +Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.

Like voice to email. To send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends. \ No newline at end of file