diff --git a/annotations/1.json b/annotations/1.json index c6951bfd44eb0f9cf7fcac5067bec24e073ee55a..14aa86e84bda5e39e5b02a2464c8629929e77f68 100644 --- a/annotations/1.json +++ b/annotations/1.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c7e8f716d041c623ebbf2b497ca6e2ba5d352569180aac3b6a830b728555751f -size 603 +oid sha256:177fd5ecd9764017a21b4ac59d0686cb961c4226408c7b36b0e55bf2f5b7ca0a +size 599 diff --git a/annotations/10.json b/annotations/10.json index 0f4f6aa61dc15f255759de5bf8c90643d91530d6..aaa5b3dce6b60e9030419b2cb2cf08233cdca596 100644 --- a/annotations/10.json +++ b/annotations/10.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fccc6ed3ea3ab0a32ba65f16adf3546038249f4ffb9e6cb6d8ebaf81a432226e -size 602 +oid sha256:eb5e39cfa16b3d09d3e5f65d5b3e82ece10eaeaa9100734e3f5eb10bd87c8a6c +size 600 diff --git a/annotations/100.json b/annotations/100.json new file mode 100644 index 0000000000000000000000000000000000000000..4fe5d45d149b32eb096d6d07157071297c17d200 --- /dev/null +++ b/annotations/100.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47d326246e84398b55063be16c77a32aa72ce0e20d5777e10eb9ff36b853a935 +size 599 diff --git a/annotations/101.json b/annotations/101.json new file mode 100644 index 0000000000000000000000000000000000000000..e1d1a30505ae9a173913af125b5fa9b47d00a8de --- /dev/null +++ b/annotations/101.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a98800423736226ec0b194f77c2608946f28e1632c9254527e5f8109b033e920 +size 603 diff --git a/annotations/11.json b/annotations/11.json index bc4edff107219503a95980ec1cf9e8dc6be76d47..cac6cba4a7b2acfe2a9f6f192a6fd91117770491 100644 --- a/annotations/11.json +++ b/annotations/11.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e9af549adca0bb597ee66aefce06fe19499c355253c4d455a0f619f30c49adbf -size 599 +oid 
sha256:9fed757280e37b4a90209c59bfeb7012665fa03116892984740536b72c89c3f4 +size 598 diff --git a/annotations/12.json b/annotations/12.json index 7e32521bc9f45db32845464b32d2a9c9ff889c2a..e6e2f9d8e4e93f561c2c22bd6368b5d83cc01d4b 100644 --- a/annotations/12.json +++ b/annotations/12.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:273ccdac287b9a6acdd7e4a58f8364cc766d1ef2e27cca6239f95e36266d2638 -size 597 +oid sha256:36387bf7f331ad8210e9d56e49e0f873d0d6e6812e31bc620c51c0db8feaa0be +size 598 diff --git a/annotations/13.json b/annotations/13.json index 400173a77c85086cf9b9d81e7eae40bb194e3877..1a236634f85ed08b6e55e902f09b6c32851cc7fb 100644 --- a/annotations/13.json +++ b/annotations/13.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d902bddf72c49b073c10e4e15bebf52bb4285e4afd07f8a71e89ac119e356a5b -size 598 +oid sha256:b3fcdf33629dfef5899aa11395edd297192e882cdfde4a45f3d87413ca30b63c +size 597 diff --git a/annotations/14.json b/annotations/14.json index e1c99ebbf3a0b00e6308a83ffac214fec6376207..6e92d7d4c773251857bc4e438635e8b854c9b7c0 100644 --- a/annotations/14.json +++ b/annotations/14.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:82c7f02353745fa558f001b25d185036b8706843b34feb5e8f55d2885d4c31a7 +oid sha256:e069803d1a67f1223ec72a79822de762b9048e1254dfabdf2b5ffc155fa822fb size 599 diff --git a/annotations/15.json b/annotations/15.json index a0f77215e5783cc3947e9df7e74921d7cf13701a..07cbb968829c16dad42af84acff42f2b7ec28e62 100644 --- a/annotations/15.json +++ b/annotations/15.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fe2bba2aad3e3f3d1eef513b471fb0fcf4a121272ad451261402f147a64b2716 +oid sha256:a30960fa11e705545b528a2400f154364dce82e2e1b6c441b71b35aa2035ed2f size 599 diff --git a/annotations/16.json b/annotations/16.json index 557bf8ada2c22d3588652fc9241882a70ca18512..6de2c71c44d79520d932a6a7a893db8583ef5e61 100644 --- a/annotations/16.json +++ 
b/annotations/16.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9d9cbe972ba1f9b1709e3d397a4e2611e5c5aa1750fffdc0ea650d11f403f920 -size 604 +oid sha256:1b0f7167ae5a101a29ce033f442f021ccec42dab01680023b076dd38246bacd0 +size 599 diff --git a/annotations/17.json b/annotations/17.json index fe6490e35d12dcccff44173124b583a458ab10c3..276b8566a64958f1c9eb09f496da5e18da21f22f 100644 --- a/annotations/17.json +++ b/annotations/17.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:214f147d4bb848b0776d1dd0eb292ab2985620dc7c61ac5240a24d5fe48b518f -size 600 +oid sha256:91a25b444ebf2ee423afa752db4df478ae5392b0afd49fffd8d6cccebe5e0dff +size 603 diff --git a/annotations/18.json b/annotations/18.json index f6cac43eebb53f021e2a358ea181663225990251..e4bd3cca412d060e94d9d4bb73ed4249b51adc30 100644 --- a/annotations/18.json +++ b/annotations/18.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:398a399eace75289ea3eae2cb4d59ab684fe192effe8044acf8ce662db49e8d1 -size 604 +oid sha256:b117bcfc03c38283ea00badfa15614fc5b819fe145165f6aeb8d4ed019247cd7 +size 599 diff --git a/annotations/19.json b/annotations/19.json index c810e4dc35d438133cce97cbaf3df2dc2fcae772..6d8139c2c52a0a8ea1627d266f8ed33e292a534e 100644 --- a/annotations/19.json +++ b/annotations/19.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b2c1747a0b762896d2972470379c537acd6eb21ec4e316cee2861257b6d790da -size 604 +oid sha256:a0bfda21ab1712d9b5e17a1f03a7c5d273164beae1cd0561521662eacdc55e45 +size 603 diff --git a/annotations/2.json b/annotations/2.json index a75ea63eb5e72136c9e3c624dbb7deb19e0c9b2c..87a922a76e82cf5688cb752fa06e36c041da4fbb 100644 --- a/annotations/2.json +++ b/annotations/2.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:76d7352fec4d202142fde410f916264ceadf10a3b5aedd841ef162322b8e8e7d -size 602 +oid sha256:6eb0e0ef846644df49e9390ab7cfd9939559f3708b631296e368a574bcfc4933 +size 599 diff 
--git a/annotations/20.json b/annotations/20.json index 46e9c98cbb25624f61bd7a64b6639d30b3f6add8..e5a41ab0ed9638ca77cb35ee17a421d080ff92b7 100644 --- a/annotations/20.json +++ b/annotations/20.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0bd151e7c397add34285c5f37946aa8782b8f47f4dc675b78c94e80bc1ff8471 -size 599 +oid sha256:4cd277e2a441d6d006097b99747d1832fe8a17489fa23a179f6cdf2b91e69a0b +size 600 diff --git a/annotations/21.json b/annotations/21.json index cf5d456f9a7de09583a9acff7cc1b1ed0e1c689c..3ef2effa46a6ee9282a9a1890c84a0fd239661dd 100644 --- a/annotations/21.json +++ b/annotations/21.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7ccfd1d97b4d1270c92168434bcde5d7cc6b2cf852aec0c79a9211c79ed09ab5 -size 603 +oid sha256:70d298640200e53583090d4e5483d92b40dbd749c6809ef26c680b189e970c33 +size 604 diff --git a/annotations/22.json b/annotations/22.json index a6531d7ed447bff0267bb1e8ab20febd86556792..acddd7b18084aa283800ed19c5e82225cce00477 100644 --- a/annotations/22.json +++ b/annotations/22.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:45d581fe5341f4134ee5742b18ba733e5b06dfb6d79e0a12254ec68bd7b021bc -size 600 +oid sha256:80d1089b010b33430b8e4c553a48ff27d6018ff8a4c43afa0e5c2dc68b952a09 +size 603 diff --git a/annotations/23.json b/annotations/23.json index 9c3a0130d3759798288b319e4326f49bdca70541..4e2cf79ddf820ed967f993d96b418c5e37af03cd 100644 --- a/annotations/23.json +++ b/annotations/23.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b123a7f2c0f6e01917038ebd78f077b5eaeb40b27cf8dbbb523d3aaf063d092a -size 604 +oid sha256:79aa0596ac83961321202878fcfd28738021a6047adda65a1dccd1aeaf0fb5e3 +size 598 diff --git a/annotations/24.json b/annotations/24.json index fe852aa7cc72a6fdbd26a30341e06f97a7c2001c..f3705d8f935a356443ea7b2e0c069511eb4b566a 100644 --- a/annotations/24.json +++ b/annotations/24.json @@ -1,3 +1,3 @@ version 
https://git-lfs.github.com/spec/v1 -oid sha256:94daa978746dd2c1d14b4cdfef10e908712dc812eadb4d68d028038c0cf709b4 +oid sha256:ee10e2826005ea3ef5c3688bbdfef738e110053bc54127af2179e7760ffcdba1 size 603 diff --git a/annotations/25.json b/annotations/25.json index 87aa4727ca896926b9fdf0793b0a0a6b603d089b..3399b27d53006d58fb00e7b90aff8a093b82db46 100644 --- a/annotations/25.json +++ b/annotations/25.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c72e12c6dddabf3105d31fc4b122ad97eecbc80e3c48fe5f595d988d6997cb85 -size 603 +oid sha256:d64a77e959b8dbd3af0f5664c559e959cd7a0c0050c88477151130876600d069 +size 598 diff --git a/annotations/26.json b/annotations/26.json index 69fb3549e2e9e8677df32e93e1b16c25fcfb1d3c..9ec41ab6f13e0263c8a969877e6fa9225bb8cdb5 100644 --- a/annotations/26.json +++ b/annotations/26.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5de999d945e85aab6b070226790249d2d992b2812300bf3ea2f8f2011862ffaf +oid sha256:cfaf980f31151ae89384113b515e327f82c8fda83efbeda6e5a8d027b43255d1 size 603 diff --git a/annotations/27.json b/annotations/27.json index 07af9e5df4f8078d471ff08be0aca5d9af969489..a5d16413ec995e2513e01f34e4854a8698072d8b 100644 --- a/annotations/27.json +++ b/annotations/27.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c778b6cc2cebef617ca5d549beff3e111d4b7f63200be153397c87179423fcda -size 599 +oid sha256:5c9088b181a67bdd05817a8185a2cf71f9c99de46b5f1ce4e1a39073b75f0fe6 +size 602 diff --git a/annotations/28.json b/annotations/28.json index f2d5d773f5c21ab901afe1bfdd9fe26f55da87fb..19f45366a6057250f729cee62aabe1ac68b61624 100644 --- a/annotations/28.json +++ b/annotations/28.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:97dbc3d5cedcd154093c74e1420515bdcf38f060cc09ac5dd39e24af3b15ab43 -size 599 +oid sha256:148b22967b00f16b76b73d98f090c32bd2ee31b69a03ac1a5a8704a0cab8c5f6 +size 601 diff --git a/annotations/29.json b/annotations/29.json index 
7b60bfd966a727ee4c36ca95cf71ccc673b42a38..39e443d1517014cbcabcb8df8e43d0cbe4509512 100644 --- a/annotations/29.json +++ b/annotations/29.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:b92a9f1c81131ca385803f3f4f1e3de7ea4d109ce7426409e9f29c94de68f9d3 +oid sha256:77c40fc4c22a6beb3f4571bc3e6f3f57ccd5a2ee39b310a0f5bebdd674008191 size 599 diff --git a/annotations/3.json b/annotations/3.json index c1ceea0d3678f07b113701455e91d6584ef64236..9db168ab6d92f70512abd38b4f10facc0783f3a0 100644 --- a/annotations/3.json +++ b/annotations/3.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5d0620eed97279dfcaa6c6701645fd6838b86ceca1121b82ddd3c9c41a3b7386 -size 597 +oid sha256:2b415b686ffa2fce4de65df4bbec43fcab8200a9100671e3df32fa38472e2d9c +size 603 diff --git a/annotations/30.json b/annotations/30.json index 8768510c8b0c915c4744f2f4b8d571877f27475e..4d9c5f2ad3dbb36709b5f30cda1ca60fbfe6cc39 100644 --- a/annotations/30.json +++ b/annotations/30.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ed435a3f7a9b3fd03f4ac2e5428869a74837137210a087ccb29d6894cb0d8f40 -size 600 +oid sha256:dd17fe7483c13cced87eff2f6dd5201e8cf95044d1950687512168c157b177ac +size 602 diff --git a/annotations/31.json b/annotations/31.json index 80aae1a91c02f5ed3bcfcc236d8823cec23065b0..d4c27afb5df14253cf778c30cdc85124ba614f2d 100644 --- a/annotations/31.json +++ b/annotations/31.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db60a39ec01aaf7f2f8dcffc698241a2ade3f4d09c313cb4ba0d3fb947d28af7 -size 598 +oid sha256:264fa34903fd75de9eb7ee8282ae4d885d4f0f6cb7c967059dcfd6550dc9fe04 +size 599 diff --git a/annotations/32.json b/annotations/32.json index 7ee8c8f1185610c089ba3d2587ef9d5cbd86f575..7683f480710b6106de6cad4f709288da2760e813 100644 --- a/annotations/32.json +++ b/annotations/32.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid 
sha256:74f5da8e36e43f77fe14f4021400593bfc66b06b115d237c9e13638f8c3aeba9 -size 599 +oid sha256:5be3555e987ecc7f16d15ac0191ef1277f0e5ff12f035f258d2e71675282c79c +size 597 diff --git a/annotations/33.json b/annotations/33.json index 25010b38f90fe60368c50ee218bed993d8704682..2020a045d50716fefa9b93f909cb61f51e0dcc41 100644 --- a/annotations/33.json +++ b/annotations/33.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a4e83e16837c8f10d402dedee66310d665d1605c97f450aed9e4bf75a3386e30 -size 597 +oid sha256:18fbb72195874480b7e944f85fb6ee547a3b2339d8c68614ebfc57db7361357e +size 598 diff --git a/annotations/34.json b/annotations/34.json index 586c1c8a11c478ad7834c9431b68e9dc51866204..92ee5a907aa3e51e73e77f458a9f567206d45e3b 100644 --- a/annotations/34.json +++ b/annotations/34.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a0189ccc1716cb552d1e00fe3d33c9d7b5732b5533ed36f2c99fdafb8ef04258 -size 598 +oid sha256:c02e93cef859f00281c465b19c17073cb42bb53cd7725037e0735181f2c61f04 +size 599 diff --git a/annotations/35.json b/annotations/35.json index 67dd07eae8e7c07b2ff652a5a5798ee6713bd77d..c322454e84cdd326fb108e5ea0c23d8368f12992 100644 --- a/annotations/35.json +++ b/annotations/35.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ad1901b8cfdcbe2359875f4ece1bd4e310287064c87027073c4dda4ec0468490 -size 604 +oid sha256:1e3f48c2fdccaf6db543b1ac05d53346fe287207b7bb733b5f54d4405c3a2777 +size 599 diff --git a/annotations/36.json b/annotations/36.json index cf967b69e4bd6c050fb7ec34150fd4689d220d1a..78af85c7e232605013a1ff604159244e0abe4640 100644 --- a/annotations/36.json +++ b/annotations/36.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f46af548c1f55383b4f8b390b9405bac2653987534cb9ec71dbf7151809aec94 -size 599 +oid sha256:dc94c7a469ff937d9b1c7cc66a980d173a4cf87e35115f6f09fbc646017a6bc1 +size 604 diff --git a/annotations/37.json b/annotations/37.json index 
50e34a1109ce9c490246de8ae8ad3699583cda27..4b6f9f7c59c6498b5c69cb7656fe6543b6f8ab8c 100644 --- a/annotations/37.json +++ b/annotations/37.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:104e23336250f1183d64e15d1a074a1d48933e364148325f7e8794f26be071ee -size 599 +oid sha256:c3d45788a846da32798817d4ea84b5f592645f777956a0ad345c8d23d2f16ec6 +size 600 diff --git a/annotations/38.json b/annotations/38.json index 491ea3544b80f14e2aa02fc4498d7be37d7217bd..b4aeb7357be96f409e2eaafde67053bea096271a 100644 --- a/annotations/38.json +++ b/annotations/38.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e54068ed2da4b507276215e58f8da433e32cac47af3add383e5074883e8fd864 -size 599 +oid sha256:22d1a9ce72adf548f968598e3509b62dff7e9b96471f67f261c1b754ffc99274 +size 604 diff --git a/annotations/39.json b/annotations/39.json index f8cc8e8a7674a98c99977675214f1e1d0e1733b1..35a89d985bf7287a4fc50222506fda7a653cca66 100644 --- a/annotations/39.json +++ b/annotations/39.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3df04f560bbc6c85f8487dedf2ff78f424ac32089385f4ee50fe9f14e4b213a3 -size 603 +oid sha256:86ca18c84330f5fb7ccd3038e15ddd5a8b8c21feec78a4a7ffdde4ba21cb1157 +size 604 diff --git a/annotations/4.json b/annotations/4.json index f21d4e4fd85e6ef4bd62b5f78ee3574e81a7f488..377212bb2b3a20e695e78f496a7e973e9db3c418 100644 --- a/annotations/4.json +++ b/annotations/4.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:848a22aca616af057d8c0034ac01c76837e13e28b479704eccee911df9f67900 -size 602 +oid sha256:567c123a3945a559463edeb0ef8aeaa8e384cc4b15670ed7f24e55ed7dd417cf +size 601 diff --git a/annotations/40.json b/annotations/40.json index 5fb21893493fef53026d323d34a7df7da6498734..15b85cb9bba86778aea41ad494b802423dfe3f0d 100644 --- a/annotations/40.json +++ b/annotations/40.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid 
sha256:34ff3ea25c1293cf659468ac051727c5c9139ea7096a8a33b55ae341ad1ced5f -size 604 +oid sha256:7d65333b60daaad2c3f27156935e99d9a62dc558b4a685b1165bd81e5ede30c4 +size 599 diff --git a/annotations/41.json b/annotations/41.json index f09f546c3db905086edbb4f0903b7eb5020dfc85..7713d27e93176e73a8ae82ed0fe4a59bc34e9faa 100644 --- a/annotations/41.json +++ b/annotations/41.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:37409e2fc70e52a9a3c845f93647536c138f52773b48ff6609c96bdec182ae70 -size 601 +oid sha256:79575b715af5bfd6e8faabe263821ebd46b16e1a530a3f0cfb9985179f1b3a51 +size 603 diff --git a/annotations/42.json b/annotations/42.json index fe978e3e2bcb02e74b6207152a14c74d9084fc51..accd4049aac425a42ad10737bf91f7a24c1f05f4 100644 --- a/annotations/42.json +++ b/annotations/42.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:81f941b5c78e6ce674feb55d43efcb76bf359b91261b4e7321f4c1479f601b63 -size 603 +oid sha256:faeb649b04652800e785b96f9cfcf581bcd10d4215de54c369316634386001aa +size 600 diff --git a/annotations/43.json b/annotations/43.json index d02920055a2305f39b385c29c090f317a40bf449..032b4924660546dd17b80f1bb1c19c49a218511f 100644 --- a/annotations/43.json +++ b/annotations/43.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:63cc3c54b974427a9e2e272fab077dd2e68fb4c461187cbcbe11ea65e78d69fe -size 603 +oid sha256:3aeb9129517fb819bec8de041c15ae0cadb0417af49d5afa2dab369f8abd9aa4 +size 604 diff --git a/annotations/44.json b/annotations/44.json index e19e09c475cac227a48b9ffec37cf0970fdb343e..308e1e9039b5140acd7d7c296b30fbec0fdc9407 100644 --- a/annotations/44.json +++ b/annotations/44.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d3005add021e6a6073a7a5d3ac06e98263ee66eb1b98f93d4b08e0138baf635d -size 600 +oid sha256:4779ddfe44c3f9949eaff4d848313c90581f0968f839be8c6c73723503bdbff8 +size 603 diff --git a/annotations/45.json b/annotations/45.json index 
fb51095aa15f3e5235d557a6afe8b7c1c0b89d74..b41fff5cb41f1904301822fc173490454dbdfa10 100644 --- a/annotations/45.json +++ b/annotations/45.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a452dc1d18eab57d0db9e2dac8bb596986abc7f9ca2393c86097cedf6394e732 +oid sha256:8bf77c0aa5247665ccfbe6f403bc83df043ecdae7419bd58af1d61e910a0db78 size 603 diff --git a/annotations/46.json b/annotations/46.json index dbe30e62dbf82d0f5cb5d0c9b4a6daf8c25d5235..3c2213175846feda974e7f05dc51d6393f14cd17 100644 --- a/annotations/46.json +++ b/annotations/46.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:56890a422e1246089c77be5d8425520010197efe4930e8fc8cd149821629ebbb -size 601 +oid sha256:ac7c3110abf97319a13a1a8c62492ea6e7f5dae6783b9751c8a662a6a21b84a0 +size 603 diff --git a/annotations/47.json b/annotations/47.json index ddf702fe7da7931ab13edd474e03977d5c0c8684..bc53c5cb8b6140bbe16f54b4aeb0862e883e0c7f 100644 --- a/annotations/47.json +++ b/annotations/47.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a5e861a261aa25e0ae31a9f5c44b5108be32afe5829fb4ccac19827124caf36c -size 604 +oid sha256:2c4dddf8ca8c800edafbd639b43a41c1248cf32761470ff07358e57cda4a5a36 +size 599 diff --git a/annotations/48.json b/annotations/48.json index 7c04df35f3c38f85495bd0fcb43e79e0ec36a888..ed5b118e70f5e0b9f224ec7ec3c45a4dae1ad27e 100644 --- a/annotations/48.json +++ b/annotations/48.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:809b9fefabcda3dd6fc2b186b7c00b26492b50fe6a6e8ff41b0f425e31ca0c11 -size 598 +oid sha256:86f5e524efe7bfd965e2a8fb589cce147e63dfc44008648a78066154cad17f94 +size 599 diff --git a/annotations/49.json b/annotations/49.json index 43ff259c887cc5915642ab5466e4fb4f03283af2..f8bb63243807414270329ed637d27733937395a2 100644 --- a/annotations/49.json +++ b/annotations/49.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid 
sha256:f410123cb4fceb522477f7b30aeb365755e39a3ffe0fcfbf38045dd2ee8333d2 -size 600 +oid sha256:8614e2e58a289f406993a86a503f48f64becb0ec88b47dda435b7846b1ba70cd +size 599 diff --git a/annotations/5.json b/annotations/5.json index 545e9c0e7c04683cf0769c0e4e9c321f1fba16b1..bda7ea30c30cb8637884ec5c4c03758b785b2133 100644 --- a/annotations/5.json +++ b/annotations/5.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9e56616e820ffcb350a8156c50c451c6141830ae13569aae6b44694c10060047 -size 597 +oid sha256:c2d200fb9e51ac78e4d9cb486c0b7d2f38e0c37e7b48ecf5eb8a168603d8efb9 +size 602 diff --git a/annotations/50.json b/annotations/50.json index 28f04cdc9075c9c2b415581089d9cdc1e03d6ef4..716dc081c1d92a43fad2a39a66f4ea926840d839 100644 --- a/annotations/50.json +++ b/annotations/50.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:59e87b153c98612abd961eb4b0de42454733a8a00cfba2369f8f9f84b01b8e73 -size 604 +oid sha256:bd8417d36f6875ffe56a1de2195bfad6d39114b29bdb6983d03bef7cd757ded8 +size 600 diff --git a/annotations/51.json b/annotations/51.json index 7f775594c8dac8f5897a2094e97ae410f4abe686..593915c102ec167312f08cb75ae9dc3d2f03177c 100644 --- a/annotations/51.json +++ b/annotations/51.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8b700535ed7d18c8dce0225d9a6531fd74e69c37bdbb27febedff849d1bdeeba +oid sha256:d1068f4ce9e30b4d235b0e7afa04fcaba99558b4f690238aa739b0ee14c7dae3 size 598 diff --git a/annotations/52.json b/annotations/52.json index 0329b5138e5d8b66a3105de3e9d0e2c066cda776..e6c746e8b67d3de6929b55dd98061b2b39fdd586 100644 --- a/annotations/52.json +++ b/annotations/52.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e6a243975d0ee520df2b118a1e4ab3e30644288e06102b7622cbeaa9df37aa19 -size 603 +oid sha256:621d281397c72f51df3fc77caf8708c470adbee9763ff9adc86ef741ceb6f205 +size 599 diff --git a/annotations/53.json b/annotations/53.json index 
5cc04a1281327841ab7fa2085f4eb39bfd0c9a77..b69c89789febe5c9ecc2cee97bd4239c98697308 100644 --- a/annotations/53.json +++ b/annotations/53.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e55fdb55ef5f12b8429c188508eb34ec15a1b3af933288735be0d2ed063c46fb -size 604 +oid sha256:78420eb698ea3d76e1f0cc7aebfd801ea67073a064fcbf17d17ea89ae391c62b +size 597 diff --git a/annotations/54.json b/annotations/54.json index 7a752c935a54ecddb15a7881b5aab0c0de99cbea..b01ec79a9f6413a489e9514dd685f885db4c45aa 100644 --- a/annotations/54.json +++ b/annotations/54.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9f49f04e960d0cc25b46529c0789ff867bbf2896b8dfa1e9a2b4277dea4c5cfb -size 597 +oid sha256:c068c6627beca2a85e926ee3c38bd5bac56e580c5132cac93a15aa188acf95de +size 598 diff --git a/annotations/55.json b/annotations/55.json index d1d5e9fca1f6119416d414ac52d30c25ac215911..60412ac0679711ba6aecd11320bace8e45441a3c 100644 --- a/annotations/55.json +++ b/annotations/55.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c17350b468bf14d2fa51ca367e731a2dbab81c574199a82339a0ac7bfb97b410 -size 599 +oid sha256:c7bb8d7f3795ab0e44c9ce7a9935e955019f9f97ef9abe5619fbc528668826a5 +size 604 diff --git a/annotations/56.json b/annotations/56.json index 7ad00af4884ce2b412f54b192397646e8053cc6a..d62687d08402dbc206c2ab36cb3bc8638ca640fb 100644 --- a/annotations/56.json +++ b/annotations/56.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:46953be564670f5c8eafa35223480ebb839c156e05ffe955e93c17302d1cd17c +oid sha256:dddcad1c14cb410454a2cf881fde7ab2855632f42a168e08c6ef845b41f98b7f size 599 diff --git a/annotations/57.json b/annotations/57.json index 74d5e27a87f4632fbdfa695c77c21c6db4ce334d..67557d7bc68283d93938343e5a2a1c9d9985ab2f 100644 --- a/annotations/57.json +++ b/annotations/57.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid 
sha256:dbd3ea20c16caf338f5c9d3c77703addf0283f41851957be9af37fa1dce62b2d -size 603 +oid sha256:de1e98ca68ebacb4a171fe72ba818b88a870c8cf3b2b6670e40c88ceb580a875 +size 599 diff --git a/annotations/58.json b/annotations/58.json index 952fad3f176dbb2cc7253cbc179cfdbb6bc15dd1..c6632ef5f97e51c9e8985ebabbcc02ff29e56699 100644 --- a/annotations/58.json +++ b/annotations/58.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7f3fac35c9c43ba04a032294a9f5332088a539adb24b04fbfa18378c25d4f748 -size 601 +oid sha256:069bc9a7cc8b9e4bd91154ef8641c858bb14086337e2305bf7573a37a6746f54 +size 599 diff --git a/annotations/59.json b/annotations/59.json index bec1e9a5ee7b1c3c45c793fcb7fa6dae8d252c22..646f34778bbabe522b59a36bb10cb74786734433 100644 --- a/annotations/59.json +++ b/annotations/59.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:be3f7f81ea6ba2affa13e681f87020dbf71827fea811f065108a88ab62024cfa -size 602 +oid sha256:76a3b416fc71c45f48ae7f1f9fba6e52eadb1abfcf90b4e3c53d623471ca14ad +size 603 diff --git a/annotations/6.json b/annotations/6.json index 98055ed6cc969f36252285b733d14f2ed374095e..f9a48d8dc34250a623ae702f2f51d8a70db2d886 100644 --- a/annotations/6.json +++ b/annotations/6.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:85f0a8806b3921b5f3c9c4232322bfa78255658ebedb1d2610b0ecf5215b87de -size 602 +oid sha256:e655cd988b0ef2cd5a2d5a79f3edc466e22c58b96305ae0374f2d94aa6df0e59 +size 597 diff --git a/annotations/60.json b/annotations/60.json index cf3f6867404e6750f190071a01912356440d9eff..70158626cd350e857ea42ca9de15ca93b3234831 100644 --- a/annotations/60.json +++ b/annotations/60.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e51097ae517562a619db0608fc200bfa56230ef883f9be3866f4e7592b72dd01 -size 599 +oid sha256:c9b3df327d82305249ef778f8e1b8d6256029f53e5bf927206d846ade3fec258 +size 604 diff --git a/annotations/61.json b/annotations/61.json index 
b2b3778b1545f6a1754d469e72abf76efea6d795..a98351c0e130423c9920e3fecea7046906d3efb6 100644 --- a/annotations/61.json +++ b/annotations/61.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:40390498b6855a17cd635e7d248d583cb5c2a4daf64df9ad1d349b95ffc904b5 -size 603 +oid sha256:df3c7eb03ac0b2805bddf490ce699527256600e850a6a5e17057f70fbf019da9 +size 601 diff --git a/annotations/62.json b/annotations/62.json index bfa6ecef0ebeffeef4eca4c7ac0fd97bfc505802..1b71863183696d3e1853c181ea3a3114ae9e9eda 100644 --- a/annotations/62.json +++ b/annotations/62.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bf91a07ad31de91299dff6b4c083493ce05e78d11f3abc9be3a316fbdd456ac6 +oid sha256:3ba9dc223601e8807652a7fdb6cc6d9c92c6cf742d053abcb1a8ad3cff493ff2 size 603 diff --git a/annotations/63.json b/annotations/63.json index 119d28f9d1233e1fe0c95926cc1ac78d390d7db8..edf8564fefe60ce1f08509f5ba6d7c2262341f6e 100644 --- a/annotations/63.json +++ b/annotations/63.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d09c80b3170af3b7a333a715e6c1d9a369b09a7e6feebb5dde1a9ad1fbc62fc -size 604 +oid sha256:21c32a51d5a1196377443e3a6d50a3e471fb3d3400acd5dd6c3905ff9f30fb0c +size 603 diff --git a/annotations/64.json b/annotations/64.json index b7fef4528da2d9c9a56d2b799377d620f67899f2..2d57bae187ceb0b48958c23c72fc8de36aed1aba 100644 --- a/annotations/64.json +++ b/annotations/64.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4a9f6810007242e5ef6d460b26e0511ded6d360aacefb4d541d10397ad50bcd5 -size 599 +oid sha256:95168053a456730366c5adf5be97bd0da4bb96aba224514eea20c79b2019d2bc +size 600 diff --git a/annotations/65.json b/annotations/65.json index dbe614516dde37a394f3242334069edf9ae6da58..ef8372a8aa9979f7a6f1cb0b20bc7937045c79e3 100644 --- a/annotations/65.json +++ b/annotations/65.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid 
sha256:d9ad36d0be79a2007e58d359c905b3074d057a04d21fd6645910b044ee200dc5 -size 602 +oid sha256:faa50c1684656df4170763bc0fe5515f54888fa701fb84dbe84200a75c569c13 +size 603 diff --git a/annotations/66.json b/annotations/66.json index ef1525ab9180747f6546e7d48c226d97ae799a62..8d5ca2938ea92c3d51bd54a5b8e92907a83b019d 100644 --- a/annotations/66.json +++ b/annotations/66.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d27eb3c9cc133397dbeb9df65700045660ddd975c5409da5fac90e9a52ccf414 -size 599 +oid sha256:17f124fded9c4e25935b1c399bd0fd794f848cd7dc2fdd6d5937f3c81c27d79c +size 601 diff --git a/annotations/67.json b/annotations/67.json index f5d0a007a84d26ac9b70e66384e0a589a54dfb1e..3e6220358d7b85ce9098e30cff0badcd25fc345b 100644 --- a/annotations/67.json +++ b/annotations/67.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ffe91652b198e4edc4abd8e51bbda1c57a68768e15d273549463f8f9c395f5be -size 599 +oid sha256:7032ced9d9dcc2ab6bf47b603f4756e9d61472e3bf1f6b4a81ed9cfb5d4cd4cb +size 604 diff --git a/annotations/68.json b/annotations/68.json index 89097626f7f9f10ef97a801f00794f9fef0b3939..bcc971c34782c73e936f814d8405b3e4a6c56e4b 100644 --- a/annotations/68.json +++ b/annotations/68.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1aa49d34e46b1d3a85a6316e7bfffa9ea246373b480947c8c952f7393f83eff8 -size 604 +oid sha256:2ae8a4c5a4641fb07308ee2abd290c15f6e710c693d10a078b0ff265af27331d +size 598 diff --git a/annotations/69.json b/annotations/69.json index 2e8fb860515c83e60ce7cb01cb3d79535a398298..13f25f8aaa29b3be9181c826ab503dc3c7b39d12 100644 --- a/annotations/69.json +++ b/annotations/69.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9cb9f262c4a1f65b9ff5e6b5fead69ac54d215abdd4718ae834f19514faa3f21 -size 598 +oid sha256:c7294c8b3f8fdc3810c98657228f139f66f1e31546ead49c5b3e533be4f906c1 +size 600 diff --git a/annotations/7.json b/annotations/7.json index 
e8389a0f9635c667039af92b58fca55a806250b0..d65fe317113fbef4f153846193c017ce049cbc6b 100644 --- a/annotations/7.json +++ b/annotations/7.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2780fe92e4e525ab02bbef34c60aeeb189ef77fc45a5a56cc09f00b0d4c6a6de -size 601 +oid sha256:cacaf49007947505d10d06c2915879de5e216ecb451d595b80c86206ad47d2c0 +size 600 diff --git a/annotations/70.json b/annotations/70.json index 98a13941cc0da1d4800278b3d32c123d057f8c62..08df3b452117fb46aea8227281c2a4f20ab1b006 100644 --- a/annotations/70.json +++ b/annotations/70.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:2e9a791a30a0f694b251b6ab17ac12bd543680ec998b869d6f2831c8236395f9 -size 599 +oid sha256:700dbe6ce4cfb4b7520946c280110dc8befa6890f6c824d72776ab2b53bbbbee +size 604 diff --git a/annotations/71.json b/annotations/71.json index 9416c168e0d5a887a44f7ead84afa65c65da70a7..7a9c47034030b66a3ada2fe7bc00cec10b58dd07 100644 --- a/annotations/71.json +++ b/annotations/71.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:bd3fbb38ece9b710fb73e1189fdec92a50407c0be694fbb5b10e67fec16510f5 -size 603 +oid sha256:d881eaf03562976b6660aa1ef06dff89192ed976a31a9387a05a9ccac5258cac +size 598 diff --git a/annotations/72.json b/annotations/72.json index 05cb56834025f0ff005f4e3b8a18e949060f2a11..a43d3381261015dc753e087e15cbcde4debf0db3 100644 --- a/annotations/72.json +++ b/annotations/72.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4543d490cbc2672d131fb88bf836647406b25fd41d42b3dc13b52c91c978cbba -size 604 +oid sha256:bb4da22447002aa11fb254be19138bc74e7afb614fde80930c58bdb43f3dcf97 +size 603 diff --git a/annotations/73.json b/annotations/73.json index 99c3f939b665aac7e4a885885d597772217aaf2a..6882ac9426133c0f35703f0be4e8f401cd3ea152 100644 --- a/annotations/73.json +++ b/annotations/73.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid 
sha256:fb8d920a53103305eb329c1788db2dc788ccccf201ec52c80ae4531a079a9e3d -size 596 +oid sha256:b44d0bfca997a99e84e886bc99cecd65a53a68a06619026ce8d658c4e2aa0107 +size 604 diff --git a/annotations/74.json b/annotations/74.json index f2711adc26f1058c859996b6c547a349b93c5292..5b59e44e1c2075183a2b1e29f19747f8869ffb40 100644 --- a/annotations/74.json +++ b/annotations/74.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:becfa8ed7af98f0bc4d1d7328407f62518280820085f2168eddbbf6ba51d8f37 -size 598 +oid sha256:50c84c94204d3730f41653a275d318e63629eb95090d467ab094845920dcab18 +size 597 diff --git a/annotations/75.json b/annotations/75.json index 941076a09bad8d27954a4a1facf28ae33281d176..56620ef92c8b01a8b7ca2337e063767f86853590 100644 --- a/annotations/75.json +++ b/annotations/75.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:928b682dd65cb6de08d3fa99f8d381f2582e643e9c81c03fc5f44d0fa0efd711 -size 598 +oid sha256:1d29e278e8baa8f7dc84a63992428df116d7c8716ba772ae47c60b7e32791394 +size 599 diff --git a/annotations/76.json b/annotations/76.json index 70ba1797e93895b435094d1da32564c561d5876c..46c65d95a92ae64141bbf1ef696a67757204bf9f 100644 --- a/annotations/76.json +++ b/annotations/76.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6601d5a55c67e146ee704ceb138b539d4bb96be7e6242de6972442dee7a6126d -size 603 +oid sha256:871802128766ed0296521483e4abf15dff9fb4fcb5fb49542ba656682c93ab4b +size 599 diff --git a/annotations/77.json b/annotations/77.json index 362bab6f0eb64364d9bbca4c9065dd6095019132..82688fa37ec3ceb9f336a8b6f7cb0aaf691d2ead 100644 --- a/annotations/77.json +++ b/annotations/77.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:fd6b28da7dd457706097ce03cfb363bd6b765382e09e56cf76757dba7eb9f22a -size 602 +oid sha256:6d95818ceb8a9a3f97bd5554472186e7abfc9e993e401d7c50115ddd8f08a1e5 +size 603 diff --git a/annotations/78.json b/annotations/78.json index 
34d71b51f6ff84b102342dc76e6ce3f44aeb7024..61f2f0ab0f0c0e2847297c061bae51b21c928996 100644 --- a/annotations/78.json +++ b/annotations/78.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9b0c34c2922866ce5e686c3965b43c4d058c4adb99fd24c1afe0c5450157ec64 -size 600 +oid sha256:6fa377acce0776e40c6eb007eaf29a7c0bcc59c4249e9f93a839ad86347bf0d1 +size 601 diff --git a/annotations/79.json b/annotations/79.json index 85979239e57ae8d0e55c76f0d5a67124017da243..4062d52ef9b97eb43f0704ea9cf826c3b5642f78 100644 --- a/annotations/79.json +++ b/annotations/79.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8e250f8da658e34883a504aef0c23ae98018b88839edd1cc19217d0f2b0c4cdf -size 601 +oid sha256:6b4c439c2fc99bc86f90326ba1976cde10587120579a7e2a0372294e335fcf9b +size 602 diff --git a/annotations/8.json b/annotations/8.json index 6d9e68feb89c0fc0f5813ce0c5fc28c620ca29e9..ae5e5ac64632b83b1feae0128da3c7f31c5b8536 100644 --- a/annotations/8.json +++ b/annotations/8.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3b2f38f2025df2984bc6cd5efcf9f56a41778e1703d828e9ba866b5348f3a8d6 -size 600 +oid sha256:92593b34a83c6cfd7073654e955db26ce46db7914fe1dd602183fcfa8219cb45 +size 597 diff --git a/annotations/80.json b/annotations/80.json index 5be03cf12f2f2aacaaef010cab14790e813c87c9..862931a32e3ac7d3dd0c51b759afbf5fab54d72c 100644 --- a/annotations/80.json +++ b/annotations/80.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a1ba12e7f226a14f33d713dad3c97ed0c8cc574daed316ebb6f97071d5fcad19 -size 598 +oid sha256:bae3c5a1f9382acc02924b35194abc5ee24b32e00f3b5fef56c79ec937dcaa07 +size 599 diff --git a/annotations/81.json b/annotations/81.json index dfab610dd6bdaeabdb1c4daca9163ec827fe527b..f63b4f4113defffb68cae02e64371ab246caf6cb 100644 --- a/annotations/81.json +++ b/annotations/81.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid 
sha256:d9cf0bcdba0de76e3cc857a891d7533286858d006947292ae50f7f261ae5702d -size 602 +oid sha256:242ec090279288472acae87711a4c7639982b8beba69b0102ed3ec6c1b29ba35 +size 603 diff --git a/annotations/82.json b/annotations/82.json new file mode 100644 index 0000000000000000000000000000000000000000..484a860d7738fd80f82400e9de1330d23de852c4 --- /dev/null +++ b/annotations/82.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8595dc0089357017e2320a221cd7cd78e91375bd0bd9e65ecc2b0954bb0ee67 +size 603 diff --git a/annotations/83.json b/annotations/83.json new file mode 100644 index 0000000000000000000000000000000000000000..b93f02efbac9b9e3c0f108d4fe54d06116e06cda --- /dev/null +++ b/annotations/83.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2da6635fdbcf6550b64e43e2f2dd35c08cd294171db2e853b87497d62b82e4e +size 604 diff --git a/annotations/84.json b/annotations/84.json new file mode 100644 index 0000000000000000000000000000000000000000..4908f660318c36b6b59e395c4577c163ff82a91e --- /dev/null +++ b/annotations/84.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3915bd6b38bd2da4f37c38227185722ca7f4aefd0c2c57d7b0125208f33981a +size 599 diff --git a/annotations/85.json b/annotations/85.json new file mode 100644 index 0000000000000000000000000000000000000000..167cd0e7e7cbc103014cc78ee2c1494e9bfb4f17 --- /dev/null +++ b/annotations/85.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b4d8ef22da1724a895d7817fba1823a5f663e9ced592ac8ce8fca51bf51288c +size 602 diff --git a/annotations/86.json b/annotations/86.json new file mode 100644 index 0000000000000000000000000000000000000000..4bcce266672e1e78ba654a9cc5068e8d8a20efbc --- /dev/null +++ b/annotations/86.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6ce85ea8ca4ec86b725822080b68c08d19616c677c7b1c6f40e1ffc9198552c +size 599 diff --git a/annotations/87.json b/annotations/87.json new file mode 
100644 index 0000000000000000000000000000000000000000..bf078a16a0f00b6a270f698fd692c85c728616ef --- /dev/null +++ b/annotations/87.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9ea4843b8d3fa45de372d9a4e468c4f0be7ce5cebb3abf69d8a56fd87c1be01 +size 599 diff --git a/annotations/88.json b/annotations/88.json new file mode 100644 index 0000000000000000000000000000000000000000..0056fd724c1a4336e286f692e7e84633e20701c3 --- /dev/null +++ b/annotations/88.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb7c474a36bb0e20ffef1f247aefdf6b0f1aa91e8c536e72a3a52016c8f26c74 +size 604 diff --git a/annotations/89.json b/annotations/89.json new file mode 100644 index 0000000000000000000000000000000000000000..e63844eaa1fe70e004ffa3a4638ac8a456574117 --- /dev/null +++ b/annotations/89.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9bec71d88c9b50477f543ae09c1a67b4890a94661fe6a099911ca0176a40f39 +size 598 diff --git a/annotations/9.json b/annotations/9.json index ad0a5801d7842449ff5c787897d78f81b7dabdc6..a69d232d132121706e1d1fde97db39600cbc3001 100644 --- a/annotations/9.json +++ b/annotations/9.json @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:432932633a47bc1dc0820d033dc8ec154b76bb069e4b64a99187e36dee0280d8 +oid sha256:f863ec38b82e8bf213e084aeb5c97989610edde3af9a7a23db9ba4d8c74cc408 size 598 diff --git a/annotations/90.json b/annotations/90.json new file mode 100644 index 0000000000000000000000000000000000000000..ba09614d67d50768827258572d9c8f225062781f --- /dev/null +++ b/annotations/90.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f743442640a0d7859faad101955184207607d1123542a304f3c0e997021941f2 +size 599 diff --git a/annotations/91.json b/annotations/91.json new file mode 100644 index 0000000000000000000000000000000000000000..29eb333d525c603523f63d363b333a63a39f45af --- /dev/null +++ b/annotations/91.json @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8a915066e395ad4ffd3f6d2f0a847816a9606501edbca4472b3a677fc356e42f +size 603 diff --git a/annotations/92.json b/annotations/92.json new file mode 100644 index 0000000000000000000000000000000000000000..ab0aac5f80586fcea5ce6515b8b3784cf44ccf96 --- /dev/null +++ b/annotations/92.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b3bc24a1f66b591e86e5db7880190c120780a27d7dc393726118eadee392908 +size 604 diff --git a/annotations/93.json b/annotations/93.json new file mode 100644 index 0000000000000000000000000000000000000000..b27d8415b050d3dd571c9221ec4400ef481fa6bb --- /dev/null +++ b/annotations/93.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdb5ecbc263d95f2833f139929a5fab04900c660d1e36a41e9fe9d069021b6e3 +size 596 diff --git a/annotations/94.json b/annotations/94.json new file mode 100644 index 0000000000000000000000000000000000000000..5b558c486e177d7d5a0f5d55db04079fda7289de --- /dev/null +++ b/annotations/94.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6262fa97c60612633c530bfe9e5134966b4ea48bd9295fbeaa11dda41aa8eb4b +size 598 diff --git a/annotations/95.json b/annotations/95.json new file mode 100644 index 0000000000000000000000000000000000000000..4a3f9c877f097c7f522420a9837f282b9810ae4d --- /dev/null +++ b/annotations/95.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67effc29b48748637afae6612c5af17740f7c8375edfe4124e546f127de123f1 +size 598 diff --git a/annotations/96.json b/annotations/96.json new file mode 100644 index 0000000000000000000000000000000000000000..7b78a2bae770da857347a00ddc367983c2e6a7bf --- /dev/null +++ b/annotations/96.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e90376b04676d41552cc1bff585460b250ddc0fa9749a0e1e07f22cd8a000c94 +size 603 diff --git a/annotations/97.json b/annotations/97.json new file mode 100644 index 
0000000000000000000000000000000000000000..a2f342e9d0b5c416de98ad0a25226efdb190ceac --- /dev/null +++ b/annotations/97.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88951b2b1065f2894e75ae4c719ba914ec8b5f787365afdfdbf225735036539a +size 602 diff --git a/annotations/98.json b/annotations/98.json new file mode 100644 index 0000000000000000000000000000000000000000..3698c1e3cecf9965b1a03c0654b4ffb1d033f282 --- /dev/null +++ b/annotations/98.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:786d69d4b0ecbd728ac6e011357a8c9198029d5970c1508d4665cc96bc52e091 +size 600 diff --git a/annotations/99.json b/annotations/99.json new file mode 100644 index 0000000000000000000000000000000000000000..e39028335c1fe1b4f393328e94f39aeb19854826 --- /dev/null +++ b/annotations/99.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9205a0c8f7d7d96c67abb84c14a3378d7f1fb64f260529d10d83de608b56e12f +size 601 diff --git a/audio/100.mp3 b/audio/100.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..73bd073932fdb9455d990f341ce98282d850b363 --- /dev/null +++ b/audio/100.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7ed9e2ba97d3231457b3e699f67130488af59df2827599cecbaa4f054e1ccf1 +size 1524716 diff --git a/audio/101.mp3 b/audio/101.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..67d92439269479d26ceb87aa36b281d1a75a16c7 --- /dev/null +++ b/audio/101.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff2b40d06add3f07ca26e609ca0fef0270b9f4e72bbfe33a31bf193bcee7e96b +size 4384556 diff --git a/audio/82.mp3 b/audio/82.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..5e27bedfea3c81a479d3f828a25a3d068a880445 --- /dev/null +++ b/audio/82.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcc72030e7b00e1fe1cc439adc827178a773a020a0a87e41077bc42ccf42126f +size 2587436 diff --git 
a/audio/83.mp3 b/audio/83.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..1bf9f337fbdc8be5726c5ff504a51444456d59ae --- /dev/null +++ b/audio/83.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ea80e81bd6d3a5fd0addc1a52eca7496af6a382ff83a711675817d274b92300 +size 3605804 diff --git a/audio/84.mp3 b/audio/84.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..1e3e602a76377523a560ac7b7c4165900e7152a5 --- /dev/null +++ b/audio/84.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a851078868d37b85ca594db6811fa7a9410764bf5e55f8de9ff57d0724843d98 +size 5389962 diff --git a/audio/85.mp3 b/audio/85.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..f9e8f7a5846a7ac0f5cebad7ee4214b58282fd8a --- /dev/null +++ b/audio/85.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c914d6e441095bd6a40aa5a558418ae5a321ccf83238b073b8f4f16c6154f39 +size 1672842 diff --git a/audio/86.mp3 b/audio/86.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..90557fc63594f7799e1923114ab958ead7e8fb88 --- /dev/null +++ b/audio/86.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:736e83bcc12261e6d83c46b915a26d9c4fc4fbffdf441a4b6a1bc896300acf83 +size 649051 diff --git a/audio/87.mp3 b/audio/87.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..6a753d53c2f383c479cb9973787491507692384c --- /dev/null +++ b/audio/87.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5df94fb343c121e973b335952996dfee728aa804f5adfae688651654e30a1c1b +size 2566124 diff --git a/audio/88.mp3 b/audio/88.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..41a901c1c0525914a2e5b0920e60a968ae0c312f --- /dev/null +++ b/audio/88.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4249ca4f032cf3a438c3f004a48ed2da00c563e603ef284c892302689999bb96 +size 2980844 
diff --git a/audio/89.mp3 b/audio/89.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..6e2783e42ecbe6946a752ead990ac8b528f8752d --- /dev/null +++ b/audio/89.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc2e20543f4f7eada7275e0c7b9fc256023320eb7f067edc41e84ff81c5f633c +size 3235436 diff --git a/audio/90.mp3 b/audio/90.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..c26a9908e7546cbac92686b02d7f46eeade3f494 --- /dev/null +++ b/audio/90.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a692a5efca0e78afd528ef4edcff7d69e64cbb5989ae20f31ad01cf2faeb271 +size 2577644 diff --git a/audio/91.mp3 b/audio/91.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..71e19cb0acdc1d576535c3cdd5b6db814c495a7b --- /dev/null +++ b/audio/91.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55a4246d9bf9bfdfc1b28b4add8b4a8c746b7473f10e28ff3f909709017b04eb +size 2070764 diff --git a/audio/92.mp3 b/audio/92.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..a42b02acca82456e4c67bb0a1e5e945357a4e720 --- /dev/null +++ b/audio/92.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4abd166ccade3f644d84f0b394d7c64f90c5b8adee074b6b0bcfb53b95b8e07d +size 2814956 diff --git a/audio/93.mp3 b/audio/93.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7f60b880f0f7e7695a157e9c4c9c588efdffeea4 --- /dev/null +++ b/audio/93.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:706082dc880f42aa397f9aee429f2f8a4d62fa19417e106e646d6031f91e4f11 +size 7845164 diff --git a/audio/94.mp3 b/audio/94.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..181b74283416b8efbea4fdc0be68c66a3bd13f2b --- /dev/null +++ b/audio/94.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de35c8502abe369ee5876eea1d354a292d135675764ea7606e835a146c7b191c 
+size 8816183 diff --git a/audio/95.mp3 b/audio/95.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..4a16df73f1dfc033e4c949b7db5e9eb94d953887 --- /dev/null +++ b/audio/95.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e831b32171884e6ac24dd23e3e54973da861f48f44a47a5e8fbecf2bc6720438 +size 4359902 diff --git a/audio/96.mp3 b/audio/96.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..fd4876876248f9873fc1e3c60713825b13fde07b --- /dev/null +++ b/audio/96.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e47a89f43f0de0b54b995ac790cf0041677f8944e208b009dc1caa6029fcf414 +size 888236 diff --git a/audio/97.mp3 b/audio/97.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..c7bf9a38a696b47ded9a2fb62a552b74dddfae4c --- /dev/null +++ b/audio/97.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fe09dc5bf67af9a6f92fcf38b02508567fb8ce34984e744908386add67de18f +size 3113324 diff --git a/audio/98.mp3 b/audio/98.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..25b62c47313ef75995abe35244594e65650334c3 --- /dev/null +++ b/audio/98.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a5c658f0ee134e0c31a9ca939ad7805a9a76a321f5e3728dd575ce734c250ae +size 1484396 diff --git a/audio/99.mp3 b/audio/99.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..34e9c6880398dfe1d777ed32bd0b8c82b9802f0f --- /dev/null +++ b/audio/99.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73519e4c374f1b7aa73fafe009ab248ad470a0a17e9b522d265af6293a246021 +size 1006406 diff --git a/transcripts/uncorrected/1.txt b/transcripts/uncorrected/1.txt index c6746054e40730a7d58b7683a2a3120f39bd9dfe..2937efe55b9250078fb5d0b659bfb3f378db1977 100644 --- a/transcripts/uncorrected/1.txt +++ b/transcripts/uncorrected/1.txt @@ -1 +1 @@ -I've recently taken up stock photography shooting, not 
as a commercial venture. I just recently bought a new phone, an Android, and it's the first time I've had a camera that actually feels like it can pretty easily create, take great photos. I've traditionally used like for my YouTube channel very big rigs and it's actually been I'm a belated convert to the joy of having something that can slot in your pocket because it does make creating content like spontaneous. The only thing I wish was actually it was less conspicuous because I love the type of stuff I do and a bunch of other people who aren't creating, is much better when there is less background stuff.

But anyway, I wonder something, so this right now is just like as I said it's a hobby, and it's like why these are fun photos, so I take a bunch when I'm out. I go through my reel, I create my own stock library, which if I get back into YouTube or writing I hope will stock always comes in handy when you least expect it. If nothing else I just enjoy kind of chronicling like right now I taking photos of the big buildings in Tel Aviv. I just enjoy chronicling what here and I feel like that legitimate.

I feel like a lot of people would turn around and say you're stupid why are you just taking images and posting for free people aren't even paying for them. I did sell some stock before and it just sounded it wasn't financially worth my time. So I always feel very defensive about it, like if people challenge it, especially because I'm very much against exploitation and content creation not being monetized. But I see what I'm trying to do at least as being a little bit different.

I'm not just trying to create pretty photographs. I see it as if there's a creator who wants to create content about stuff I also happen to really care about and lives in a different country and they can't easily just walk out the door and take photos of a construction site in Israel. If I can help that person with an image, I don't care about the fact that I don't make money from that. I'd almost feel less comfortable making money from such work. So, what do you say to that dichotomy? That there is... \ No newline at end of file +Generate a README for this repository. The README should state that this repository provides a pattern model for using personal context data in order to create AI agents with specific information about your life for a deeply personalized inference.

This pattern uses a voice recording that is then transcribed by a transcription agent and then provides that, then uses a chunking method, then uses a chunking agent to chunk that into individual context snippets.

Finally, the implementation is that these individual Markdown files are connected to a custom GBT, a Gem, or any other front-end supporting lightweight RAG implementation, or even OpenAI assistance would be suitable for this as well. \ No newline at end of file diff --git a/transcripts/uncorrected/10.txt b/transcripts/uncorrected/10.txt index a5ed8fd0dd34226e6d653b632b83d06cf0802662..065ebc48aa4c2ceb9dcb019ee4f9c5cee3096bd5 100644 --- a/transcripts/uncorrected/10.txt +++ b/transcripts/uncorrected/10.txt @@ -1 +1 @@ -Yeah, so the project level MCPs in Cloud Code are very, very good. It's a .mcp.json, and then there is, that's at the repo level. And then you have the .clouds.json, and that's at the file system level. So that's really the way you can add them in and I've just created some cloud code slash commands to instruct on installing.

What I want to do is for projects like the, for example, my website, Hey It Works, those are perfect ones where I would add the Contentful MCP to mcp.json just for their project. And that's an answer to my question previously. and so on.

So basically if you have multiple projects with different credentials, same MCP, but you just want to inherit different credentials per project, that's the way to do it. Now we don't know exactly if you want to have a master one for most projects and then just say just change different credentials for this project. I guess you could have one as just let's say Google Drive and then maybe Work Business Drive. You could create the same MCP with different names, that's one option. or you could create .mcp.json and those are kind of act as overrides as such at the project level. \ No newline at end of file +My two music videos I'm going to find on FAL, a cost-effective API for image to video, and then do like a prompt.

So if each, so then the target length, you need to work back to images, and from that, create videos.

So if you just have a library of images, that may seem useless, but they have, let's say, cars or people in them.

It's actually crazy, you can make music videos.

The other use I want to make for FAL before the HAG is to get Ronnie's book cover and get some preview photos, because they have a fine-tune for product imagery that we could use for all sorts of enticing promo images and maybe other things too. \ No newline at end of file diff --git a/transcripts/uncorrected/100.txt b/transcripts/uncorrected/100.txt new file mode 100644 index 0000000000000000000000000000000000000000..e3960e6d457375f71a0aa63d07c4c8ad4af74fc2 --- /dev/null +++ b/transcripts/uncorrected/100.txt @@ -0,0 +1 @@ +Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.

Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.

And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on. \ No newline at end of file diff --git a/transcripts/uncorrected/101.txt b/transcripts/uncorrected/101.txt new file mode 100644 index 0000000000000000000000000000000000000000..73fdefbd1c2ebcfad9ad59e23523ae1b8526edf2 --- /dev/null +++ b/transcripts/uncorrected/101.txt @@ -0,0 +1 @@ +Okay, so I'd like to add to the VoiceNote dataset manager. So I have really annotations, there's two main objectives for this project as I currently conceive of it. And I think on the front end it would be useful to, when I'm uploading stuff and annotating, to have two separate sections for it, a little bit more clearly delineated. and so on.

So, if we have delineated, for example, where we have upload new voice note, that can firstly just be called maybe upload, next section transcripts, next section, and by next section I'm defining the headers, next section classification, next section annotations.

So in classification, I'll just add a few more recurrent ones that we should have. Prompt General, Development Prompt, Read Me Dictation, Social Media Post, and then in Annotations.

So content issues call that Audio defects and let add one for a significant background noise In audio quality issues, what I'd like to have actually maybe is, and again, we're going to, I mean, in the process of defining the annotations and might have to sort of work backwards initially, but most of them haven't been annotated yet. I'm not going to start annotating until the schema is defined so it would actually be a lagging annotation process.

The ones that are missing currently are background music. You have background noise but I think background music is actually very important because from a copyright standpoint that could be an issue. and for multi-language don't actually even have English Hebrew I'd have to keep it open-ended as to what other languages are present and I'd like to have one for background conversations actually and tagging by language so English Hebrew Arabic Russian French I'm hard these would be the ones that encounter my local environments a lot \ No newline at end of file diff --git a/transcripts/uncorrected/11.txt b/transcripts/uncorrected/11.txt index 967c807454b0a74220c037fed4bb5c7b259c1fb5..710f2658007b401488207b052a87d41cadd9fdd6 100644 --- a/transcripts/uncorrected/11.txt +++ b/transcripts/uncorrected/11.txt @@ -1 +1 @@ -There's a videos page on the website and it currently pulls in an array of videos from Contentful which is great. I'd like to add a separate section to that website, it could be a tab and maybe the best implementation is that the this video list we currently have is called interviews and then the other tab is about impact and then for that one we'll just have the following contentful, sorry, cloudinary resources directly embedded and with the same lightbox set up as we have for the others. \ No newline at end of file +To act as a creative partner to the user with the specific purpose of ideating prompts for image to video.

The user will upload an image and in response your task is to come up with four unique and different prompts.

Using the image as context and imagining ways in which an AI image to video tool could create an imaginative clip from the still image.

You should return your ideated prompts in the following format.

Header with the short summary of the prompt and then the actual text of the prompt written within a code fence beneath it. \ No newline at end of file diff --git a/transcripts/uncorrected/12.txt b/transcripts/uncorrected/12.txt index 990a2d196c38a75d164d10b3eeeba44cdc6415ae..a542bfbe5f56fbd234045e7f5155ee7c6480b603 100644 --- a/transcripts/uncorrected/12.txt +++ b/transcripts/uncorrected/12.txt @@ -1 +1 @@ -What is the most cost effective API you could have for... It would be taking a headshot and going image to video, synchronizing it with audio which is diarized. And it would have to be scripted because it's a 90 minute recording so there might be as much as 40 minutes of dialogue for each of the two speakers that would need to be animated.

Is there any API that can do it cost effectively for this amount and what would be the approximate cost? \ No newline at end of file +I'd like to parse the meeting minutes.

In fact, an amazing workflow would be something like one of the AI tools that attends a meeting virtually, records the meeting, does STT, and then the workflow after that, which would parse that for action items, notarize it based on context of participants and who it is.

So in other words, it'd be a repository that it's like, here are the people that will be present, here even are maybe voice samples of them.

Here's the recording, STT and diarize, and then create those actions, those items from it. \ No newline at end of file diff --git a/transcripts/uncorrected/13.txt b/transcripts/uncorrected/13.txt index 23d92bc8dc9bfa4e504b3ce95a3ba1c52d761928..4e413a2d90bd9078f0afb570e225a5b7e9314c95 100644 --- a/transcripts/uncorrected/13.txt +++ b/transcripts/uncorrected/13.txt @@ -1 +1 @@ -What is the safety of drinking water from a bathroom faucet in Israel specifically? Is it in an area where you know the water is no issues with water supply, it's a residential apartment? Is the water always legally potable? \ No newline at end of file +I would like to create some product placement images using the book cover.

These would show the book in various contexts, intended for use on social media to promote the book.

I'm going to use an image tune model in file to create these.

What I would like you to do is develop a folder in this repository for product photo prompts and create a markdown file with a number of prompts that I can use in order to generate these.

Focus on generating a diverse collection of images showing the book cover in various contexts. \ No newline at end of file diff --git a/transcripts/uncorrected/14.txt b/transcripts/uncorrected/14.txt index 5a603b326fb72f83aeac0c5684079b32131729b6..26ce17d988ba51dd67153127701ea45f89624905 100644 --- a/transcripts/uncorrected/14.txt +++ b/transcripts/uncorrected/14.txt @@ -1 +1 @@ -They're getting a macro pad or control surface that can be used with Kdenlive by mapping to the keyboard shortcuts that already exist and has three, specifically for color correction.

I think any QMK, whatever it's called, thing could be adapted for it. \ No newline at end of file +So, when you upload MCP ads, I'm not exactly sure how it works, but it will create an MCP integration.

What I'd like is to have an integration for, the question is, if you want to target the same service with multiple credentials.

So I want to do, let's say, an MCP integration for my personal Google Drive, and then the work Google Drive, and then the one for Ronnie's projects.

And I want to say, in this repo, for example, the Google Workspace integration, that MCP is targeting this drive, so it's going to be using this credential.

So I'm not sure what the best way to set that up is, but I can look into it. \ No newline at end of file diff --git a/transcripts/uncorrected/15.txt b/transcripts/uncorrected/15.txt index b1426296d30ff0869552d23ba87ea8872a0ba3dd..2daabf46e1c3b81871ba24bce6f060b00094f96f 100644 --- a/transcripts/uncorrected/15.txt +++ b/transcripts/uncorrected/15.txt @@ -1 +1 @@ -I want to order a book about Python today and work through that course and see if the one that I'm paying for in Pluralsight has a Python course.

Python is real community, Python is real conferences, Twitter accounts, subreddits, map out the ecosystem for learning this really really thoroughly.

But beyond 3.13 for example, just like a big Reference. \ No newline at end of file +Zapier MCP is very very interesting. It will create an MCP server builder allowing you to create your own authenticated MCP server. So I've just gone ahead and created a server for personal and for, and I will do as well for the DSR holdings.

I want to note down the MCP server URLs, connect the tools, and then you can do it in Claude code on the, sorry, on Claude, like the web UI. Presumably that means you can connect it of course through any local MCP as well. \ No newline at end of file diff --git a/transcripts/uncorrected/16.txt b/transcripts/uncorrected/16.txt index 1d6b40cadcd1af0d2b5bcc01c9162dbc1436bbfa..3c094ba76d9f0a30693c5ac82c8c42fbc00d49e2 100644 --- a/transcripts/uncorrected/16.txt +++ b/transcripts/uncorrected/16.txt @@ -1 +1 @@ -The home server, which is actually an old desktop that was repurposed. It is an i3. The motherboard is very old. It's overall about an 8-9 year old computer with a very basic Nvidia GPU. and it's been fine for the workload so far because it was certainly gotten lots and lots of value out of the hardware. There's certain things that it can't do however. One of those things is the first real blocker I ran into was NVIDIA. Sorry, it was Frigate. Trying to run that just wasn't able to handle the... I couldn't do the GPU offload, I guess, because the GPU was too old.

And I kind of boomeranged in self in that I done a lot of it over the years. With AI, the advent of AI development however have actually warmed up to self again because deploying things at home and maintaining software has become a lot easier when you can debug common problems and handle installations with an AI assistant. One of the software products that I've deployed recently is called Resource Space. It's a digital asset manager, a DAM. And this is something actually that I've wanted for many, many years because I've been involved in content creation and photography and videography and I've always really wanted to build up my own stock library of assets.

Cloud hosted DAMs are just too expensive for hobbyists like me, if you want to call it like that, even though some of it's related to my job. They're typical enterprise products, so it's actually a perfect use case for self-hosting because having the media resources on the local environment makes sense from an editing standpoint, where I'm editing at home. And it provides something that I couldn't afford and makes it available for free.

With Resource Space, the constraint seems to be in I think all the workloads that go on. When you upload stuff, it then runs some metadata processing. It tries to run some facial recognition stuff. And it feels at this point that rather than one and without them it just really can't process uploads. So I kind of feel that I thought about maybe putting in a new CPU and increasingly I kind of think that well maybe actually that's not going to do anything for all the other constraints and maybe as I've decided to actually go into self-hosting and I see it as a long-term thing I want to keep doing for my business, maybe this would actually be a good time to just say I've gotten enough use out of this computer, maybe I want to keep the hard drives or the SSDs.

I mean maybe even not that, maybe it just a good time to say this is time for an upgrade. So what I'm looking for is what I keep what I always feel about whenever I open up a desktop I feel like there's just a lot of space that's not utilized in terms of physically, there's just open space in the case. I don't know where that is. And I'm wondering, I feel like for my current workloads, so it's Proxmox with ZFS and then there's Ubuntu on top of that. So I don't think a mini PC is going to be powerful enough to do all these workloads.

I do want to have an NVIDIA GPU, ideally. And many more. Hi, by 30cm long tower desktop. The way computers are bought in Israel, where I live, is actually mostly it's a spec-based ordering process by which you go into a computer store, you describe what you want, what you need. They will, you'll agree upon a spec, they'll give you a price. And then they'll actually assemble the computer for you. So it's not, I mean, you can buy off-the-shelf servers and whatnot, Amazon, and more.

And besides the other stuff that I mentioned our restreamer for the camera I would like to run Frigate. I would like to run Resource Space. And it would be nice to be able to run local AI inference, but I think I know that really pushes the budget up a significant amount. So with all that spec in mind, give me a few suggested specifications. And then importantly for each of those form factor in terms of what is the most compact form factor that I could maybe condense all of that into.

And finally, one option for buying outside of Israel is that you might visit the US in a few months. And if it's something that can be small enough that could fit into a suitcase it could actually bring it back but I'd rather not go down that route but just as a possibility. \ No newline at end of file +I'm wondering for the website, I'm interested in leveraging the best practices for AI spiders, large language models and indexing the website and specifically I'd like to adopt a friendly posture, by which I mean encouraging LLMs to scrape the content, etc.

I know this is a very new and emerging field, but are there any equivalent of a robots.txt that would create a permissive set of rules for AI tools? \ No newline at end of file diff --git a/transcripts/uncorrected/17.txt b/transcripts/uncorrected/17.txt index a6c4aa0dea473932cb03dfed9978e4ce2702e4a1..08c44fd850803dfc8c950a5a2bbc2a78c722b790 100644 --- a/transcripts/uncorrected/17.txt +++ b/transcripts/uncorrected/17.txt @@ -1 +1 @@ -For Frigate Plus, what I want to do is as follows. I'm looking into getting a new, getting a server. And I'm conscious that you want empty labeling and identifying labeling. Both of them. So I'm going to curate those or gather those on the cameras.

And now, if and when I do the server upgrade, the home server upgrade, then I would move over to Frigate and actually start using the trained models.

Worst case scenario it's just $50 and I never actually end up using the stuff but I'm hoping that I will at some point. \ No newline at end of file +So I'm taking a lot of photos lately and I'm taking them for a few reasons. Firstly, I'm very interested in the developments in image to video AI. So I began, because I had some free time recently, I began to just create, take a lot more kind of stock photographs than I usually do, photographs of my area, certain things. And now I've discovered a second use for that in having a very rich library of things to animate into video which opens up a lot of exciting creative possibilities.

And my wife and I had our first son a few months ago so that was what really kind of kicked me off on it and I have a better phone. So for many years I've wanted to, I really like Google Photos and I have the unlimited plan and I recognize that it's impossible to get the level of photo storage that they provide. No one else is going to offer that for free. And I'm happy with it except that I find it doesn't really scale very well.

It's not quite mature enough as a tool for like if I wanted to organize my own stock library and I wanted to begin tagging them with a bit of metadata. The other thing is that if you create a lot of different albums there's not really any interface for searching through all your albums if I want to add the same shots to different albums. So it's a very good platform, but on a few aspects, I feel like I could use something a little bit more!

And some platforms that I thought were very much oriented towards professional photographers. But I think maybe also there's a use for, I guess what you could call D-A-M, because if I could extend it to video as well, it would bring another dimension of use. I don't want to self-host, I'm looking for cloud platforms, and I also definitely want to be able to sort through and upload to whatever it is from my Android, as well as from my computer. What are your recommendations? \ No newline at end of file diff --git a/transcripts/uncorrected/18.txt b/transcripts/uncorrected/18.txt index 57ee9e7328b60a23b4d9d39ea97021e9d3ff8e2d..73d38421338c02eafe5e883da004e2646bd841de 100644 --- a/transcripts/uncorrected/18.txt +++ b/transcripts/uncorrected/18.txt @@ -1 +1 @@ -So, I have a question. For image to video, it's currently expensive, very expensive actually. I'm trying to find a way. So I found the WAN models, which are by Alibaba. I find them to be very good, and they have a more affordable WAN model that I like using. And when I'm doing a video, I frequently gather up my images, gather up my prompts, and I move in towards a workflow by which I kind of do the storyboarding, gather the source material as I call it, the photos. Gather the prompt together, and then I will run it as a script, which is a very novel way for me of approaching content creation in the sense that it's programmatic and it's code first.

Which is a strange way to approach a creative process, but it works. And it seems to me at the moment to be the most effective way to do this because otherwise, before this, I was using a playground, running them one by one, importing them to a video editor, and it's just a lot slower that way. Now the issue is that image to video, as I mentioned, is expensive. And if I'm doing these projects for fun, I have a lot of ideas I want to do for fun. But even the cheaper WAN models are in the region of 10 to 15 cents per generation, which could easily, it's very easy to go through 20 or even 50 dollars, especially given the fact that frequently you need to generate the same prompt multiple times before you get a satisfactory result.

I really, really want to explore image to video, and I'm trying to find a way to have an affordable way to play around with it even if it's not the best model. And you know, so what I've been thinking of is I come across for a while providers like RunPod who do make GPU available either in serverless functions or they do per hour pricing on GPUs. And since I discovered Replicate and FAL, I've kind of wondered, well, if you can just make an API call, why go to the trouble of managing an instance of a machine? I'm thinking now that it might be the cost reason that if the machines are a certain price per hour, it might actually be a lot more cost-effective than using an API.

So my question is, firstly, is that the case? Is a frequent reason that people actually do these or use these services for cost mitigation? And so on. So that's the first thing. Secondly, serverless versus pods as RunPod calls them. I guess serverless almost makes more sense to me because you just pay for what you use and you don't need to worry about starting and stopping the pod and configuring auto shutdown policies. So what’s the reason that people go for pods over serverless?

And finally, if I want to do this, probably the objective would be, is there a way that you can have like your own API endpoint and that's running stuff on the serverless function in the backend? And what I get confused about for these things, the first time I did it, if I'm not mistaken, I did it with video generation. The video actually generated on my local, which seems almost like magic to me. So you're doing the actual inference rendering in the cloud. And is it just the case when that happens? And so on. And then just running my script and then I'm using on-demand compute. \ No newline at end of file +I think the enrichments that I'd like to get specifically for each company would be a short description, company they're based in, the name of the country as a name and also its ISO value, and then a category, which category the company belongs to, which we kind of added here but it's in a it's an adjacent array so maybe the easiest thing is to whatever ATEO suggests for conformity and so on. \ No newline at end of file diff --git a/transcripts/uncorrected/19.txt b/transcripts/uncorrected/19.txt index a2ad0808542f04e9e26405fe883f5a3a95fa8ce7..2fc8d99f4f52ac3922262fdfaf89adb89d4e1630 100644 --- a/transcripts/uncorrected/19.txt +++ b/transcripts/uncorrected/19.txt @@ -1 +1 @@ -Yeah, I think I would look for... the truth is, I was initially... I have to try out my Cherry Red keyboard, the split one is a long term thing. But in the short term I have to say I've really warmed to MX Brown, and I think at this point I probably would use any MX Brown keyboard without noticing much of a difference from the AliExpress one, which is a brown imitation.

And this frankly one is it's a wired one and what I would like probably I'm thinking at the moment I wanted to set up a binding for cloud code and I think that rather than go down in the macro pad approach, which is one way, one approach certainly, it would be really nice to have a keyboard with built-in macro keys.

I think the MX Red one that I got has about five macro keys and I'm wondering if you can put about, you know, if you put up the entire top of the keyboard or the number pad, which I'm looking at the keyboard now. A lot of the keys that I rarely use are the sound controls, the number operators, pause, scroll lock, print screen. There's probably about 20% of the keyboard that I rarely touch.

Do you have any recommendations for a brown keyboard? Let's say I don't like compact keyboards, so I do like the full-size keyboard. The small keyboards feel cramped to me, but that has a full keyboard section and then maybe fills up some space on the right and along the top with macro keys, and so that rather than adding on micro pads you can just create some assignments on the keyboard itself. \ No newline at end of file +A common frustration when working with code generation tools in Python projects is their tendency to use system Python. I've tried to add as a commonly added workspace rule to instruct the agent to always create a virtual environment using UV for package management. However, even when that's instructed in Claude.md, the agent often fails to adhere to the instruction and tries to install packages into system Python.

So I frequently find myself having to prompt, "please activate the virtual environment, please use the virtual environment." What I've noticed is that even when it does this, it sends the venv activation command in each bash invocation, which is inefficient. A human developer would activate the virtual environment once and then run commands regularly within the activated virtual environment.

A more elegant solution would be for Claude code to have the ability to detect, to not only create virtual environments for instructions, but to detect them based on their folder pattern, activate them where they exist, and use them in a more efficient manner by activating them once and then holding that as a persistent bash environment for the session. \ No newline at end of file diff --git a/transcripts/uncorrected/2.txt b/transcripts/uncorrected/2.txt index 91b026892004ed21a3ba811a8f0f0669aed62925..3369ab367280f0fc5f0f39307d2ee9988076fee0 100644 --- a/transcripts/uncorrected/2.txt +++ b/transcripts/uncorrected/2.txt @@ -1 +1 @@ -Capturing the beauty of what you perceive to be the beauty of occupied skyscrapers at night time. Capturing the details of the building with clarity, seeing the contrasting the illuminated exteriors with the dark exteriors, seeing from the outside, looking in, what kind of camera would be good for that. I mean maybe it's just a general purpose camera with good low light performance.

Lately I been shooting photographs of urban scenes on a smartphone which has actually far surpassed my expectations for quality. I been very impressed with it but there a big drop in clarity at night time which I know is kind of the way it goes with this kind of lens or whatever. It's a Nord 1 plus Nord. So for this kind of footage, I wouldn't want to have a huge lens because it's fun to capture stuff in urban environments. And when you're doing this kind of photography, videography, elaborate gear becomes very quickly a big burden. So it has to be something that did this well. It wasn't a pain to carry. What do you think would be a good choice? \ No newline at end of file +Go through the website and just make sure that we don't have any emojis trying to serve the place of icons, because they don't work. We should use whatever icon theme is, I think it's almost fully implemented now, but that should be used consistently throughout the website.

The other thing I would say is for, I used to have redirects like danielrosehill.com forward slash github to my github page. If you look in the social profiles, the social page, if I could have redirect routes for all of those networks, like danielrosehill.com forward slash n8n to that profile, etc, it would be great. \ No newline at end of file diff --git a/transcripts/uncorrected/20.txt b/transcripts/uncorrected/20.txt index e0ca9c5f871fe1db6ec60a09ae492e1cb1614512..1bf87876e16c3c80ab5a0801cf18f92e6a83bae3 100644 --- a/transcripts/uncorrected/20.txt +++ b/transcripts/uncorrected/20.txt @@ -1 +1 @@ -Okay, so for Kdenlive, I wanted to get a macro pad with three toggles for video editing, a control surface in other words. I know that people on use, there's a few macro paths or there's a large community of people who have adapted different things for use with Kdenlive as control panels or control surfaces as they're called.

I have a friend who is a photographer and he bought an off-the-shelf controller and used it as a control surface for something else. And it made me think, is there anything that people commonly use for Kdenlive? What would be really helpful would be the three wheels for color correction, which would probably be... Those are, I guess, kind of toggles, and then scroll wheels for three scroll wheels, and it's always in pairs of three for that. But yeah, those are the ones that people commonly use and like. \ No newline at end of file +In certain applications, Android applications like Pexels for example, they seem to process uploads a lot better, Pexels as media, when I have the application open, as in in the foreground, in focus.

My question is, I thought, with Android you can give stuff background permissions and whatever, but it does seem to make a substantial difference.

I can't get the same results when the app's in the background, even if I'm using other things on my phone, as when I physically have the app in focus.

My question is why is that the case? Isn't it easy for developers to allow background data access? Like what's the reason for that? Is it in my imagination or is it really something that's happening? \ No newline at end of file diff --git a/transcripts/uncorrected/21.txt b/transcripts/uncorrected/21.txt index 2a58c5f30c61703af3ed4fd13d9b1c23315f1326..c6746054e40730a7d58b7683a2a3120f39bd9dfe 100644 --- a/transcripts/uncorrected/21.txt +++ b/transcripts/uncorrected/21.txt @@ -1 +1 @@ -So I'd love to get your thoughts on the following. There's a tweet from Sam Altman that he wrote a few years ago and it's aged quite well as they say. He was announcing the release of ChatGPT and maybe an early iteration of ChatGPT, maybe 3 or 3.5 or something like that. Maybe even an earlier one. And the tweet went something like, it's our conversational, or first it's a conversational model or something.

And what's interesting to me about this is that I discovered AI through ChatGPT, or got excited about it through that interface. And then from there worked back to more instructional workloads: first I used it as a chat interface, then began using LLMs through their API endpoints, and then began using them programmatically, in scripting, and on my local computer. And now I'm doing much more of that than I am using them as chatbots.

I know a lot of people, I think even people who are pretty technically literate, aren't really aware that there's, that there's, AI can be used in this way. But what's interesting about that tweet I mentioned is it inferred that instructional models actually predate conversational models. In other words, that I think what he was saying was that OpenAI had developed GPT firstly for instruction following, and then they sort of refined it for conversation.

And what I'm curious to know is, is that accurate that instructional models predate conversational models and if so by sort of how long? \ No newline at end of file +I've recently taken up stock photography shooting, not as a commercial venture. I just recently bought a new phone, an Android, and it's the first time I've had a camera that actually feels like it can pretty easily create, take great photos. I've traditionally used like for my YouTube channel very big rigs and it's actually been I'm a belated convert to the joy of having something that can slot in your pocket because it does make creating content like spontaneous. The only thing I wish was actually it was less conspicuous because I love the type of stuff I do and a bunch of other people who aren't creating, is much better when there is less background stuff.

But anyway, I wonder something. So this right now is, just as I said, a hobby, and these are fun photos, so I take a bunch when I'm out. I go through my reel and I create my own stock library, which, if I get back into YouTube or writing, I hope will be useful — stock always comes in handy when you least expect it. If nothing else, I just enjoy kind of chronicling; like right now I'm taking photos of the big buildings in Tel Aviv. I just enjoy chronicling what's here, and I feel like that's legitimate.

I feel like a lot of people would turn around and say you're stupid why are you just taking images and posting for free people aren't even paying for them. I did sell some stock before and it just sounded it wasn't financially worth my time. So I always feel very defensive about it, like if people challenge it, especially because I'm very much against exploitation and content creation not being monetized. But I see what I'm trying to do at least as being a little bit different.

I'm not just trying to create pretty photographs. I see it as if there's a creator who wants to create content about stuff I also happen to really care about and lives in a different country and they can't easily just walk out the door and take photos of a construction site in Israel. If I can help that person with an image, I don't care about the fact that I don't make money from that. I'd almost feel less comfortable making money from such work. So, what do you say to that dichotomy? That there is... \ No newline at end of file diff --git a/transcripts/uncorrected/22.txt b/transcripts/uncorrected/22.txt index 7ade92ea48527be48c9ed28805bb0153509bb3a1..91b026892004ed21a3ba811a8f0f0669aed62925 100644 --- a/transcripts/uncorrected/22.txt +++ b/transcripts/uncorrected/22.txt @@ -1 +1 @@ -Here's my idea for an AI podcast workflow. I think if it's just questions summarized by AI and people know that the whole thing is text to speech, it's a little bit off-putting because people think I don't want to listen to just a robot speaking the whole time.

I think if the podcast format was that my voice prompt actually makes it into the final output so it starts with me recording a voice prompt as I'm doing now, then that gets transcribed. Then the rest of the workflow is the same, but what I do for the actual episode render is I combine my voice prompt with the AI response. So that you really get the feeling that it's me actually asking something that's definitely not AI. That I'm an identifiable person speaking. And then the podcast goes from there.

I think it would be more effective and more impressive and more enjoyable to listen to. \ No newline at end of file +Capturing the beauty of what you perceive to be the beauty of occupied skyscrapers at night time. Capturing the details of the building with clarity, seeing the contrasting the illuminated exteriors with the dark exteriors, seeing from the outside, looking in, what kind of camera would be good for that. I mean maybe it's just a general purpose camera with good low light performance.

Lately I been shooting photographs of urban scenes on a smartphone which has actually far surpassed my expectations for quality. I been very impressed with it but there a big drop in clarity at night time which I know is kind of the way it goes with this kind of lens or whatever. It's a Nord 1 plus Nord. So for this kind of footage, I wouldn't want to have a huge lens because it's fun to capture stuff in urban environments. And when you're doing this kind of photography, videography, elaborate gear becomes very quickly a big burden. So it has to be something that did this well. It wasn't a pain to carry. What do you think would be a good choice? \ No newline at end of file diff --git a/transcripts/uncorrected/23.txt b/transcripts/uncorrected/23.txt index ed50ed359bc55372aab37746585a31f7525ccc9a..8b700f13501d324ea71f0435b13596b6edfa6d73 100644 --- a/transcripts/uncorrected/23.txt +++ b/transcripts/uncorrected/23.txt @@ -1 +1 @@ -I will try to build. What I want to build is this: I don't know, is there a name for this kind of workflow? So let's say I go out taking B-roll. Now, right now I'm using a lot of it for populating my own library, and sometimes I share it with stock libraries. And usually, they strip the sound. I like to have a workflow in which, well, my ideal workflow would probably be something like this.

Let's say I have a folder full of media and P4 files. I can usually end up with a few mistakes, unintentional takes, and those usually would be like kind of less than five seconds duration. Usually, I just eyeball and I look for the ones with a small file size that's too small. Next thing I like to do would be stripping out the audio, batching, putting the video into its own folder, and then maybe, because for stock I'm shooting it handheld, it should be stabilized. So, stabilization.

So it's basically a pipeline. And my question is this: can this be done? But if I want to build a few pipelines like this, this is, let's say, my stock video pipeline. I might have another pipeline for sorting, so I might have a few media pipelines, and I don't want to have to go every time into a repository and run it. But it does make sense that it's just a script, basically.

So what's the best way to have a few scripts? I'm basically asking what's a good GUI for this kind of workflow? I want to have my media folders, and then I want to say run this script within this folder, and that would take the TDM out of setting up and resetting up environments and Python and all the rest of it. So what would you recommend as a tool for doing that? \ No newline at end of file +Your task in this repository is to write a script which will process all the audio files in the audio folder in the order in which they're currently appeared by timestamp.

The objective is firstly to remove any periods of silence and secondly to concatenate the audio recordings into one recording and to optimize them for clarity of voice.

The audio files contain recordings of two people speaking and the objective is to make them as easy to hear as possible. \ No newline at end of file diff --git a/transcripts/uncorrected/24.txt b/transcripts/uncorrected/24.txt index acc8d62d6d5b71235676ccf824c7860bf8c12d53..ca6ef51df22df40dd430aa5e130b5e2530c23d7f 100644 --- a/transcripts/uncorrected/24.txt +++ b/transcripts/uncorrected/24.txt @@ -1 +1 @@ -I have a question here. I was exploring lately, getting up earlier, and it always really appealed to me. The idea of getting in sync with the sun, like the natural diurnal cycle. Stricadian rhythm, when the sun goes down approximately that's when you get ready for bed. When the sun comes up, that's maybe when you get ready, that's when you get up. But that would require, in the winter time at least, here, where I live, going to bed as early as, I mean I guess it depends. Whether you'd want to go to bed immediately at sundown, I think that's probably not realistic, and a couple of hours later. But even if you did the latter, you'd be talking about going to bed at like 8 o'clock in the winter, maybe as early as 7.

Now my question is, my interest in this really comes from a question I've always wondered or thought about, which is that until relatively recently there was no such thing as artificial illumination that you could click on with a switch in your home at least, and even the concept of street lighting being totally reliable and totally every street in a developed city being covered in street lighting, that was also a foreign concept. So in the evolution of humans, it seems to me it must be the case that this is a very recent adaptation.

So my question is really, from the historical record, what do we know about the kind of sleep cycle that humans gravitate to naturally when there isn't alternative lighting? Artificial lighting. Thanks for watching! \ No newline at end of file +I'm curious to know sometimes when I have a beer I feel fine and I have digestive issues since having my gallbladder surgery about six years ago. I mean, very very troublesome. But lately I was able to tolerate some amount of alcohol and today, as an example, I haven't eaten food yet. Maybe that's the cause. I just had a beer, an IPA at room temperature. There's a celebration going on in our country due to political news. But it's made me so bloated that it's hard to breathe. My stomach is like, I can just feel it expand out.

So this wasn't like a binge, it was just one beer. But the effect was very dramatic and very sudden. Maybe as soon as, I'm going to say, 10 minutes after consuming it. I'm already like I have a huge beer belly that just appeared out of nowhere. So I guess my question is, I always feel like cold drinks go down easier. I'm trying to figure out the situation for a long time. Because it's nice to be able to drink the odd beer without being bloated like a pregnant person. What do you say about this? \ No newline at end of file diff --git a/transcripts/uncorrected/25.txt b/transcripts/uncorrected/25.txt index 8a430e2e093e18c208dfeff13e05eabe999f06dd..4a7a1d90e43a9e5403edd584c7f643c7b95625ab 100644 --- a/transcripts/uncorrected/25.txt +++ b/transcripts/uncorrected/25.txt @@ -1 +1 @@ -I have a Nord 3 5G and I'm looking for a power bank. It supports this fast charging protocol. I think it's called SuperVOOC. And I was looking for a power bank that could basically charge it as quickly as possible, deliver the fastest charging that it can support from a non-AC outlet.

I got one from Bezeus before. I don't know what it was, it was 65W, I don't know if that's relevant for mainly smartphones or if it's just for laptops. But in any case, I think I've lost that power bank, so I need a new one.

Now I guess what I would probably like is the biggest capacity that you can fit into a power bank form factor. By which I mean, at a certain point, we're not really mobile, they make these power stations I think they're called. So the biggest thing you can get, and not an exaggerated spec but a real credible spec in terms of the mAh.

And the quickest, the combination of the quickest charging and the biggest capacity for this particular phone. Anything you'd recommend from Mosaic or other, let's say more credible manufacturers? \ No newline at end of file +So I got a Bezeus 65W charger for my OnePlus Nord 3 5G and I see there's also 22W. What does that number refer to? And can I get maximum charging speed from the 22W one if I'm not using any of these for a laptop? \ No newline at end of file diff --git a/transcripts/uncorrected/26.txt b/transcripts/uncorrected/26.txt index 839741289e07ba1b0cbfd1312e40cff52a27de8a..c88cdbee095d6b251fb7f4c16b01d2952b2e8b43 100644 --- a/transcripts/uncorrected/26.txt +++ b/transcripts/uncorrected/26.txt @@ -1 +1 @@ -I'd like to create a voice recording app for Ubuntu Linux. The app should have the following functionalities. It's a voice recorder, and it has the essential voice recording functionalities of record, pause, stop, and restart. The restart scraps the current recording in cache and restarts the recorder from zero.

For the transcription process, I'd like to institute the following workflow. We'll use Google Gemini API and ensure that we're using Gemini 2.5, which supports multimodal input, including audio. The recording captured from the user should be optimized for this purpose of voice capture for speech to text. By which I mean, I would suggest that we record in mono. We capture the recording in a space-efficient format. We're optimizing for creating a voice recording that is not necessarily the greatest and most detailed of audio clarity, but which strikes the best balance between quality and space efficiency for transcription.

The voice recording will get sent to Gemini for transcription with a system prompt that instructs it to transcribe and also clean up the recording by removing filler words, adding sentence structure, and adding spaces. There can be a second button which is called transcribe and optimize, and the transcribe and optimize workflow is the same except that the system prompt is a little bit more instructive and it tells Gemini, in addition to those steps, to remove filler words, add sentence structure, paragraph spacing, and try to optimize the text by adding headings, organizing the thoughts a bit, and removing repetition, so it's a little bit more aggressive.

In both cases, the transcription, when it returns from Gemini API, will populate into a... In fact, Gemini should return two things, a title and a text. The title is a short title for the voice note. The text is short is the text, and so on. The text is formatted in Markdown; it should appear within the Markdown within the text editor. There should also be a clipboard button, and finally, the user should be able to save the note.

When the user chooses to save the note, it will get saved to a predetermined folder which the user selects as where they save voice notes on their operating system. And it's saved there as a Markdown file with the title in machine-friendly format. So if the voice note title has spaces, the saved file name will just replace those with hyphens.

The app would be run repetitively such that if the user wants to record a new note, they start again, and when they do transcribe or transcribe and optimize, it will send and then overwrite the previous transcription. So the user has to click the save button in order to save it, or there can be an option for auto-saving configurable as a user setting. \ No newline at end of file +I built a couple of days ago, just to note some of the things I've been working on. So I recorded a long conversation with my mother AC and I wanted to transcribe it so I did that with Whisper. But I wanted to get it into SRT format. So a few good things occurred from this and I just want to know what they are.

The first is I understood that Hugging Face spaces, which are of course apps. So what you can do is you can create your own space. I can create a private space and choose to run it on any of their GPUs. The GPUs are on demand so I can create let say I guess what I previously thought was well hugging face spaces are great because you can prototype an idea and then you can share it etc They often used to demonstrate ideas or spaces What I didn't realize and what just clicked with me is you can create, let's say for example, a whisperer like what I did the transcription workflow. and or an image to video workflow that's not going to be it's not valuable to run locally as is more often the case than not especially for the current hardware you have which is AMD you deploy that up to Hugging Face and then you run it on their enterprise-grade hardware so you can run up to a quantum computer which is obviously something that no ordinary computer and many more

For FAL AI, if you're paying, let's say, 25 cents a clip, it's not cost effective; Google is priced similarly per clip. But if you run your own, now I'm finally understanding why people pay for on-demand GPUs or have provisioned GPUs. All of these are great because some of the medium-grade hardware that Hugging Face provides is priced at about $1-$3 per hour, which is actually quite cheap and an awful lot cheaper than paying per generation. So the danger with these things of course is everyone's nightmare: leaving a serverless function turned on and then getting a huge bill. So they have pause settings. So that was one thing.

The other thing I discovered — the thing I sort of connected finally — is why it's useful to duplicate spaces on Hugging Face. I didn't really understand what the point was: someone created something that works, so if you can duplicate a working space, you don't need to build it. But then it's private, so you know that nothing's happening to it, and you can run it on your own terms on your own hardware.

And then finally, there is RunPod and Modal for on-demand GPUs, and many more. I'm also using this for this purpose. I was building a pipeline with pyannote for diarization — which doesn't really exist well in an API — and building these things yourself, pulling down these massive Python libraries every time, is just way too time consuming. So if you create something and try it out in an environment that's much, much faster, that's why these things are valuable. \ No newline at end of file diff --git a/transcripts/uncorrected/27.txt b/transcripts/uncorrected/27.txt index b7896c7f96af437ec44fecaba4cd587b9fd8c785..ab9a2f95ba91effd6318d8c0d1348ceec4164b4e 100644 --- a/transcripts/uncorrected/27.txt +++ b/transcripts/uncorrected/27.txt @@ -1 +1 @@ -Okay, so the basic validation of the app is good. It functions according to spec.

I'd like to just remove the emojis and please take a look at the screenshots of the app as it's currently implemented and see if you can think of any design and UI optimizations that would make it even more friendly to use.

For transcribe and optimize, we definitely would like to have a label transcribe and optimize.

Maybe let's have a hover text or an about section where we describe to users the differences between these two functions. \ No newline at end of file +The top thing I want to, the technical thing I want to find today is the whisper hotkey. I can try to use Claude. Or maybe there is some, maybe it really is a live coding thing. Or there is some, the speech, I think there is a speech subreddit, speech tag. And I think it's going to get bigger and bigger.

The other thing I want to think about is microphone positioning for the one that I have the ATR mic. It's actually a mono source mic and I probably will get much different results. I think actually I might go back to the Gooseneck and keep this one just for I guess maybe on the road use. I think the pickup is better, but it's worth actually thinking about that. It may be just trying a few audio samples head to head.

And in fact it would be very easy to do an evaluation with almost really scientific in nature. Record the exact same thing, exact same phrase into a microphone in controlled conditions, in other words it's a note down the source, note down the distance, similar gain, note down the parameters in other words, all using the same Whisper model and then compare the and the speech arrays for every microphone. It would be interesting to know what the best results have and the results of comparing head to head.

Wearable headset Bluetooth headset lav mic and then for the non microphones comparing the Samsung Q2U with the ATR with the other ATR, the boundary mic, and with the gooseneck microphone. And seeing which of those we say reliably is the best in class for both modes of operation. And of course it's going to vary a little bit more but it would be surprising actually if someone hadn't done the research it would also be interesting to look at what Philips are bringing to market at the moment maybe even sort of plan it down the road as my ideal.

Yeah, it's $470 a dictation microphone. I'm not sure what would be the best use case to look to in terms of critical accuracy. I would say probably something like medical use, like radiology or transcription, because a lot of these are built for on-the-go use, where you have the context that's most fitting to my one of someone sitting at a desk. Now it's not guaranteed to be quiet with Ezra, so there's a bit of background noise going on, but it's still a relatively stable acoustic environment and in these contexts where you know no one's compromising on on a hundred dollars here or there what do people reach for as the gold standard? \ No newline at end of file diff --git a/transcripts/uncorrected/28.txt b/transcripts/uncorrected/28.txt index c4062c3c839f500b2242b1b7628a7ef9e4bd26f0..22d59168820d520421fbe4bc4a1965b66ebe8127 100644 --- a/transcripts/uncorrected/28.txt +++ b/transcripts/uncorrected/28.txt @@ -1 +1 @@ -I would like to create a docs folder in this repository.

The docs folder should be separate from the code and it will be the place in which documentation is gathered.

Ask the user if there is any specific functionalities or aspects of the application that the user wishes to document in this folder.

The docs folder should be mentioned and linked in the readme, directing users to it for more extensive documentation than can be found in the readme itself. \ No newline at end of file +I can try for my, I don't know with my notes editor that I had before, did I ever try it with a fully local version? Because I was thinking now, for the one where you do record voice and transform, if the back end was Whisper and, let's see, Lama 3.2, you know Lama, you made it very specific. This is what it's supposed to be using, even just for my own use. And you set up the environment.

That could be local mode, you could have local mode and remote mode. That could be a very, very useful tool. And if you just had one simple cleanup prompt, this could be the latest iteration of the text cleanup utility. Keep it simple, asynchronous.

SpeechNote but kind of pared down to just really really good capture and the cleanup thing which they don't, SpeechNote has never integrated that post-processing step which would differentiate it. \ No newline at end of file diff --git a/transcripts/uncorrected/29.txt b/transcripts/uncorrected/29.txt index ec565a8b602b1abb235b4c8a5616370d701f5be7..ae3da7506e205016728f0028c258d57cd3e14246 100644 --- a/transcripts/uncorrected/29.txt +++ b/transcripts/uncorrected/29.txt @@ -1 +1 @@ -Please go through the markdown files in this repository to make sure that no emojis have been used.

If you find any emojis, remove them.

If emojis have been used in place of proper icons, then identify an appropriate icon library that could be used to provide the emojis.

Remember that if the icons are well known, such as the icons from major social networks, these should be integrated via a pre-designed library.

Do not attempt to create custom once-off SVGs for any logo that likely already exists in a professional library. \ No newline at end of file +I should add integration to Cloudinary also for the website of the world. And environment variables as well, just making sure that they're copied in there.

And I'll see as well for Vercel if you can have the deployment be per branch. So there's a preview branch, you want to set that up as well. \ No newline at end of file diff --git a/transcripts/uncorrected/3.txt b/transcripts/uncorrected/3.txt index 8b700f13501d324ea71f0435b13596b6edfa6d73..4e0633c294c7c3433c6d0da62bc32c709eec1fb6 100644 --- a/transcripts/uncorrected/3.txt +++ b/transcripts/uncorrected/3.txt @@ -1 +1 @@ -Your task in this repository is to write a script which will process all the audio files in the audio folder in the order in which they're currently appeared by timestamp.

The objective is firstly to remove any periods of silence and secondly to concatenate the audio recordings into one recording and to optimize them for clarity of voice.

The audio files contain recordings of two people speaking and the objective is to make them as easy to hear as possible. \ No newline at end of file +Something I'm learning for installed comfy UI. I actually made a lot of progress today on my projects using PyTorch and ROCM. You can actually reuse, this is I guess the advantage of AI being so, like a lot of different things like let's say image to video, image generation, text generation, large language models, all being based on the transformer architecture. It means that a lot of the heavy packages like PyTorch and ROCM can be used for environments with totally different objectives and use cases.

So today when I was installing ComfyUI, then I installed a manager, then I wanted to install — I was working on all these things — a fine-tuning STT environment, and it kept being PyTorch and ROCm, and that's a really big, heavy download. I think by Docker — this is not the Dockerized version, but by Docker — it's a really, really big one. So I said this is crazy, and what I got from the Python subreddit, when I asked what people do when they have many, many environments, was that mostly they just have a base environment that they use for most projects.

So what I'm in the process of doing at the moment is using my existing conda environment — sorry, I created a new environment for PyTorch ROCm — and that environment is going to be what I use for these projects. Now I'm in the process of editing the launcher bash wrappers for these projects to all use that common conda environment, which — I was using Claude to calculate it — would save about 50 gigabytes of disk space. Not that that's a gigantic amount, but it's more the time saving of not having to repeatedly create and recreate these same environments.

Pay particular attention to icons for common uses such as social media icons which exist in many libraries, as well as emojis which may have been used in place of icons.

This approach should not be followed.

If the user uses an existing icon library that you can identify, then replace the custom coded icons with the most appropriate matches.

If the user hasn't yet implemented an icon library, provide some suggestions to the user, focusing on those libraries which will best match the aesthetic which they are following in their designs. \ No newline at end of file +Yeah, so the project level MCPs in Cloud Code are very, very good. It's a .mcp.json, and then there is, that's at the repo level. And then you have the .clouds.json, and that's at the file system level. So that's really the way you can add them in and I've just created some cloud code slash commands to instruct on installing.

What I want to do is for projects like the, for example, my website, Hey It Works, those are perfect ones where I would add the Contentful MCP to mcp.json just for their project. And that's an answer to my question previously. and so on.

So basically if you have multiple projects with different credentials, same MCP, but you just want to inherit different credentials per project, that's the way to do it. Now we don't know exactly if you want to have a master one for most projects and then just say just change different credentials for this project. I guess you could have one as just let's say Google Drive and then maybe Work Business Drive. You could create the same MCP with different names, that's one option. or you could create .mcp.json and those are kind of act as overrides as such at the project level. \ No newline at end of file diff --git a/transcripts/uncorrected/31.txt b/transcripts/uncorrected/31.txt index 0f9f01aeb1efa9b56a188dbecffed93a32cfd7c5..967c807454b0a74220c037fed4bb5c7b259c1fb5 100644 --- a/transcripts/uncorrected/31.txt +++ b/transcripts/uncorrected/31.txt @@ -1 +1 @@ -This repository contains a collection of slash commands which I use with Claudecode.

I capture some of the slash commands using speech to text.

The slash commands that have been captured with dictation frequently lack elements like punctuation, paragraph spacing, and they may contain occasionally words that were mistranscribed.

Please recurse through the directories and correct slash commands which you can find which were missing these basic textual features but do not limit your fixes to only I don't want to go into those containing these defects but rather consider in your editing any slash commands which need to be rewritten for optimal intelligibility. \ No newline at end of file +There's a videos page on the website and it currently pulls in an array of videos from Contentful which is great. I'd like to add a separate section to that website, it could be a tab and maybe the best implementation is that the this video list we currently have is called interviews and then the other tab is about impact and then for that one we'll just have the following contentful, sorry, cloudinary resources directly embedded and with the same lightbox set up as we have for the others. \ No newline at end of file diff --git a/transcripts/uncorrected/32.txt b/transcripts/uncorrected/32.txt index 35a25c66c27c2d44f0a64ca785442bcb2b03db07..990a2d196c38a75d164d10b3eeeba44cdc6415ae 100644 --- a/transcripts/uncorrected/32.txt +++ b/transcripts/uncorrected/32.txt @@ -1 +1 @@ -This repository contains a folder of screenshots.

The intended use of the screenshots is that they will be integrated into the README or other documentation to demonstrate the UI of the app.

It's important therefore that the screenshots have descriptive file names.

Please rename the screenshots for this purpose and integrate them into the README in the most appropriate section. \ No newline at end of file +What is the most cost effective API you could have for... It would be taking a headshot and going image to video, synchronizing it with audio which is diarized. And it would have to be scripted because it's a 90 minute recording so there might be as much as 40 minutes of dialogue for each of the two speakers that would need to be animated.

Is there any API that can do it cost effectively for this amount and what would be the approximate cost? \ No newline at end of file diff --git a/transcripts/uncorrected/33.txt b/transcripts/uncorrected/33.txt index c3e6aec46313e6c703697e4fcc48f050db3015c1..23d92bc8dc9bfa4e504b3ce95a3ba1c52d761928 100644 --- a/transcripts/uncorrected/33.txt +++ b/transcripts/uncorrected/33.txt @@ -1 +1 @@ -What's the most professional way to install a package on Linux? If I create an executable and copy that into the directory on path, such that I can call it, is that considered a worse way to install applications than through a Debian package? \ No newline at end of file +What is the safety of drinking water from a bathroom faucet in Israel specifically? Is it in an area where you know the water is no issues with water supply, it's a residential apartment? Is the water always legally potable? \ No newline at end of file diff --git a/transcripts/uncorrected/34.txt b/transcripts/uncorrected/34.txt index 72dd47f2927e95f6a555120604796efb0f7010e8..5a603b326fb72f83aeac0c5684079b32131729b6 100644 --- a/transcripts/uncorrected/34.txt +++ b/transcripts/uncorrected/34.txt @@ -1 +1 @@ -Your task is to take this system prompt and rewrite it for implementation in a structured AI system.

In order to do so, adhere to the following instructions.

Within the text of the prompt itself, define the The JSON output that the AI should be constrained to giving.

And instruct the AI tool that it is working in a structured workflow and must only return valid JSON.

Create a folder for the prompt.

And add in addition to the rewritten prompt text.

You should also create a .json file containing an Open API compliant JSON schema and finally and you create another JSON called object.json which contains just the JSON object. \ No newline at end of file +They're getting a macro pad or control surface that can be used with Kdenlive by mapping to the keyboard shortcuts that already exist and has three, specifically for color correction.

I think any QMK, whatever it's called, thing could be adapted for it. \ No newline at end of file diff --git a/transcripts/uncorrected/35.txt b/transcripts/uncorrected/35.txt index 76af9ed38a7f3a464480738293afb78a25ff5929..b1426296d30ff0869552d23ba87ea8872a0ba3dd 100644 --- a/transcripts/uncorrected/35.txt +++ b/transcripts/uncorrected/35.txt @@ -1 +1 @@ -Okay, so here is the type of license that generally work for me for open source projects. I usually open source software because I've created something useful. I think other people might either find it helpful or develop upon the idea to do it to take my idea and ability further. Attribution is always appreciated but I'd only want to make it mandatory if that wouldn't really sort of create friction with other people who'd like to use a project.

But attribution really helps me because it opens up the relationship and connectedness of open sourcing because if someone were to use it downstream, they have a way to sort of get in touch with me. People commercializing open source software doesn't sit very well with me, but again, it's only if it's, I'd be very reluctant to add that as a limitation.

Other than that, nothing else really stands out to me as something that I'd require. Like if people took it in any other direction, it's fine. The only one I think about sometimes is obviously no one wants something that creates to be sort of misused or used for harm. And one also doesn't want to end up with lawsuits if something they create is misused, so I don't know if there's any legal language that can create a little bit of protection around those potentials. \ No newline at end of file +I want to order a book about Python today and work through that course and see if the one that I'm paying for in Pluralsight has a Python course.

Python is real community, Python is real conferences, Twitter accounts, subreddits, map out the ecosystem for learning this really really thoroughly.

But beyond 3.13 for example, just like a big Reference. \ No newline at end of file diff --git a/transcripts/uncorrected/36.txt b/transcripts/uncorrected/36.txt index b57417cde1c5303a489404bcd259f827ea2cf7a6..1d6b40cadcd1af0d2b5bcc01c9162dbc1436bbfa 100644 --- a/transcripts/uncorrected/36.txt +++ b/transcripts/uncorrected/36.txt @@ -1 +1 @@ -The problem is that we looked at this before and when it reboots the router it's not bringing up the Cloudflare tunnel.

So see it's working now, but just see what can be done to make sure that this, we need to make very certain that this does start automatically on reboot. \ No newline at end of file +The home server, which is actually an old desktop that was repurposed. It is an i3. The motherboard is very old. It's overall about an 8-9 year old computer with a very basic Nvidia GPU. and it's been fine for the workload so far because it was certainly gotten lots and lots of value out of the hardware. There's certain things that it can't do however. One of those things is the first real blocker I ran into was NVIDIA. Sorry, it was Frigate. Trying to run that just wasn't able to handle the... I couldn't do the GPU offload, I guess, because the GPU was too old.

And I kind of boomeranged on self-hosting, in that I'd done a lot of it over the years. With the advent of AI development, however, I have actually warmed up to self-hosting again, because deploying things at home and maintaining software has become a lot easier when you can debug common problems and handle installations with an AI assistant. One of the software products that I've deployed recently is called Resource Space. It's a digital asset manager, a DAM. And this is something actually that I've wanted for many, many years because I've been involved in content creation and photography and videography and I've always really wanted to build up my own stock library of assets.

Cloud hosted DAMs are just too expensive for hobbyists like me, if you want to call it like that, even though some of it's related to my job. They're typical enterprise products, so it's actually a perfect use case for self-hosting because having the media resources on the local environment makes sense from an editing standpoint, where I'm editing at home. And it provides something that I couldn't afford and makes it available for free.

With Resource Space, the constraint seems to be in I think all the workloads that go on. When you upload stuff, it then runs some metadata processing. It tries to run some facial recognition stuff. And it feels at this point that rather than one and without them it just really can't process uploads. So I kind of feel that I thought about maybe putting in a new CPU and increasingly I kind of think that well maybe actually that's not going to do anything for all the other constraints and maybe as I've decided to actually go into self-hosting and I see it as a long-term thing I want to keep doing for my business, maybe this would actually be a good time to just say I've gotten enough use out of this computer, maybe I want to keep the hard drives or the SSDs.

I mean maybe even not that, maybe it just a good time to say this is time for an upgrade. So what I'm looking for is what I keep what I always feel about whenever I open up a desktop I feel like there's just a lot of space that's not utilized in terms of physically, there's just open space in the case. I don't know where that is. And I'm wondering, I feel like for my current workloads, so it's Proxmox with ZFS and then there's Ubuntu on top of that. So I don't think a mini PC is going to be powerful enough to do all these workloads.

I do want to have an NVIDIA GPU, ideally. And many more. Hi, by 30cm long tower desktop. The way computers are bought in Israel, where I live, is actually mostly it's a spec-based ordering process by which you go into a computer store, you describe what you want, what you need. They will, you'll agree upon a spec, they'll give you a price. And then they'll actually assemble the computer for you. So it's not, I mean, you can buy off-the-shelf servers and whatnot, Amazon, and more.

And besides the other stuff that I mentioned our restreamer for the camera I would like to run Frigate. I would like to run Resource Space. And it would be nice to be able to run local AI inference, but I think I know that really pushes the budget up a significant amount. So with all that spec in mind, give me a few suggested specifications. And then importantly for each of those form factor in terms of what is the most compact form factor that I could maybe condense all of that into.

And finally, one option for buying outside of Israel is that you might visit the US in a few months. And if it's something that can be small enough that could fit into a suitcase it could actually bring it back but I'd rather not go down that route but just as a possibility. \ No newline at end of file diff --git a/transcripts/uncorrected/37.txt b/transcripts/uncorrected/37.txt index 4b73fda258009e56d8fc1e8ade93312193c751d0..a6c4aa0dea473932cb03dfed9978e4ce2702e4a1 100644 --- a/transcripts/uncorrected/37.txt +++ b/transcripts/uncorrected/37.txt @@ -1 +1 @@ -I recently picked up a Samsung Galaxy 6 smartwatch just to try out the idea basically.

And my only need was really for a dual time display, local and UTC, and the day display.

It was about $100 give or take, so a very basic entry level that would sync with my OnePlus.

If it turns out that I really like it...

The other requirement was a good microphone for voice recordings.

Even if it's not the best and my phone is better, it would be nice to be able to use it for that because I take a lot of voicemails during the day.

If I turn out to really like it, what would you suggest as a good upgrade?

I tend to like more everything that's getting under the hood with technology.

So I wasn't thrilled about buying a Samsung, but it was what was available for the price point approximately. \ No newline at end of file +For Frigate Plus, what I want to do is as follows. I'm looking into getting a new, getting a server. And I'm conscious that you want empty labeling and identifying labeling. Both of them. So I'm going to curate those or gather those on the cameras.

And now, if/when I do the server upgrade — the home server upgrade — then I would move over to Frigate and actually start using the trained models.

Worst case scenario it's just $50 and I never actually end up using the stuff but I'm hoping that I will at some point. \ No newline at end of file diff --git a/transcripts/uncorrected/38.txt b/transcripts/uncorrected/38.txt index dd20ba87d4e27f321810d0504c0736c4e154d407..57ee9e7328b60a23b4d9d39ea97021e9d3ff8e2d 100644 --- a/transcripts/uncorrected/38.txt +++ b/transcripts/uncorrected/38.txt @@ -1 +1 @@ -I recently picked up a smartwatch from Samsung Galaxy and I'm curious one thing that would be really helpful that I thought of.

I'm always stressed about losing or potentially losing phone wallet keys.

And for all of these things, Fun Walla Keys, I use Pebble Bee Tracker now.

So I'm wondering if there's any way or app that can do something like geofencing in which if any of the things are...

Maybe you can turn it on and off at certain times but they're in.

If they move out of the zone you get an alert notification if the smartwatch vibrates or whatever. \ No newline at end of file +So, I have a question. For image to video, it's currently expensive, very expensive actually. I'm trying to find a way. So I found the WAN models, which are by Alibaba. I find them to be very good, and they have a more affordable WAN model that I like using. And when I'm doing a video, I frequently gather up my images, gather up my prompts, and I move in towards a workflow by which I kind of do the storyboarding, gather the source material as I call it, the photos. Gather the prompt together, and then I will run it as a script, which is a very novel way for me of approaching content creation in the sense that it's programmatic and it's code first.

Which is a strange way to approach a creative process, but it works. And it seems to me at the moment to be the most effective way to do this because otherwise, before this, I was using a playground, running them one by one, importing them to a video editor, and it's just a lot slower that way. Now the issue is that image to video, as I mentioned, is expensive. And if I'm doing these projects for fun, I have a lot of ideas I want to do for fun. But even the cheaper WAN models are in the region of 10 to 15 cents per generation, which could easily, it's very easy to go through 20 or even 50 dollars, especially given the fact that frequently you need to generate the same prompt multiple times before you get a satisfactory result.

I really, really want to explore image to video, and I'm trying to find a way to have an affordable way to play around with it even if it's not the best model. And you know, so what I've been thinking of is I come across for a while providers like RunPod who do make GPU available either in serverless functions or they do per hour pricing on GPUs. And since I discovered Replicate and FAL, I've kind of wondered, well, if you can just make an API call, why go to the trouble of managing an instance of a machine? I'm thinking now that it might be the cost reason that if the machines are a certain price per hour, it might actually be a lot more cost-effective than using an API.

So my question is, firstly, is that the case? Is a frequent reason that people actually do these or use these services for cost mitigation? And so on. So that's the first thing. Secondly, serverless versus pods as RunPod calls them. I guess serverless almost makes more sense to me because you just pay for what you use and you don't need to worry about starting and stopping the pod and configuring auto shutdown policies. So what’s the reason that people go for pods over serverless?

And finally, if I want to do this, probably the objective would be, is there a way that you can have like your own API endpoint and that's running stuff on the serverless function in the backend? And what I get confused about for these things, the first time I did it, if I'm not mistaken, I did it with video generation. The video actually generated on my local, which seems almost like magic to me. So you're doing the actual inference rendering in the cloud. And is it just the case when that happens? And so on. And then just running my script and then I'm using on-demand compute. \ No newline at end of file diff --git a/transcripts/uncorrected/39.txt b/transcripts/uncorrected/39.txt index 1c658e5f3d7436116c6a372301158c4d76aff497..a2ad0808542f04e9e26405fe883f5a3a95fa8ce7 100644 --- a/transcripts/uncorrected/39.txt +++ b/transcripts/uncorrected/39.txt @@ -1 +1 @@ -Something that would be very useful would be the following. So I use an app called Voice Notes for Android. And it's a voice recording app. It's called Voice Notes. Now, it has one fatal flaw, in my opinion, which is that it doesn't have Bluetooth support. So when I'm out and about, like now, I literally hold the phone up to my mouth, and it certainly gets me much, much, much better recording quality, but I kind of look a little bit goofy and I feel very self-conscious.

So there's two things I've thought about. One is finding a voice recording app that has more robust Bluetooth support. I think there are two options really that I'm thinking of. The first is finding, as I said, a voice recorder with very robust Bluetooth support and using a Bluetooth microphone to record with. The alternative, because I'm seeing these products come to market increasingly, is to use a wearable Android device, which probably wouldn't be that different, maybe even physically. And I think the more I think about it, the more I think about it, the more I think about it, the more flexibility. Rather than being a Bluetooth accessory, it's running, I guess, Wear OS, and maybe that would give you more flexibility.

I'm trying to think of the pros and cons on which would be better. I veered towards the wearable approach as it seems to be what's where. I don't know where the market is going with this concept, but I'm curious to know what your thoughts are regarding the pros and cons. \ No newline at end of file +Yeah, I think I would look for... the truth is, I was initially... I have to try out my Cherry Red keyboard, the split one is a long term thing. But in the short term I have to say I've really warmed to MX Brown, and I think at this point I probably would use any MX Brown keyboard without noticing much of a difference from the AliExpress one, which is a brown imitation.

And this frankly one is it's a wired one and what I would like probably I'm thinking at the moment I wanted to set up a binding for cloud code and I think that rather than go down in the macro pad approach, which is one way, one approach certainly, it would be really nice to have a keyboard with built-in macro keys.

I think the MX Red one that I got has about five macro keys and I'm wondering if you can put about, you know, if you put up the entire top of the keyboard or the number pad, which I'm looking at the keyboard now. A lot of the keys that I rarely use are the sound controls, the number operators, pause, scroll lock, print screen. There's probably about 20% of the keyboard that I rarely touch.

Do you have any recommendations for a brown keyboard? Let's say I don't like compact keyboards, so I do like the full-size keyboard. The small keyboards feel cramped to me, but that has a full keyboard section and then maybe fills up some space on the right and along the top with macro keys, and so that rather than adding on micro pads you can just create some assignments on the keyboard itself. \ No newline at end of file diff --git a/transcripts/uncorrected/4.txt b/transcripts/uncorrected/4.txt index ca6ef51df22df40dd430aa5e130b5e2530c23d7f..7f079833353ac85099ff920067ea8f24a4f1e608 100644 --- a/transcripts/uncorrected/4.txt +++ b/transcripts/uncorrected/4.txt @@ -1 +1 @@ -I'm curious to know sometimes when I have a beer I feel fine and I have digestive issues since having my gallbladder surgery about six years ago. I mean, very very troublesome. But lately I was able to tolerate some amount of alcohol and today, as an example, I haven't eaten food yet. Maybe that's the cause. I just had a beer, an IPA at room temperature. There's a celebration going on in our country due to political news. But it's made me so bloated that it's hard to breathe. My stomach is like, I can just feel it expand out.

So this wasn't like a binge, it was just one beer. But the effect was very dramatic and very sudden. Maybe as soon as, I'm going to say, 10 minutes after consuming it. I'm already like I have a huge beer belly that just appeared out of nowhere. So I guess my question is, I always feel like cold drinks go down easier. I'm trying to figure out the situation for a long time. Because it's nice to be able to drink the odd beer without being bloated like a pregnant person. What do you say about this? \ No newline at end of file +A lot of prototyping is done with Streamlit and Gradio, and my question is, I see a lot of demos like this and I've created a lot of them, when you see, when people are using these to prototype ideas, my thinking is if I wanted to go to create a proper app after doing Streamlit, that wouldn't be such a problem, it's just changing around the front end, so Streamlit's value, or the value of these tools, is in validating functionality in a more elegant way than a CLI, and then, so would it be the case that a lot of projects start out using Streamlit just for prototyping?

Because I often wonder when I see some projects that are using Streamlit, is this intended as the finished product, or is it more for more tech literate audiences, or are these, is the trajectory commonly stopping at this, stopping at Streamlit, and then maybe let's say in the case of a commercial startup, showing that to investors, and then building an actual proper front end.

Because of so, I think I understand the value now of them, and are those the two main, those are the two main ones I've seen so far for AI projects and Python projects, those would be the two main ones I'm supposing, right? \ No newline at end of file diff --git a/transcripts/uncorrected/40.txt b/transcripts/uncorrected/40.txt index 59abde81206328bbd33b6fe792b0dcf161a7d148..e0ca9c5f871fe1db6ec60a09ae492e1cb1614512 100644 --- a/transcripts/uncorrected/40.txt +++ b/transcripts/uncorrected/40.txt @@ -1 +1 @@ -So there's a lot of these AI voice pins emerging onto the market which are designed to be wearable devices.

So I record as I'm doing now quite a number of voice notes when I'm out of the house.

I use an Android app called Voice Notes that I really like but it doesn't have support yet for Bluetooth microphones.

At least not support that's reliable.

So I have to hold the phone up to my mouth, which really kind of degrades the experience.

As I started, I want to actually start doing, going on walks expressly for usually the moment I do this when I'm going places.

I just happen to think, but I actually want to start taking walks to jot down some ideas as a healthier way of combining work and getting out and getting some exercise and getting some sunlight.

And for that it would be really nice to not have to, you know, be holding up a phone to your mouth for 30 minutes or an hour or whatever it may be.

So I was thinking about wearable voice recorders but a lot of them from what I've seen are these kind of closed ecosystems in which they sell you can't just buy the hardware.

They'll sell you, they'll do like onboard transcription or they'll sell you like a Cloud Transcription Bundle.

I'm really not a fan of on-device transcription.

I mean I think it works but in my experience it doesn’t make a lot of sense to me just architecturally.

I think why do stuff on device that can be done in the cloud cost effectively?

And you got, you know, you can run vastly more powerful models in the cloud.

You don’t have to worry about quantizing models on a very, very small piece of hardware.

And so I guess what would be great for me, but Android, when you're looking at wearables, Android's like the obvious sync partner.

So you just need to get the voice of the audio data from the recording thing to Android and from there you can push to the cloud and then the rest is back-end speech and text.

So what I'm saying is that I'd love a modular solution that could do this.

A pin that is just hardware, just recording this audio sync, maybe has its own app, or maybe can be used preferably with third-party apps.

And therefore you can kind of build your own voice recording stack around it, and you can use your existing Speech-to-Docs transcription workflow.

And you don't have to subscribe to these very kind of, I forget the word, walled gardens in which the vendor forces you into this package that's often very unnecessarily expensive and you're paying mostly for overpriced transcription.

I'd prefer to just get, invest in good hardware! \ No newline at end of file +Okay, so for Kdenlive, I wanted to get a macro pad with three toggles for video editing, a control surface in other words. I know that people on use, there's a few macro paths or there's a large community of people who have adapted different things for use with Kdenlive as control panels or control surfaces as they're called.

I have a friend who is a photographer and he bought an off-the-shelf controller and used it as a control surface for something else. And it made me think, is there anything that people commonly use for Kdenlive? What would be really helpful would be the three wheels for color correction, which would probably be... Those are, I guess, kind of toggles, and then scroll wheels for three scroll wheels, and it's always in pairs of three for that. But yeah, those are the ones that people commonly use and like. \ No newline at end of file diff --git a/transcripts/uncorrected/41.txt b/transcripts/uncorrected/41.txt index 65b441258e93b436681d73b0928dd3ea5da97777..2a58c5f30c61703af3ed4fd13d9b1c23315f1326 100644 --- a/transcripts/uncorrected/41.txt +++ b/transcripts/uncorrected/41.txt @@ -1 +1 @@ -I picked up a Samsung Galaxy FE watch. I checked compatibility, smartwatch. I think it's in the 7 series if I'm not mistaken. What is it exactly? It's a 40mm smartwatch. Where does it fit in their line up? What's the difference between this and the Watch 7? I just went for this one because it was what was in stock.

And is it shower proof, waterproof? And I know it's a glass display. So I'm wondering how tough is the glass? Or is it tough at all? I just asked because it's a fitness watch. I assume they make them a little bit more ruggedized, but maybe that's not the case. What does it say? \ No newline at end of file +So I'd love to get your thoughts on the following. There's a tweet from Sam Altman that he wrote a few years ago and it's aged quite well as they say. He was announcing the release of ChatGPT and maybe an early iteration of ChatGPT, maybe 3 or 3.5 or something like that. Maybe even an earlier one. And the tweet went something like, it's our conversational, or first it's a conversational model or something.

And what's interesting to me about this is that I discovered AI through ChatGPT or got excited about it through that interface. And then from there worked back to more instructional workloads as then I used it as a chat interface, then began using LLMs through their API endpoints and then began using them programmatically and scripting and using them on my local computer. And now I'm doing much more of that than I am using them as chatbots.

I know a lot of people, I think even people who are pretty technically literate, aren't really aware that AI can be used in this way. But what's interesting about that tweet I mentioned is it implied that instructional models actually predate conversational models. In other words, I think what he was saying was that OpenAI had developed GPT firstly for instruction following, and then they sort of refined it for conversation.

And what I'm curious to know is, is that accurate that instructional models predate conversational models and if so by sort of how long? \ No newline at end of file diff --git a/transcripts/uncorrected/42.txt b/transcripts/uncorrected/42.txt index b51e9cf9eacfa8f539ba2c6270fbbbdcb80adeda..7ade92ea48527be48c9ed28805bb0153509bb3a1 100644 --- a/transcripts/uncorrected/42.txt +++ b/transcripts/uncorrected/42.txt @@ -1 +1 @@ -So there has been this vast development in multimodal AI recently. I signed up for Replicate and FAL AI. And what really strikes me is not only the diversity and number of models out there, but also the large number of permutations in multimodal AI, meaning what input can go to what output. And I think what I find difficult about it at the moment to navigate as a, let's say, creator. I created a few music videos just as kind of fun experiments. Is that there's so many different models. Like just in, let's say, the one series, there is maybe 20 different models to choose from in FAL, but they all do slightly different things not only in terms of the resolution and the parameter and the max duration but also in terms of the modalities, and they don’t really allow you to filter on this at the moment.

So what I mean by that is if we take an image to video model that animates still images to video, one model in one might create video without audio and another might create video with audio. And that's a very significant difference. But there's also a significant difference in do I prompt for the audio? In other words, is it going to be text to audio and render out audio that then gets added to video? Is it reference audio and reference image? So when you begin opening, all these differences really matter because I might want to filter on ideally, let's say I wanted to look at image to video models, which could generate lip sync to audio from a prompt. That might be one use case as well as the video.

In another use case, I might want to create a dialogue video. Let’s say I have a still image of a crowded market in Jerusalem, and I might want to prompt something like create a video from this image; the background soundtrack is background conversation noise in a bustling marketplace with vendors yelling out sales prices. That's just an example of the kind of background noise and the ambient noise that we have in this market I'm thinking about.

So what I would like to do, I created this repository which I created here. I'm trying to think of a taxonomy for multimodal, really for my own reference, but also as an open source project. Exploring the permutations of multimodal that are possible. So in the preceding example, we might have one definition of a modality might be still image to video without audio. Another modality, and then the description. Another modality might be still image to audio without lip sync. Another modality might be still image to video with lip sync.

But then you might have some sub modalities being still image to video with lip sync with reference image, that a reference to image. Another sub modality there might be still image to video with reference character reference in video. Another might be still image to video with audio with character reference through a LoRa (L-O-R-A). And I reckon that if we really enumerated the modalities we might get to hundreds if not thousands of different ones. For example, in FAL, just to talk about the long tail, there's music to music, which is music in painting. There's audio in painting, well, yeah, audio in painting, which I'm thinking aloud here is, I guess, distinguished music in painting is a subset of audio in painting, that it's melodic.

So that's the objective. I think that the JSON is the obvious format in which to attempt to denote these. And what I'd like you to do as the task definition is try to do this basically. Try to enumerate, list out a hierarchy, some kind of taxonomy representation that makes sense. We could try to create a baseline and then explore various ways of mapping out the hierarchy, manipulating the JSON so that we look at different ways of organizing it. So I think it would be useful to have like a first entry JSON in which we, and later maybe I, as new modalities come to, and we can maybe have very interesting labels might be their point of maturity, example workflows, use cases, etc. There's an awful lot that could be explored within these parameters. \ No newline at end of file +Here's my idea for an AI podcast workflow. I think if it's just questions summarized by AI and people know that the whole thing is text to speech, it's a little bit off-putting because people think I don't want to listen to just a robot speaking the whole time.

I think if the podcast format was that my voice prompt actually makes it into the final output so it starts with me recording a voice prompt as I'm doing now, then that gets transcribed. Then the rest of the workflow is the same, but what I do for the actual episode render is I combine my voice prompt with the AI response. So that you really get the feeling that it's me actually asking something that's definitely not AI. That I'm an identifiable person speaking. And then the podcast goes from there.

I think it would be more effective and more impressive and more enjoyable to listen to. \ No newline at end of file diff --git a/transcripts/uncorrected/43.txt b/transcripts/uncorrected/43.txt index 7691086737e7862b23604ec7c3b5a56071521899..ed50ed359bc55372aab37746585a31f7525ccc9a 100644 --- a/transcripts/uncorrected/43.txt +++ b/transcripts/uncorrected/43.txt @@ -1 +1 @@ -Look at the Facer's, I'm really surprised for no one's made a Hebrew date watch on the Facer creator, but it's probably the developer studio from Samsung is the way to go for that. And I want to edit, like the one that I have slightly, I can't find the perfect one, people put too much on them. I'm looking at the face I got from Facer now and they've added temperature, sunrise, sunset, neither of which work, I guess the integrations don't work, but who wants that on their watch? These are all like anti-simplicity. I just want... It's almost perfect, but they added these stupid unnecessary features.

Maybe on the Facer creator marketplace, I can just create one that I want. Maybe that will actually work. That's probably the easiest way to go. But if that doesn't work, I can create one on Github and open sources, the font that I want, but the Hebrew one would be very special to me. It's definitely possible.

I'm looking at my desktop display. It says 30 Tishrei 5786. So for sure from the Hebcal API the data source is there. And I looked last night and it seemed that people only had created sort of ones for a very different reason.

The VoiceNote data set I really want to create as well. That's actually a very important project, the GUI for adding that I have a backlog of literally thousands and it would form the basis for my classification model which I should probably note out and that's a real model I can build for the idea as well. \ No newline at end of file +I will try to build. What I want to build is this: I don't know, is there a name for this kind of workflow? So let's say I go out taking B-roll. Now, right now I'm using a lot of it for populating my own library, and sometimes I share it with stock libraries. And usually, they strip the sound. I like to have a workflow in which, well, my ideal workflow would probably be something like this.

Let's say I have a folder full of media, MP4 files. I can usually end up with a few mistakes, unintentional takes, and those usually would be like kind of less than five seconds duration. Usually, I just eyeball and I look for the ones with a file size that's too small. Next thing I like to do would be stripping out the audio, batching, putting the video into its own folder, and then maybe, because for stock I'm shooting it handheld, it should be stabilized. So, stabilization.

So it's basically a pipeline. And my question is this: can this be done? But if I want to build a few pipelines like this, this is, let's say, my stock video pipeline. I might have another pipeline for sorting, so I might have a few media pipelines, and I don't want to have to go every time into a repository and run it. But it does make sense that it's just a script, basically.

So what's the best way to have a few scripts? I'm basically asking what's a good GUI for this kind of workflow? I want to have my media folders, and then I want to say run this script within this folder, and that would take the TDM out of setting up and resetting up environments and Python and all the rest of it. So what would you recommend as a tool for doing that? \ No newline at end of file diff --git a/transcripts/uncorrected/44.txt b/transcripts/uncorrected/44.txt index eaea5b9166faabd9642d0c97478ecd6f6fd86d89..acc8d62d6d5b71235676ccf824c7860bf8c12d53 100644 --- a/transcripts/uncorrected/44.txt +++ b/transcripts/uncorrected/44.txt @@ -1 +1 @@ -Okay, so I've just configured. VS Code is very, very important. I've just configured automatic updates, and I asked Claude, I said, why am I not getting them? Why do I, it says, you're out of date, download the Debian. And I said, I don't want to have to download a Debian every time, and I really want to keep this updated.

So it says, you should know, you need to join the Microsoft ASC, their repo, their third-party repo, which I had before then I think because I removed it as a duplicate.

So to clarify, it's not the case that you need to do this process. It is actually an automatic upgrade thing but you do need to be attached to the Microsoft repo to get those. \ No newline at end of file +I have a question here. I was exploring lately, getting up earlier, and it always really appealed to me. The idea of getting in sync with the sun, like the natural diurnal cycle, the circadian rhythm: when the sun goes down approximately that's when you get ready for bed. When the sun comes up, that's maybe when you get ready, that's when you get up. But that would require, in the winter time at least, here, where I live, going to bed as early as, I mean I guess it depends. Whether you'd want to go to bed immediately at sundown, I think that's probably not realistic, or a couple of hours later. But even if you did the latter, you'd be talking about going to bed at like 8 o'clock in the winter, maybe as early as 7.

Now my question is, my interest in this really comes from a question I've always wondered or thought about, which is that until relatively recently there was no such thing as artificial illumination that you could click on with a switch in your home at least, and even the concept of street lighting being totally reliable and totally every street in a developed city being covered in street lighting, that was also a foreign concept. So in the evolution of humans, it seems to me it must be the case that this is a very recent adaptation.

So my question is really, from the historical record, what do we know about the kind of sleep cycle that humans gravitate to naturally when there isn't alternative lighting? Artificial lighting. Thanks for watching! \ No newline at end of file diff --git a/transcripts/uncorrected/45.txt b/transcripts/uncorrected/45.txt index ffc57e5992be591a97dbd7ee169ed839fe73e975..8a430e2e093e18c208dfeff13e05eabe999f06dd 100644 --- a/transcripts/uncorrected/45.txt +++ b/transcripts/uncorrected/45.txt @@ -1 +1 @@ -I want to add to my DSR Holdings a LLM store TXT. It's almost a pity I didn't talk about this with Shlomo, but a radical idea. It actually, I mean, it appears to be working. I don't know if you're sure where I read from if it just parts my home page or read the txt but I asked Claude to pull in some context data about me into the into the file it seemed to work really well so what the thought I had for I mentioned Shlomo and what I thought about for myself is inbound LLM marketing considering AI traffic.

It's a pity I didn't take some, in fact I'll add to the DAM a screenshots folder, because a perfect example of a screenshot was the last time that I saw, and I'm sure I see them almost every day, a sign-up form where they didn't ask whether an LLM was your referral source. I think it's absolute insanity that anyone, any company, would not have LLM at the top of their list of referral sources for traffic.

And this opens up a whole world actually of LLM analytics. and you see which LLMs are scraping our site. LLM optimization. And then basically the idea of being LLM as an inbound pipeline. If you did all this well, could you actually view large language models as an inbound traffic source saying Google's dead, LLM is where it's at.

Here's how you can, I mean, I would have to try these approaches on my own site, but all I can do there is keep optimizing and see if someone says, if you typed into ChatGPT in a month and said, I need someone who's good with AI in Jerusalem, Israel. Can you find any profiles? And if it worked, that would almost be the opposite to pursue the outbound track as well for jobs. But as a complementary angle of attack, I think it would be very interesting to see as an experiment even. \ No newline at end of file +I have a Nord 3 5G and I'm looking for a power bank. It supports this fast charging protocol. I think it's called SuperVOOC. And I was looking for a power bank that could basically charge it as quickly as possible, deliver the fastest charging that it can support from a non-AC outlet.

I got one from Baseus before. I don't know what it was, it was 65W, I don't know if that's relevant for mainly smartphones or if it's just for laptops. But in any case, I think I've lost that power bank, so I need a new one.

Now I guess what I would probably like is the biggest capacity that you can fit into a power bank form factor. By which I mean, at a certain point, we're not really mobile, they make these power stations I think they're called. So the biggest thing you can get, and not an exaggerated spec but a real credible spec in terms of the mAh.

And the quickest, the combination of the quickest charging and the biggest capacity for this particular phone. Anything you'd recommend from Mosaic or other, let's say more credible manufacturers? \ No newline at end of file diff --git a/transcripts/uncorrected/46.txt b/transcripts/uncorrected/46.txt index e9383aa5db79a22c214793ffdd4a93fc6ed49a60..839741289e07ba1b0cbfd1312e40cff52a27de8a 100644 --- a/transcripts/uncorrected/46.txt +++ b/transcripts/uncorrected/46.txt @@ -1 +1 @@ -Can I just make a suggestion? Before we proceed in this direction, I think that it definitely is the right content environment. But the reason I've created these is so that we have them ready for recurrent use. So Lama Index is very, very good and would be used for a lot of very versatile.

So before we start, let's update the cond environment to install all the different utilities we might need for tokenizing text, processing markdown, markdown to PDF, PDF splitting, all these different text utilities. Even ImageMagick typesetting utilities. Once we have that ready then we can begin. But let's get that environment good first if we can use a conda.yaml to define it.

In other words, take in the existing environment, make a few edits and then install that. Just remember there's an AMD GPU so it will affect the choice of packages. \ No newline at end of file +I'd like to create a voice recording app for Ubuntu Linux. The app should have the following functionalities. It's a voice recorder, and it has the essential voice recording functionalities of record, pause, stop, and restart. The restart scraps the current recording in cache and restarts the recorder from zero.

For the transcription process, I'd like to institute the following workflow. We'll use Google Gemini API and ensure that we're using Gemini 2.5, which supports multimodal input, including audio. The recording captured from the user should be optimized for this purpose of voice capture for speech to text. By which I mean, I would suggest that we record in mono. We capture the recording in a space-efficient format. We're optimizing for creating a voice recording that is not necessarily the greatest and most detailed of audio clarity, but which strikes the best balance between quality and space efficiency for transcription.

The voice recording will get sent to Gemini for transcription with a system prompt that instructs it to transcribe and also clean up the recording by removing filler words, adding sentence structure, and adding spaces. There can be a second button which is called transcribe and optimize, and the transcribe and optimize workflow is the same except that the system prompt is a little bit more instructive and it tells Gemini, in addition to those steps, to remove filler words, add sentence structure, paragraph spacing, and try to optimize the text by adding headings, organizing the thoughts a bit, and removing repetition, so it's a little bit more aggressive.

In both cases, the transcription, when it returns from Gemini API, will populate into a... In fact, Gemini should return two things, a title and a text. The title is a short title for the voice note. The text is short is the text, and so on. The text is formatted in Markdown; it should appear within the Markdown within the text editor. There should also be a clipboard button, and finally, the user should be able to save the note.

When the user chooses to save the note, it will get saved to a predetermined folder which the user selects as where they save voice notes on their operating system. And it's saved there as a Markdown file with the title in machine-friendly format. So if the voice note title has spaces, the saved file name will just replace those with hyphens.

The app would be run repetitively such that if the user wants to record a new note, they start again, and when they do transcribe or transcribe and optimize, it will send and then overwrite the previous transcription. So the user has to click the save button in order to save it, or there can be an option for auto-saving configurable as a user setting. \ No newline at end of file diff --git a/transcripts/uncorrected/47.txt b/transcripts/uncorrected/47.txt index 68f0272363ffede253054f91243a4d0b8203d19b..b7896c7f96af437ec44fecaba4cd587b9fd8c785 100644 --- a/transcripts/uncorrected/47.txt +++ b/transcripts/uncorrected/47.txt @@ -1 +1 @@ -Okay, here's just a few more specific things that I want to include. So I see you mentioning hydration drinks, which is very important. Electrolyte tablets become very expensive. So there's a few things I'd like to explore. More cost-effective ways for making them. I think you can buy them as a dry powder is one idea. The second one is a homemade recipe.

The next set of ideas is I really really need to always have some kind of food stuff at home ready to eat. So there's a few things in that regard. A list of a kind of basic pantry shopping list. Obviously optimized for all the dietary recommendations we've discussed here. Suggestions for, and I think protein bars aren't really enough, it needs to be carbohydrate as well. Recipes or suggestions for homemade protein bars for the same reason that they become very expensive to buy them individually.

That's probably the key thing I'm looking for at the moment is to have always on hand the ingredients and ideally like kind of a backup layer like I kind of make these protein bars but I also and that's kind of the fallback but ideally I prefer to obviously eat and so on. \ No newline at end of file +Okay, so the basic validation of the app is good. It functions according to spec.

I'd like to just remove the emojis and please take a look at the screenshots of the app as it's currently implemented and see if you can think of any design and UI optimizations that would make it even more friendly to use.

For transcribe and optimize, we definitely would like to have a label transcribe and optimize.

Maybe let's have some helper text or an about section where we describe to users the differences between these two functions.

The docs folder should be separate from the code and it will be the place in which documentation is gathered.

Ask the user if there is any specific functionalities or aspects of the application that the user wishes to document in this folder.

The docs folder should be mentioned and linked in the readme, directing users to it for more extensive documentation than can be found in the readme itself. \ No newline at end of file diff --git a/transcripts/uncorrected/49.txt b/transcripts/uncorrected/49.txt index 847a19b97210af5a0d79cb54c259b54cbe8103aa..ec565a8b602b1abb235b4c8a5616370d701f5be7 100644 --- a/transcripts/uncorrected/49.txt +++ b/transcripts/uncorrected/49.txt @@ -1 +1 @@ -Create now a meetings taker, meetings minute producer. It will have the following functionality. The user will upload a recording of meetings, of a meeting that took place. and we'll provide then there will be a section so that's an audio upload functionality the next one will be a meeting participants the user will provide the names and identifying characteristics of people who are audible in the recording so it'll say like for example and there should be Name, Description, Daniel, male voice in the recording, Hannah, female voice in the recording.

Upon receiving both of these things, it will send it to Gemini Multimodal in order to produce two things One is a transcript, slightly cleaned up diaries transcript That's one output and the second one is a minute which is a automatically generated minutes formatted with decisions, action items for each participant.

And then it should be integrated with Google Drive so the user can connect their Google Drive and save them to a folder after they've been generated and view them in the app. \ No newline at end of file +Please go through the markdown files in this repository to make sure that no emojis have been used.

If you find any emojis, remove them.

If emojis have been used in place of proper icons, then identify an appropriate icon library that could be used to provide the emojis.

Remember that if the icons are well known, such as the icons from major social networks, these should be integrated via a pre-designed library.

Do not attempt to create custom once-off SVGs for any logo that likely already exists in a professional library. \ No newline at end of file diff --git a/transcripts/uncorrected/5.txt b/transcripts/uncorrected/5.txt index 4a7a1d90e43a9e5403edd584c7f643c7b95625ab..c0f7715bfb8ac0cd9c18c80ada8f919b594001ee 100644 --- a/transcripts/uncorrected/5.txt +++ b/transcripts/uncorrected/5.txt @@ -1 +1 @@ -So I got a Bezeus 65W charger for my OnePlus Nord 3 5G and I see there's also 22W. What does that number refer to? And can I get maximum charging speed from the 22W one if I'm not using any of these for a laptop? \ No newline at end of file +See if you can find a project that we could install. You can search on Github. What would be brilliant would be to have a speech-to-text that I can run in any window on this computer on which I can use a macro key to start and stop transcription. I use F13 for this purpose.

A lot of the projects that I've seen are using local whisper. Whereas I would prefer any approach that uses cloud transcription that I can provide an API key, let's say for OpenAI or for any other STT. It doesn't have to be exactly real-time. There can be a bit of latency.

But I'm looking for hit a button, speak, and then the text is directly input into whatever window you're focused on, just as if you were typing on a keyboard. If you can install them in the programs directory of this computer, we can try out a couple of options. Make sure to search on Github for this because a few good projects have come about in the past number of months. \ No newline at end of file diff --git a/transcripts/uncorrected/50.txt b/transcripts/uncorrected/50.txt index 73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c..97dc205e9d7b77068f580705263f66d3a0ce82b0 100644 --- a/transcripts/uncorrected/50.txt +++ b/transcripts/uncorrected/50.txt @@ -1 +1 @@ -I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.

I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations. \ No newline at end of file +Go through the website and see any place in which icons have been implemented which were custom designed but which could have been implemented more efficiently through using an existing icon library.

Pay particular attention to icons for common uses such as social media icons which exist in many libraries, as well as emojis which may have been used in place of icons.

This approach should not be followed.

If the user uses an existing icon library that you can identify, then replace the custom coded icons with the most appropriate matches.

If the user hasn't yet implemented an icon library, provide some suggestions to the user, focusing on those libraries which will best match the aesthetic which they are following in their designs. \ No newline at end of file diff --git a/transcripts/uncorrected/51.txt b/transcripts/uncorrected/51.txt index 24994713fc006cf39dff6433f341d9e5b812c141..0f9f01aeb1efa9b56a188dbecffed93a32cfd7c5 100644 --- a/transcripts/uncorrected/51.txt +++ b/transcripts/uncorrected/51.txt @@ -1 +1 @@ -So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.

What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.

A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner. \ No newline at end of file +This repository contains a collection of slash commands which I use with Claudecode.

I capture some of the slash commands using speech to text.

The slash commands that have been captured with dictation frequently lack elements like punctuation, paragraph spacing, and they may contain occasionally words that were mistranscribed.

Please recurse through the directories and correct slash commands which you can find which were missing these basic textual features but do not limit your fixes to only I don't want to go into those containing these defects but rather consider in your editing any slash commands which need to be rewritten for optimal intelligibility. \ No newline at end of file diff --git a/transcripts/uncorrected/52.txt b/transcripts/uncorrected/52.txt index 8eb532b0a713565b3b2fae20960656ec0d9e6e2f..35a25c66c27c2d44f0a64ca785442bcb2b03db07 100644 --- a/transcripts/uncorrected/52.txt +++ b/transcripts/uncorrected/52.txt @@ -1 +1 @@ -Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.

They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.

Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for. \ No newline at end of file +This repository contains a folder of screenshots.

The intended use of the screenshots is that they will be integrated into the README or other documentation to demonstrate the UI of the app.

It's important therefore that the screenshots have descriptive file names.

Please rename the screenshots for this purpose and integrate them into the README in the most appropriate section. \ No newline at end of file diff --git a/transcripts/uncorrected/53.txt b/transcripts/uncorrected/53.txt index 492695d3c04244eba8ee90b40f4d0ed8cbb6793b..c3e6aec46313e6c703697e4fcc48f050db3015c1 100644 --- a/transcripts/uncorrected/53.txt +++ b/transcripts/uncorrected/53.txt @@ -1 +1 @@ -Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.

And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.

So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.

What this means is you can plug your voice keyboard, which I think is obvious name, into anything. You can have it bound to your desktop for most of the time, you go away for traveling for a while, you pack your box. So it's really analogous to a keyboard.

Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it. \ No newline at end of file +What's the most professional way to install a package on Linux? If I create an executable and copy that into the directory on path, such that I can call it, is that considered a worse way to install applications than through a Debian package? \ No newline at end of file diff --git a/transcripts/uncorrected/54.txt b/transcripts/uncorrected/54.txt index acadef7c73d2b38c88ec7b03751c008a67eca4fc..72dd47f2927e95f6a555120604796efb0f7010e8 100644 --- a/transcripts/uncorrected/54.txt +++ b/transcripts/uncorrected/54.txt @@ -1 +1 @@ -Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.

Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe. \ No newline at end of file +Your task is to take this system prompt and rewrite it for implementation in a structured AI system.

In order to do so, adhere to the following instructions.

Within the text of the prompt itself, define the The JSON output that the AI should be constrained to giving.

And instruct the AI tool that it is working in a structured workflow and must only return valid JSON.

Create a folder for the prompt.

And add in addition to the rewritten prompt text.

You should also create a .json file containing an Open API compliant JSON schema and finally and you create another JSON called object.json which contains just the JSON object. \ No newline at end of file diff --git a/transcripts/uncorrected/55.txt b/transcripts/uncorrected/55.txt index 48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45..76af9ed38a7f3a464480738293afb78a25ff5929 100644 --- a/transcripts/uncorrected/55.txt +++ b/transcripts/uncorrected/55.txt @@ -1 +1 @@ -Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.

Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends. \ No newline at end of file +Okay, so here is the type of license that generally work for me for open source projects. I usually open source software because I've created something useful. I think other people might either find it helpful or develop upon the idea to do it to take my idea and ability further. Attribution is always appreciated but I'd only want to make it mandatory if that wouldn't really sort of create friction with other people who'd like to use a project.

But attribution really helps me because it opens up the relationship and connectedness of open sourcing because if someone were to use it downstream, they have a way to sort of get in touch with me. People commercializing open source software doesn't sit very well with me, but again, it's only if it's, I'd be very reluctant to add that as a limitation.

Other than that, nothing else really stands out to me as something that I'd require. Like if people took it in any other direction, it's fine. The only one I think about sometimes is obviously no one wants something that creates to be sort of misused or used for harm. And one also doesn't want to end up with lawsuits if something they create is misused, so I don't know if there's any legal language that can create a little bit of protection around those potentials. \ No newline at end of file diff --git a/transcripts/uncorrected/56.txt b/transcripts/uncorrected/56.txt index 353b380ddee0d6134e7cfc905de9171524ef566e..b57417cde1c5303a489404bcd259f827ea2cf7a6 100644 --- a/transcripts/uncorrected/56.txt +++ b/transcripts/uncorrected/56.txt @@ -1 +1 @@ -I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.

And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.

And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that. \ No newline at end of file +The problem is that we looked at this before and when it reboots the router it's not bringing up the Cloudflare tunnel.

So see it's working now, but just see what can be done to make sure that this, we need to make very certain that this does start automatically on reboot. \ No newline at end of file diff --git a/transcripts/uncorrected/57.txt b/transcripts/uncorrected/57.txt index 0ec335394a72e80887a3672f290bc5828d8227e0..4b73fda258009e56d8fc1e8ade93312193c751d0 100644 --- a/transcripts/uncorrected/57.txt +++ b/transcripts/uncorrected/57.txt @@ -1 +1 @@ -I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.

And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.

It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda. \ No newline at end of file +I recently picked up a Samsung Galaxy 6 smartwatch just to try out the idea basically.

And my only need was really for a dual time display, local and UTC, and the day display.

It was about $100 give or take, so a very basic entry level that would sync with my OnePlus.

If it turns out that I really like it...

The other requirement was a good microphone for voice recordings.

Even if it's not the best and my phone is better, it would be nice to be able to use it for that because I take a lot of voicemails during the day.

If I turn out to really like it, what would you suggest as a good upgrade?

I tend to like more everything that's getting under the hood with technology.

So I wasn't thrilled about buying a Samsung, but it was what was available for the price point approximately. \ No newline at end of file diff --git a/transcripts/uncorrected/58.txt b/transcripts/uncorrected/58.txt index 243f36cf36c052964af7ebe83a792dae9e67d205..dd20ba87d4e27f321810d0504c0736c4e154d407 100644 --- a/transcripts/uncorrected/58.txt +++ b/transcripts/uncorrected/58.txt @@ -1 +1 @@ -I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.

But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.

Come up with an imaginative name for this use case. \ No newline at end of file +I recently picked up a smartwatch from Samsung Galaxy and I'm curious one thing that would be really helpful that I thought of.

I'm always stressed about losing or potentially losing phone wallet keys.

And for all of these things, Fun Walla Keys, I use Pebble Bee Tracker now.

So I'm wondering if there's any way or app that can do something like geofencing in which if any of the things are...

Maybe you can turn it on and off at certain times but they're in.

If they move out of the zone you get an alert notification if the smartwatch vibrates or whatever. \ No newline at end of file diff --git a/transcripts/uncorrected/59.txt b/transcripts/uncorrected/59.txt index 35a55fa10abb62fbf49bc2c38d73e8cc53fca620..1c658e5f3d7436116c6a372301158c4d76aff497 100644 --- a/transcripts/uncorrected/59.txt +++ b/transcripts/uncorrected/59.txt @@ -1 +1 @@ -This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.

So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links. \ No newline at end of file +Something that would be very useful would be the following. So I use an app called Voice Notes for Android. And it's a voice recording app. It's called Voice Notes. Now, it has one fatal flaw, in my opinion, which is that it doesn't have Bluetooth support. So when I'm out and about, like now, I literally hold the phone up to my mouth, and it certainly gets me much, much, much better recording quality, but I kind of look a little bit goofy and I feel very self-conscious.

So there's two things I've thought about. One is finding a voice recording app that has more robust Bluetooth support. I think there are two options really that I'm thinking of. The first is finding, as I said, a voice recorder with very robust Bluetooth support and using a Bluetooth microphone to record with. The alternative, because I'm seeing these products come to market increasingly, is to use a wearable Android device, which probably wouldn't be that different, maybe even physically. And I think the more I think about it, the more I think about it, the more I think about it, the more flexibility. Rather than being a Bluetooth accessory, it's running, I guess, Wear OS, and maybe that would give you more flexibility.

I'm trying to think of the pros and cons on which would be better. I veered towards the wearable approach as it seems to be what's where. I don't know where the market is going with this concept, but I'm curious to know what your thoughts are regarding the pros and cons. \ No newline at end of file diff --git a/transcripts/uncorrected/6.txt b/transcripts/uncorrected/6.txt index c88cdbee095d6b251fb7f4c16b01d2952b2e8b43..e67b16ca2fca9b7ea8e44bdac10458c45ce16809 100644 --- a/transcripts/uncorrected/6.txt +++ b/transcripts/uncorrected/6.txt @@ -1 +1 @@ -I built a couple of days ago, just to note some of the things I've been working on. So I recorded a long conversation with my mother AC and I wanted to transcribe it so I did that with Whisper. But I wanted to get it into SRT format. So a few good things occurred from this and I just want to know what they are.

The first is I understood that Hugging Face spaces, which are of course apps. So what you can do is you can create your own space. I can create a private space and choose to run it on any of their GPUs. The GPUs are on demand so I can create let say I guess what I previously thought was well hugging face spaces are great because you can prototype an idea and then you can share it etc They often used to demonstrate ideas or spaces What I didn't realize and what just clicked with me is you can create, let's say for example, a whisperer like what I did the transcription workflow. and or an image to video workflow that's not going to be it's not valuable to run locally as is more often the case than not especially for the current hardware you have which is AMD you deploy that up to Hugging Face and then you run it on their enterprise-grade hardware so you can run up to a quantum computer which is obviously something that no ordinary computer and many more

For FAL AI if you paying let say 25 cents a clip it not cost effective Google is like a clip But if you run your own, now I'm finally understanding why people pay for on-demand GPUs or they have provisioned GPUs All of these are great because some of the medium grade hardware that Hugging Face provides is priced at about $1-$3 per hour. Which is actually quite cheap and an awful lot cheaper than paying per generation. So the danger with these things of course is everyone's nightmare is leaving a serverless function turned on and then getting a huge bill. So they have pause settings. So that was one thing.

The other thing I discovered the thing I sort of connected finally is why it useful to duplicate spaces on Hugging Face I don really understand what the point was Someone created something that works So if you can duplicate a working space, you don't need to build it. But then it's private, so it's not... You know that nothing's happening, and then you can run it on your own terms in your own hardware.

And then finally, there is RunPod and Modal for on-demand GPUs. and many more. I'm also using this for this purpose. I was building a pipeline with PyAnode for diarization that doesn't really exist well in an API and then building these things yourself, pulling down these massive Python libraries every time is just way too time consuming. So if you create something and try it out in an environment that's much much faster, that's why these things are valuable. \ No newline at end of file +For Alibaba Cloud, Alibaba and Quen are obviously very tightly linked. To plot the names a little bit more closely, Quen is a general model, One is the multimodal model, and first tenant access is through exclusively Alibaba Cloud. Everybody else is reselling.

And Aliyun, A-L-I-Y-U-N, is the official Alibaba API platform, providing access to both Quen and One. \ No newline at end of file diff --git a/transcripts/uncorrected/60.txt b/transcripts/uncorrected/60.txt index e3960e6d457375f71a0aa63d07c4c8ad4af74fc2..59abde81206328bbd33b6fe792b0dcf161a7d148 100644 --- a/transcripts/uncorrected/60.txt +++ b/transcripts/uncorrected/60.txt @@ -1 +1 @@ -Okay, I'd like to create a sustainability report parser which will operate as follows. The user will provide a link to a sustainability disclosure or better they will upload a PDF. That's the expectation.

Upon receiving the PDF from the user the app will load the PDF in a frame. Gemini will identify on which page sustainability, The disclosure data for Scope 321 emissions is reported. And the PDF will load up in the frame, the viewer, with that page skipped to that page, and the data highlighted with a yellow overlay, slight highlight.

And beneath it Gemini will output the table for the top level in other words the summary of the scope 321 emissions with a short text description of what they were in summary the units detected scope 321 itemize then a disclaimer under that that this detection is based on automated processing may be incorrect and so on. \ No newline at end of file +So there's a lot of these AI voice pins emerging onto the market which are designed to be wearable devices.

So I record as I'm doing now quite a number of voice notes when I'm out of the house.

I use an Android app called Voice Notes that I really like but it doesn't have support yet for Bluetooth microphones.

At least not support that's reliable.

So I have to hold the phone up to my mouth, which really kind of degrades the experience.

As I started, I want to actually start doing, going on walks expressly for usually the moment I do this when I'm going places.

I just happen to think, but I actually want to start taking walks to jot down some ideas as a healthier way of combining work and getting out and getting some exercise and getting some sunlight.

And for that it would be really nice to not have to, you know, be holding up a phone to your mouth for 30 minutes or an hour or whatever it may be.

So I was thinking about wearable voice recorders but a lot of them from what I've seen are these kind of closed ecosystems in which they sell you can't just buy the hardware.

They'll sell you, they'll do like onboard transcription or they'll sell you like a Cloud Transcription Bundle.

I'm really not a fan of on-device transcription.

I mean I think it works but in my experience it doesn’t make a lot of sense to me just architecturally.

I think why do stuff on device that can be done in the cloud cost effectively?

And you got, you know, you can run vastly more powerful models in the cloud.

You don’t have to worry about quantizing models on a very, very small piece of hardware.

And so I guess what would be great for me, but Android, when you're looking at wearables, Android's like the obvious sync partner.

So you just need to get the voice of the audio data from the recording thing to Android and from there you can push to the cloud and then the rest is back-end speech and text.

So what I'm saying is that I'd love a modular solution that could do this.

A pin that is just hardware, just recording this audio sync, maybe has its own app, or maybe can be used preferably with third-party apps.

And therefore you can kind of build your own voice recording stack around it, and you can use your existing Speech-to-Docs transcription workflow.

And you don't have to subscribe to these very kind of, I forget the word, walled gardens in which the vendor chooses your force into this package that's often very unnecessarily expensive and you're paying mostly for overpriced transcription.

I'd prefer to just get, invest in good hardware! \ No newline at end of file diff --git a/transcripts/uncorrected/61.txt b/transcripts/uncorrected/61.txt index 4215c595a95e066a9ecda2a2ae08b9013686c002..65b441258e93b436681d73b0928dd3ea5da97777 100644 --- a/transcripts/uncorrected/61.txt +++ b/transcripts/uncorrected/61.txt @@ -1 +1 @@ -Okay, I'd like to create an app which does the following. The purpose of the app is to visualize how different countries, ideologies, systems approach common policy challenges. An example of a policy challenge that I'm just providing for explaining how I could see this working is second-hand smoke control. Some countries have very strict regulations, some countries have very lax enforcement. And probably there is not really much distinction by system of government but the user prompts it called policy visualizer and the user enters a policy challenge. So another example might be minimum alcohol purchasing laws.

Once Gemini receives this prompt, its task will be to research how different countries in the first instance approach this topic. And from that analysis, it can identify commonalities or clusters. The research process happens in the back end. And the user is shown some kind of progress indicators like researching what it's doing basically. Not a huge amount of verbosity but just a few cues so the user knows that it's not stuck or it's actually doing something.

Once Gemini concludes its first pass it will have grouped not necessarily every country in the world but based on the clusters it identifies it found groups. Each group is given a label. The label might be laissez-faire, permissive. These may be either recognized labels or what Gemini feels it's best to describe them as. And the countries are displayed with their national flags in alphabetical order.

The next functionality is that the user can click on the cluster and Gemini will describe what it is about this law that it considered them to be a cluster. In other words, the way in which they approach the challenge. That's a modal. Then the user can click on any country and they can see how that country approaches it. So I might click on the flag of Germany and, in either an accordion or a modal, it shows how Germany approaches, in this case, gun control, and its cluster.

Country level is always a tab and only if there's other taxonomies. By taxonomy I mean that we think there's a very, Gemini says there's a very big difference and how different right-wing versus left-wing approaches we're going to do. We're going to create one more tab with that. But that should be kind of only if there's very compelling reason to do so. Or if it has significant data to share. So if it feels like there's enough data about how US states approach an issue at the state level, it might create a tab called US States and then follow the same pattern in which it groups them into clusters.

The objective is to, rather than searching through Google to see how different countries do different things, to start with your question and then get this visualisation. And I think the icing on the cake would be an analysis. So this is a visual presentation and then there may be analysis showing significant differences, some similarities. So there's like a report, a textual report, but the main tab, because I think it's the most interesting one, is the visualization, the policy visualizer. \ No newline at end of file +I picked up a Samsung Galaxy FE watch. I checked compatibility, smartwatch. I think it's in the 7 series if I'm not mistaken. What is it exactly? It's a 40mm smartwatch. Where does it fit in their line up? What's the difference between this and the Watch 7? I just went for this one because it was what was in stock.

And is it shower proof, waterproof? And I know it's a glass display. So I'm wondering how tough is the glass? Or is it tough at all? I just asked because it's a fitness watch. I assume they make them a little bit more ruggedized, but maybe that's not the case. What does it say? \ No newline at end of file diff --git a/transcripts/uncorrected/62.txt b/transcripts/uncorrected/62.txt index 145fac41057e67a2489a588fef1f5d5a4b0df965..b51e9cf9eacfa8f539ba2c6270fbbbdcb80adeda 100644 --- a/transcripts/uncorrected/62.txt +++ b/transcripts/uncorrected/62.txt @@ -1 +1 @@ -Alright, so the plan is for this repository, I want to create an audio media streaming interface for my home network. And there's a few things I want to roll into this one too.

Number 1 is media playback. So I have a volume on the NAS called AudioShare. The NAS is 10.0.0.50. So connect to the NAS, you'll find the AudioShare volume and let's mount that as the media library. It'll have a lot of tracks already populated.

Second thing is a soundboard. So I'll create a folder within that AudioShare volume called soundboard. And in the soundboard I'll just upload some stupid sound effects. I'll do one to start it off, like a laughing sound.

And then I also want to create an intercom system. And the functionality for the intercom is that from this computer, sorry, from the interface, which will be audio.residence.jlm.com, I'd like to have the push-to-talk and the start and stop.

So for the speaker networking this is where I would like you to give me your thoughts on what makes the most sense So I've used before MPD. I've installed MPD clients on... So the devices are, there is a device called Nursery Pi in SSH. Bedroom Pi, R-Pi and Smart TV. Each one is connected to a speaker. That's the network.

I tried MPD, putting an MPD client on each device. MPD has been the most reliable But it seems kind of a pity to use this when there are protocols like SnapServer that are designed specifically for this use case. However, using Home Assistant, I found SnapServer to be very buggy. I could never really get it to work and many more and the system that's reliable.

I find with MPD, because you need to select the speaker on the client devices, those bindings frequently broke. So I'd like to have something that kind of, the speakers are really never going to change. In the sense that I'm going to, I have a sound card for the Raspberry Pi. That's the speaker. and for as long as I use this system that's gonna be the configuration. So I want to set up something that once it's in place it's pretty much just gonna work.

So I leave that call up to you and please create a... Create a folder in the repository providing your recommendations just before you begin and what you suggest as the best implementation for the multi-speaker network whether it is broadcasting to a bunch of MCD clients from the Web UI or whether it's creating a single Snap server or something else that manages the networking I don't envision much of a need to select individual speakers by which I mean, I think that for the most part the occasions I'm using this I'll just play media to the pool but of course it would be nice to be able to select that ! \ No newline at end of file +So there has been this vast development in multimodal AI recently. I signed up for Replicate and FAL AI. And what really strikes me is not only the diversity and number of models out there, but also the large number of permutations in multimodal AI, meaning what input can go to what output. And I think what I find difficult about it at the moment to navigate as a, let's say, creator. I created a few music videos just as kind of fun experiments. Is that there's so many different models. Like just in, let's say, the one series, there is maybe 20 different models to choose from in FAL, but they all do slightly different things not only in terms of the resolution and the parameter and the max duration but also in terms of the modalities, and they don’t really allow you to filter on this at the moment.

So what I mean by that is if we take an image to video model that animates still images to video, one model in one might create video without audio and another might create video with audio. And that's a very significant difference. But there's also a significant difference in do I prompt for the audio? In other words, is it going to be text to audio and render out audio that then gets added to video? Is it reference audio and reference image? So when you begin opening, all these differences really matter because I might want to filter on ideally, let's say I wanted to look at image to video models, which could generate lip sync to audio from a prompt. That might be one use case as well as the video.

In another use case, I might want to create a dialogue video. Let’s say I have a still image of a crowded market in Jerusalem, and I might want to print something like create a video from this image; the background soundtrack is background conversation noise in a bustling marketplace with vendors yelling out sales prices. That's just an example of the kind of background noise and the ambient noise that we have in this market I'm thinking about.

So what I would like to do, I created this repository which I created here. I'm trying to think of a taxonomy for multimodal, really for my own reference, but also as an open source project. Exploring the permutations of multimodal that are possible. So in the preceding example, we might have one definition of a modality might be still image to video without audio. Another modality, and then the description. Another modality might be still image to audio without lip sync. Another modality might be still image to video with lip sync.

But then you might have some sub modalities being still image to video with lip sync with reference image, that a reference to image. Another sub modality there might be still image to video with reference character reference in video. Another might be still image to video with audio with character reference through a LoRa (L-O-R-A). And I reckon that if we really enumerated the modalities we might get to hundreds if not thousands of different ones. For example, in FAL, just to talk about the long tail, there's music to music, which is music in painting. There's audio in painting, well, yeah, audio in painting, which I'm thinking aloud here is, I guess, distinguished music in painting is a subset of audio in painting, that it's melodic.

So that's the objective. I think that the JSON is the obvious format in which to attempt to denote these. And what I'd like you to do as the task definition is try to do this basically. Try to enumerate, list out a hierarchy, some kind of taxonomy representation that makes sense. We could try to create a baseline and then explore various ways of mapping out the hierarchy, manipulating the JSON so that we look at different ways of organizing it. So I think it would be useful to have like a first entry JSON in which we, and later maybe I, as new modalities come to, and we can maybe have very interesting labels might be their point of maturity, example workflows, use cases, etc. There's an awful lot that could be explored within these parameters. \ No newline at end of file diff --git a/transcripts/uncorrected/63.txt b/transcripts/uncorrected/63.txt index b314f3f74074ca02c2a47132cea688da6abb56d9..7691086737e7862b23604ec7c3b5a56071521899 100644 --- a/transcripts/uncorrected/63.txt +++ b/transcripts/uncorrected/63.txt @@ -1 +1 @@ -Building a Reporting Disclosure. I have a few thoughts. One, I can create a model. A model is actually quite feasible. It would be, but it's a data annotation project. It's saying, here's a PDF, here are the actual variables. In other words, here's the scope 3, scope 2, scope 1, here are the units, train it like that.

Second thought is if I did want to put together a dataset of sustainability disclosure reports, I think you could argue a public fair use clause for the PDFs being there.

And then the one I did with Gemini the other day, which was basically a parsing AI tool, seemed to work and could probably be used in production. And, which works even maybe as a way of trying to get in touch with Google, is that they definitely have an AI for Good division who may, let's say, provide Gemini credits for the actual deployment of it on Cloud Run. Because from my first run of it, it was very, very promising for the task of parsing the reports.

And that would greatly the feature would be when it extracts the data human human in the loop is done by seeing what it is matching it to a company in the database or to a known company Let's take Google itself as an example. Detects its stock ticker, detects its stock exchange. And then you click like add to database meaning that you're adding the validated data and it could even pull out the metadata from the document pull out the source and that would be a great way of building up a human validated database in other words you take the reports you say either everything everything looks good to me or this is wrong either way you add it then of course you've got the missing financials and the rest of the world.

But that would probably be because there is thousands of sustainability disclosures, especially when you consider I think beyond the US globally, and it's beyond. So certainly it's a task for a model, but it's also human in the loop. The ultimate question is if Gemini stock performs 99% sufficiently well in the task of extracting this data from the sustainability reports. A model might actually not even be necessary because out of the box it's almost perfect. That is, I suspect, what the case would be. \ No newline at end of file +Look at the Facer's, I'm really surprised for no one's made a Hebrew date watch on the Facer creator, but it's probably the developer studio from Samsung is the way to go for that. And I want to edit, like the one that I have slightly, I can't find the perfect one, people put too much on them. I'm looking at the face I got from Facer now and they've added temperature, sunrise, sunset, neither of which work, I guess the integrations don't work, but who wants that on their watch? These are all like anti-simplicity. I just want... It's almost perfect, but they added these stupid unnecessary features.

Maybe on the Facer creator marketplace, I can just create one that I want. Maybe that will actually work. That's probably the easiest way to go. But if that doesn't work, I can create one on Github and open sources, the font that I want, but the Hebrew one would be very special to me. It's definitely possible.

I'm looking at my desktop display. It says 30 Tishrei 5786. So for sure, from the Hebcal API, the data source is there. And I looked last night and it seemed that people had only created ones for a very different reason.

The VoiceNote data set I really want to create as well. That's actually a very important project, the GUI for adding that I have a backlog of literally thousands and it would form the basis for my classification model which I should probably note out and that's a real model I can build for the idea as well. \ No newline at end of file diff --git a/transcripts/uncorrected/64.txt b/transcripts/uncorrected/64.txt index 8d2caf72445f7704d8455a3c2b790fdf76026b9e..eaea5b9166faabd9642d0c97478ecd6f6fd86d89 100644 --- a/transcripts/uncorrected/64.txt +++ b/transcripts/uncorrected/64.txt @@ -1 +1 @@ -The purpose of the repository basically is to model or suggest the idea of using AI agents to scope out gap filling and extending multi-agent networks based on their inferred understanding of the purpose of a multi-agent network.

I think iterative workflow is the best. It suggests to the user what about this agent the user says yes or no, rather than the batch system. Although it could do both, but let's make the defaults the kind of individual review system. \ No newline at end of file +Okay, so I've just configured. VS Code is very, very important. I've just configured automatic updates, and I asked Claude, I said, why am I not getting them? Why do I, it says, you're out of date, download the Debian. And I said, I don't want to have to download a Debian every time, and I really want to keep this updated.

So it says, you should know, you need to join the Microsoft ASC, their repo, their third-party repo, which I had before then I think because I removed it as a duplicate.

So to clarify, it's not the case that you need to do this process. It is actually an automatic upgrade thing but you do need to be attached to the Microsoft repo to get those. \ No newline at end of file diff --git a/transcripts/uncorrected/65.txt b/transcripts/uncorrected/65.txt index 2acd54bd254b2cdcc6a5457142eb4e0e917685f0..ffc57e5992be591a97dbd7ee169ed839fe73e975 100644 --- a/transcripts/uncorrected/65.txt +++ b/transcripts/uncorrected/65.txt @@ -1 +1 @@ -Okay, I'd like to create an app with Gemini. It's going to do the following. It will be called MyEQCreator. Here's how it works.

The user will, there will be a microphone recording interface, or the user can upload a file. Either way, the user should aim to upload a three-minute audio sample. The audio sample goes to Gemini and Gemini will parse the submitted audio to determine speaker characteristics, namely their vocal range and frequency distribution. And when it does this, its goal will be to provide an EQ preset for the user.

I use Audacity for lightweight audio editing and if I had a Daniel voice preset that had these EQ settings built in or that could even use via a CLI I would use it but that would require maybe a second pass Gemini would generate it according to that file spec.

What would be very useful and impressive in addition would be after the analysis a five second audio sample might be visualized and the frequencies highlighted to illustrate to the user where the frequency distribution falls for their particular voice. \ No newline at end of file +I want to add to my DSR Holdings a LLM store TXT. It's almost a pity I didn't talk about this with Shlomo, but a radical idea. It actually, I mean, it appears to be working. I don't know if you're sure where I read from if it just parts my home page or read the txt but I asked Claude to pull in some context data about me into the into the file it seemed to work really well so what the thought I had for I mentioned Shlomo and what I thought about for myself is inbound LLM marketing considering AI traffic.

It's a pity I didn't take some; in fact, I'll add to the DAM a screenshots folder, because a perfect example of a screenshot was the last time that I saw one — and I'm sure I see them almost every day — a sign-up form where what they didn't ask for was whether an LLM was your referral source. I think it's absolute insanity that anyone, any company, would not have LLM at the top of their list of referral sources for traffic.

And this opens up a whole world actually of LLM analytics. and you see which LLMs are scraping our site. LLM optimization. And then basically the idea of being LLM as an inbound pipeline. If you did all this well, could you actually view large language models as an inbound traffic source saying Google's dead, LLM is where it's at.

Here's how you can, I mean, I would have to try these approaches on my own site, but all I can do there is keep optimizing and see if someone says, if you typed into ChatGPT in a month and said, I need someone who's good with AI in Jerusalem, Israel. Can you find any profiles? And if it worked, that would almost be the opposite to pursue the outbound track as well for jobs. But as a complementary angle of attack, I think it would be very interesting to see as an experiment even. \ No newline at end of file diff --git a/transcripts/uncorrected/66.txt b/transcripts/uncorrected/66.txt index b2de03d17424a2fed8639d2dfa09c98e84d864d7..e9383aa5db79a22c214793ffdd4a93fc6ed49a60 100644 --- a/transcripts/uncorrected/66.txt +++ b/transcripts/uncorrected/66.txt @@ -1 +1 @@ -It would be great to run the demo. I'm opening, creating a .env. And it would be useful so people can see straight up how it works to have a page that just says demo.

And it'll have so we'll need to run the audio data through the pipeline just as if we were using it capture the results into the repo here and just display that on the front end I've just provided the Gemini API key so let's try to do that I I also deleted, I think we just need one readme and the instructions for the app can be attached. \ No newline at end of file +Can I just make a suggestion? Before we proceed in this direction, I think that it definitely is the right content environment. But the reason I've created these is so that we have them ready for recurrent use. So Lama Index is very, very good and would be used for a lot of very versatile.

So before we start, let's update the conda environment to install all the different utilities we might need for tokenizing text, processing markdown, markdown to PDF, PDF splitting, all these different text utilities. Even ImageMagick typesetting utilities. Once we have that ready then we can begin. But let's get that environment good first if we can use a conda.yaml to define it.

In other words, take in the existing environment, make a few edits and then install that. Just remember there's an AMD GPU so it will affect the choice of packages. \ No newline at end of file diff --git a/transcripts/uncorrected/67.txt b/transcripts/uncorrected/67.txt index f2066bdff489a0e7af0c17fa8ccf736412194aad..68f0272363ffede253054f91243a4d0b8203d19b 100644 --- a/transcripts/uncorrected/67.txt +++ b/transcripts/uncorrected/67.txt @@ -1 +1 @@ -Hello, yeah, I'm looking for, okay, I'm trying to find a phone case for the Nord 3 5G from OnePlus. I want something which has MagSafe, a magnet built into the case itself, and something good quality and that's just a good protective case for the phone.

Do you know of any recommendations? Any ones on AliExpress or if Otterbox makes a case for this phone or anyone else? It's a slightly older OnePlus, so it's tricky to find a compatible case for it.

So if you happen to know, you should know of any products on AliExpress and product numbers, list them please. \ No newline at end of file +Okay, here's just a few more specific things that I want to include. So I see you mentioning hydration drinks, which is very important. Electrolyte tablets become very expensive. So there's a few things I'd like to explore. More cost-effective ways for making them. I think you can buy them as a dry powder is one idea. The second one is a homemade recipe.

The next set of ideas is I really really need to always have some kind of food stuff at home ready to eat. So there's a few things in that regard. A list of a kind of basic pantry shopping list. Obviously optimized for all the dietary recommendations we've discussed here. Suggestions for, and I think protein bars aren't really enough, it needs to be carbohydrate as well. Recipes or suggestions for homemade protein bars for the same reason that they become very expensive to buy them individually.

That's probably the key thing I'm looking for at the moment is to have always on hand the ingredients and ideally like kind of a backup layer like I kind of make these protein bars but I also and that's kind of the fallback but ideally I prefer to obviously eat and so on. \ No newline at end of file diff --git a/transcripts/uncorrected/68.txt b/transcripts/uncorrected/68.txt index 73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c..b373213f419ec9b2e4b9ca165f42170441577ed2 100644 --- a/transcripts/uncorrected/68.txt +++ b/transcripts/uncorrected/68.txt @@ -1 +1 @@ -I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.

I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations. \ No newline at end of file +Okay there's a bunch of memory layer projects now to explore later that are actually it's not longer separation between vector storage and memory which makes sense because it's kind of basically the same server it's offered by API mem0 super memory remember api memories.api that's a good starter list and they can all be integrated and used they'll do the vector backend so I'm using I'm testing it out on the documentary finding one, but just to see the concept and how it works with agency. \ No newline at end of file diff --git a/transcripts/uncorrected/69.txt b/transcripts/uncorrected/69.txt index 24994713fc006cf39dff6433f341d9e5b812c141..847a19b97210af5a0d79cb54c259b54cbe8103aa 100644 --- a/transcripts/uncorrected/69.txt +++ b/transcripts/uncorrected/69.txt @@ -1 +1 @@ -So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.

What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.

A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner. \ No newline at end of file +Create now a meetings taker, meetings minute producer. It will have the following functionality. The user will upload a recording of meetings, of a meeting that took place. and we'll provide then there will be a section so that's an audio upload functionality the next one will be a meeting participants the user will provide the names and identifying characteristics of people who are audible in the recording so it'll say like for example and there should be Name, Description, Daniel, male voice in the recording, Hannah, female voice in the recording.

Upon receiving both of these things, it will send it to Gemini Multimodal in order to produce two things. One is a transcript, a slightly cleaned-up diarised transcript. That's one output, and the second one is minutes, which is an automatically generated minutes document formatted with decisions and action items for each participant.

And then it should be integrated with Google Drive so the user can connect their Google Drive and save them to a folder after they've been generated and view them in the app. \ No newline at end of file diff --git a/transcripts/uncorrected/7.txt b/transcripts/uncorrected/7.txt index ab9a2f95ba91effd6318d8c0d1348ceec4164b4e..555445e5a0ceaacdcdcf5330ea03cd0eb05c1409 100644 --- a/transcripts/uncorrected/7.txt +++ b/transcripts/uncorrected/7.txt @@ -1 +1 @@ -The top thing I want to, the technical thing I want to find today is the whisper hotkey. I can try to use Claude. Or maybe there is some, maybe it really is a live coding thing. Or there is some, the speech, I think there is a speech subreddit, speech tag. And I think it's going to get bigger and bigger.

The other thing I want to think about is microphone positioning for the one that I have the ATR mic. It's actually a mono source mic and I probably will get much different results. I think actually I might go back to the Gooseneck and keep this one just for I guess maybe on the road use. I think the pickup is better, but it's worth actually thinking about that. It may be just trying a few audio samples head to head.

And in fact it would be very easy to do an evaluation with almost really scientific in nature. Record the exact same thing, exact same phrase into a microphone in controlled conditions, in other words it's a note down the source, note down the distance, similar gain, note down the parameters in other words, all using the same Whisper model and then compare the and the speech arrays for every microphone. It would be interesting to know what the best results have and the results of comparing head to head.

Wearable headset Bluetooth headset lav mic and then for the non microphones comparing the Samsung Q2U with the ATR with the other ATR, the boundary mic, and with the gooseneck microphone. And seeing which of those we say reliably is the best in class for both modes of operation. And of course it's going to vary a little bit more but it would be surprising actually if someone hadn't done the research it would also be interesting to look at what Philips are bringing to market at the moment maybe even sort of plan it down the road as my ideal.

Yeah, it's $470 a dictation microphone. I'm not sure what would be the best use case to look to in terms of critical accuracy. I would say probably something like medical use, like radiology or transcription, because a lot of these are built for on-the-go use, where you have the context that's most fitting to my one of someone sitting at a desk. Now it's not guaranteed to be quiet with Ezra, so there's a bit of background noise going on, but it's still a relatively stable acoustic environment and in these contexts where you know no one's compromising on on a hundred dollars here or there what do people reach for as the gold standard? \ No newline at end of file +I have some issues with Ethernet cables coming out. I have one computer in particular that it's a flat type cable and it just pulls out all the time. Now this particular piece of hardware is probably going to last another year at best before I replace it. The cost of this single Ethernet cable is not really significant so frequently I find myself thinking I would be happy to just super glue in a cable.

What I'd actually like to do is to have some kind of locking Ethernet port but I know that those are very specialty hardware so for but for things like this where I say okay yeah speed is forward compatible it's not going to be deprecated I don't really care about by the time I'm looking at doing the networking again the cable and the hardware will be history so why not just super glue it in.

Can that be done? I thought before maybe that would pose an issue to connectivity but from looking at an Ethernet connector the actual connecting wires are on the inside of the jack right so I presume that won't be an issue. \ No newline at end of file diff --git a/transcripts/uncorrected/70.txt b/transcripts/uncorrected/70.txt index 5eac1414e49e1b8618ce1ba2193d7d10b91f431a..73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c 100644 --- a/transcripts/uncorrected/70.txt +++ b/transcripts/uncorrected/70.txt @@ -1 +1 @@ -I'd like to consider a wee factor and then just give me your thoughts about this so currently it's a file based backend what I was wondering is would it make more sense to have a lightweight database backend SQLite let's say and and the important part of the utility which is the Hugging Face dataset push is what I'm using for the classification model would actually be a job whereby locally it will create the dataset from the local backend.

In other words, rather than having this sit in place as files, it's going to be constructed periodically. Basically when I say okay I've uploaded another batch, let's push, would that be easier and more logical to integrate with the front end? \ No newline at end of file +I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.

I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations. \ No newline at end of file diff --git a/transcripts/uncorrected/71.txt b/transcripts/uncorrected/71.txt index 8eb532b0a713565b3b2fae20960656ec0d9e6e2f..24994713fc006cf39dff6433f341d9e5b812c141 100644 --- a/transcripts/uncorrected/71.txt +++ b/transcripts/uncorrected/71.txt @@ -1 +1 @@ -Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.

It'll frame its suggestions with job title, as in if the user uploads their resume it'll say oh you could be an AI product manager, salary range for this position. The user might also — maybe the user should provide where they're based, though that should be obvious from the CV. So try to contextualize that by their area: demand, who hires for it, analysis of why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords for this job that you might find opportunities using. A certification, certifications that the user might want to pursue.

Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for. \ No newline at end of file +So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.

What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.

A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner. \ No newline at end of file diff --git a/transcripts/uncorrected/72.txt b/transcripts/uncorrected/72.txt index 492695d3c04244eba8ee90b40f4d0ed8cbb6793b..8eb532b0a713565b3b2fae20960656ec0d9e6e2f 100644 --- a/transcripts/uncorrected/72.txt +++ b/transcripts/uncorrected/72.txt @@ -1 +1 @@ -Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.

And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.

So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.

What this means is you can plug your voice keyboard, which I think is an obvious name, into anything. You can have it bound to your desktop most of the time; when you go away traveling for a while, you pack your box. So it's really analogous to a keyboard.

Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it. \ No newline at end of file +Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.

It'll frame its suggestions with job title, as in if the user uploads their resume it'll say oh you could be an AI product manager, salary range for this position. The user might also — maybe the user should provide where they're based, though that should be obvious from the CV. So try to contextualize that by their area: demand, who hires for it, analysis of why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords for this job that you might find opportunities using. A certification, certifications that the user might want to pursue.

Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for. \ No newline at end of file diff --git a/transcripts/uncorrected/73.txt b/transcripts/uncorrected/73.txt index acadef7c73d2b38c88ec7b03751c008a67eca4fc..492695d3c04244eba8ee90b40f4d0ed8cbb6793b 100644 --- a/transcripts/uncorrected/73.txt +++ b/transcripts/uncorrected/73.txt @@ -1 +1 @@ -Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.

Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe. \ No newline at end of file +Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.

And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.

So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.

What this means is you can plug your voice keyboard, which I think is an obvious name, into anything. You can have it bound to your desktop most of the time; when you go away traveling for a while, you pack your box. So it's really analogous to a keyboard.

Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it. \ No newline at end of file diff --git a/transcripts/uncorrected/74.txt b/transcripts/uncorrected/74.txt index 48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45..acadef7c73d2b38c88ec7b03751c008a67eca4fc 100644 --- a/transcripts/uncorrected/74.txt +++ b/transcripts/uncorrected/74.txt @@ -1 +1 @@ -Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.

Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends. \ No newline at end of file +Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.

Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe. \ No newline at end of file diff --git a/transcripts/uncorrected/75.txt b/transcripts/uncorrected/75.txt index 353b380ddee0d6134e7cfc905de9171524ef566e..48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45 100644 --- a/transcripts/uncorrected/75.txt +++ b/transcripts/uncorrected/75.txt @@ -1 +1 @@ -I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.

And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.

And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that. \ No newline at end of file +Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.

Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends. \ No newline at end of file diff --git a/transcripts/uncorrected/76.txt b/transcripts/uncorrected/76.txt index da218ad130c3c5a5f3ca672509c6c517f4fa87f2..353b380ddee0d6134e7cfc905de9171524ef566e 100644 --- a/transcripts/uncorrected/76.txt +++ b/transcripts/uncorrected/76.txt @@ -1 +1 @@ -I'd like to create an app that does the following. The user will paste a screenshot from their calendar or there's a text field for calendar entries for a certain time period. Below that there is a voice recorder. The voice recorder will let out the user to record a voice message, record, pause, stop, and or retake.

When the user is instructed to narrate their timesheet for the week, and the user can also select a date for week commencing, just to validate when the first date that they're referring to in this timesheet is. When those three fields are provided by the user they get sent to Gemini and Gemini will then generate a timesheet based upon the user description with activities per day.

The meeting information that was received will be added. So I might diarize specific meetings that were referenced. So combining the two sets of data. And finally based the user might if the user includes a time spent estimate how many hours were spent per day on a certain project or task it will then calculate the estimated total hours spent and then a summary section.

This will be provided as a document which is created in markdown with the user it's rendered in rich text on the screen and the user can click download and if they do that it'll download the timesheet as a markdown file with the title automatically file name timesheet for week commencing in machine readable case. \ No newline at end of file +I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.

And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.

And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that. \ No newline at end of file diff --git a/transcripts/uncorrected/8.txt b/transcripts/uncorrected/8.txt index 22d59168820d520421fbe4bc4a1965b66ebe8127..9d0e899beabae9e2b4c594fe5fb566fe9ba1666d 100644 --- a/transcripts/uncorrected/8.txt +++ b/transcripts/uncorrected/8.txt @@ -1 +1 @@ -I can try for my, I don't know with my notes editor that I had before, did I ever try it with a fully local version? Because I was thinking now, for the one where you do record voice and transform, if the back end was Whisper and, let's see, Lama 3.2, you know Lama, you made it very specific. This is what it's supposed to be using, even just for my own use. And you set up the environment.

That could be local mode, you could have local mode and remote mode. That could be a very, very useful tool. And if you just had one simple cleanup prompt, this could be the latest iteration of the text cleanup utility. Keep it simple, asynchronous.

SpeechNote but kind of pared down to just really really good capture and the cleanup thing which they don't, SpeechNote has never integrated that post-processing step which would differentiate it. \ No newline at end of file +For a MCP, this would be a great one to try with Gemini, would be, so I'm writing out a task list on a piece of paper now, and tasks Gemini OCR, character recognition, and then using a task creation MCP like Todoist, in order to identify the dates, etc, and create the tasks from that list in one shot.

I think it would be worth trying with Firebase Studio. \ No newline at end of file diff --git a/transcripts/uncorrected/81.txt b/transcripts/uncorrected/81.txt index 73fdefbd1c2ebcfad9ad59e23523ae1b8526edf2..4215c595a95e066a9ecda2a2ae08b9013686c002 100644 --- a/transcripts/uncorrected/81.txt +++ b/transcripts/uncorrected/81.txt @@ -1 +1 @@ -Okay, so I'd like to add to the VoiceNote dataset manager. So I have really annotations, there's two main objectives for this project as I currently conceive of it. And I think on the front end it would be useful to, when I'm uploading stuff and annotating, to have two separate sections for it, a little bit more clearly delineated. and so on.

So, if we have delineated, for example, where we have upload new voice note, that can firstly just be called maybe upload, next section transcripts, next section, and by next section I'm defining the headers, next section classification, next section annotations.

So in classification, I'll just add a few more recurrent ones that we should have. Prompt General, Development Prompt, Read Me Dictation, Social Media Post, and then in Annotations.

So content issues — call that Audio Defects — and let's add one for significant background noise. In audio quality issues, what I'd like to have actually maybe is, and again, we're going to, I mean, in the process of defining the annotations we might have to sort of work backwards initially, but most of them haven't been annotated yet. I'm not going to start annotating until the schema is defined, so it would actually be a lagging annotation process.

The ones that are missing currently are background music. You have background noise but I think background music is actually very important because from a copyright standpoint that could be an issue. and for multi-language don't actually even have English Hebrew I'd have to keep it open-ended as to what other languages are present and I'd like to have one for background conversations actually and tagging by language so English Hebrew Arabic Russian French I'm hard these would be the ones that encounter my local environments a lot \ No newline at end of file +Okay, I'd like to create an app which does the following. The purpose of the app is to visualize how different countries, ideologies, systems approach common policy challenges. An example of a policy challenge that I'm just providing for explaining how I could see this working is second-hand smoke control. Some countries have very strict regulations, some countries have very lax enforcement. And probably there is not really much distinction by system of government but the user prompts it called policy visualizer and the user enters a policy challenge. So another example might be minimum alcohol purchasing laws.

Once Gemini receives this prompt, its task will be to research how different countries in the first instance approach this topic. And from that analysis, it can identify commonalities or clusters. The research process happens in the back end. And the user is shown some kind of progress indicators like researching what it's doing basically. Not a huge amount of verbosity but just a few cues so the user knows that it's not stuck or it's actually doing something.

Once Gemini concludes its first pass it will have grouped not necessarily every country in the world but based on the clusters it identifies it found groups. Each group is given a label. The label might be laissez-faire, permissive. These may be either recognized labels or what Gemini feels it's best to describe them as. And the countries are displayed with their national flags in alphabetical order.

The next functionality is that the user can click on the cluster and Gemini will describe what it is about this law that it considered them to be a cluster. In other words, the way in which they approach the challenge. That's a modal. Then the user can click on any country and it can see how that country approaches it. So I might click on the flag of Germany and either an accordion or a modal it show how Germany approaches in this case gun control and its cluster.

Country level is always a tab and only if there's other taxonomies. By taxonomy I mean that we think there's a very, Gemini says there's a very big difference and how different right-wing versus left-wing approaches we're going to do. We're going to create one more tab with that. But that should be kind of only if there's very compelling reason to do so. Or if it has significant data to share. So if it feels like there's enough data about how US states approach an issue at the state level, it might create a tab called US States and then follow the same pattern in which it groups them into clusters.

The objective is to, rather than searching through Google to see how different countries do different things, to start with your question and then get this visualisation. And I think the icing on the cake would be an analysis. So this is a visual presentation and then there may be analysis showing significant differences, some similarities. So there's like a report, a textual report, but the main tab, because I think it's the most interesting one, is the visualization, the policy visualizer. \ No newline at end of file diff --git a/transcripts/uncorrected/82.txt b/transcripts/uncorrected/82.txt new file mode 100644 index 0000000000000000000000000000000000000000..145fac41057e67a2489a588fef1f5d5a4b0df965 --- /dev/null +++ b/transcripts/uncorrected/82.txt @@ -0,0 +1 @@ +Alright, so the plan is for this repository, I want to create an audio media streaming interface for my home network. And there's a few things I want to roll into this one too.

Number 1 is media playback. So I have a volume on the NAS called AudioShare. The NAS is 10.0.0.50. So connect to the NAS, you'll find the AudioShare volume and let's mount that as the media library. It'll have a lot of tracks already populated.

Second thing is a soundboard. So I'll create a folder within that audio share volume called soundboard. And in the soundboard I just upload some stupid sound effects I do one to start it off Like laughing sound.

And then I also want to create an intercom system. And the functionality for the intercom is that from this computer — sorry, from the interface, which will be audio.residence.jlm.com — I'd like to have push to talk, with start and stop.

So for the speaker networking this is where I would like you to give me your thoughts on what makes the most sense So I've used before MPD. I've installed MPD clients on... So the devices are, there is a device called Nursery Pi in SSH. Bedroom Pi, R-Pi and Smart TV. Each one is connected to a speaker. That's the network.

I tried MPD, putting an MPD client on each device. MPD has been the most reliable But it seems kind of a pity to use this when there are protocols like SnapServer that are designed specifically for this use case. However, using Home Assistant, I found SnapServer to be very buggy. I could never really get it to work and many more and the system that's reliable.

I find with MPD, because you need to select the speaker on the client devices, those bindings frequently broke. So I'd like to have something that kind of, the speakers are really never going to change. In the sense that I'm going to, I have a sound card for the Raspberry Pi. That's the speaker. and for as long as I use this system that's gonna be the configuration. So I want to set up something that once it's in place it's pretty much just gonna work.

So I leave that call up to you and please create a... Create a folder in the repository providing your recommendations just before you begin and what you suggest as the best implementation for the multi-speaker network whether it is broadcasting to a bunch of MCD clients from the Web UI or whether it's creating a single Snap server or something else that manages the networking I don't envision much of a need to select individual speakers by which I mean, I think that for the most part the occasions I'm using this I'll just play media to the pool but of course it would be nice to be able to select that ! \ No newline at end of file diff --git a/transcripts/uncorrected/83.txt b/transcripts/uncorrected/83.txt new file mode 100644 index 0000000000000000000000000000000000000000..b314f3f74074ca02c2a47132cea688da6abb56d9 --- /dev/null +++ b/transcripts/uncorrected/83.txt @@ -0,0 +1 @@ +Building a Reporting Disclosure. I have a few thoughts. One, I can create a model. A model is actually quite feasible. It would be, but it's a data annotation project. It's saying, here's a PDF, here are the actual variables. In other words, here's the scope 3, scope 2, scope 1, here are the units, train it like that.

Second thought is if I did want to put together a dataset of sustainability disclosure reports, I think you could argue a public fair use clause for the PDFs being there.

And then the one I did with Gemini the other day which was basically a parsing AI tool seemed to work and could probably be used in production and which works even maybe as a way of trying to get in touch with Google is they have They have definitely an AI for good division who may let's say provide Gemini credits for the actual deployment of it on Cloud Run. Because from my first run of it, it was very, very promising for the task of parsing the reports.

And that would greatly the feature would be when it extracts the data human human in the loop is done by seeing what it is matching it to a company in the database or to a known company Let's take Google itself as an example. Detects its stock ticker, detects its stock exchange. And then you click like add to database meaning that you're adding the validated data and it could even pull out the metadata from the document pull out the source and that would be a great way of building up a human validated database in other words you take the reports you say either everything everything looks good to me or this is wrong either way you add it then of course you've got the missing financials and the rest of the world.

But that would probably be because there is thousands of sustainability disclosures, especially when you consider I think beyond the US globally, and it's beyond. So certainly it's a task for a model, but it's also human in the loop. The ultimate question is if Gemini stock performs 99% sufficiently well in the task of extracting this data from the sustainability reports. A model might actually not even be necessary because out of the box it's almost perfect. That is, I suspect, what the case would be. \ No newline at end of file diff --git a/transcripts/uncorrected/84.txt b/transcripts/uncorrected/84.txt new file mode 100644 index 0000000000000000000000000000000000000000..8d2caf72445f7704d8455a3c2b790fdf76026b9e --- /dev/null +++ b/transcripts/uncorrected/84.txt @@ -0,0 +1 @@ +The purpose of the repository basically is to model or suggest the idea of using AI agents to scope out gap filling and extending multi-agent networks based on their inferred understanding of the purpose of a multi-agent network.

I think iterative workflow is the best. It suggests to the user what about this agent the user says yes or no, rather than the batch system. Although it could do both, but let's make the defaults the kind of individual review system. \ No newline at end of file diff --git a/transcripts/uncorrected/85.txt b/transcripts/uncorrected/85.txt new file mode 100644 index 0000000000000000000000000000000000000000..2acd54bd254b2cdcc6a5457142eb4e0e917685f0 --- /dev/null +++ b/transcripts/uncorrected/85.txt @@ -0,0 +1 @@ +Okay, I'd like to create an app with Gemini. It's going to do the following. It will be called MyEQCreator. Here's how it works.

The user will, there will be a microphone recording interface, or the user can upload a file. Either way, the user should aim to upload a three minute audio sample. Audio Sample goes to Gemini and Gemini will parse the submitted audio to determine speaker characteristics, namely their vocal range, frequency distribution. And when it does this its goal way to provide an EQ preset for the user.

I use Audacity for lightweight audio editing and if I had a Daniel voice preset that had these EQ settings built in or that could even use via a CLI I would use it but that would require maybe a second pass Gemini would generate it according to that file spec.

What would be very useful and impressive in addition would be after the analysis a five second audio sample might be visualized and the frequencies highlighted to illustrate to the user where the frequency distribution falls for their particular voice. \ No newline at end of file diff --git a/transcripts/uncorrected/86.txt b/transcripts/uncorrected/86.txt new file mode 100644 index 0000000000000000000000000000000000000000..b2de03d17424a2fed8639d2dfa09c98e84d864d7 --- /dev/null +++ b/transcripts/uncorrected/86.txt @@ -0,0 +1 @@ +It would be great to run the demo. I'm opening, creating a .env. And it would be useful so people can see straight up how it works to have a page that just says demo.

And it'll have so we'll need to run the audio data through the pipeline just as if we were using it capture the results into the repo here and just display that on the front end I've just provided the Gemini API key so let's try to do that I I also deleted, I think we just need one readme and the instructions for the app can be attached. \ No newline at end of file diff --git a/transcripts/uncorrected/87.txt b/transcripts/uncorrected/87.txt new file mode 100644 index 0000000000000000000000000000000000000000..f2066bdff489a0e7af0c17fa8ccf736412194aad --- /dev/null +++ b/transcripts/uncorrected/87.txt @@ -0,0 +1 @@ +Hello, yeah, I'm looking for, okay, I'm trying to find a phone case for the Nord 3 5G from OnePlus. I want something which has MagSafe, a magnet built into the case itself, and something good quality and that's just a good protective case for the phone.

Do you know of any recommendations? Any on AliExpress, or whether Otterbox makes a case for this phone, or anyone else? It's a slightly older OnePlus, so it's tricky to find a compatible case for it.

So if you happen to know, you should know of any products on AliExpress and product numbers, list them please. \ No newline at end of file diff --git a/transcripts/uncorrected/88.txt b/transcripts/uncorrected/88.txt new file mode 100644 index 0000000000000000000000000000000000000000..73f338799a7ffd0c5b0b5fd814b5e3f3a8c78a2c --- /dev/null +++ b/transcripts/uncorrected/88.txt @@ -0,0 +1 @@ +I'd like to create a content recommendation app. This will be using... I'd like to get recommendations for movies to watch, things on Netflix, YouTube that are up to date. I'm based in Israel. I like watching things that are based on a true story or true stories. I prefer to watch things that are recent so it has to be up to date and the pitfall with these apps is that they'll recommend stuff that you've already seen or you don't want to watch so it would have to have some memory that it makes recommendations preferably one at a time and I can say like add to watch list or add to recommendation list or not interested or I've seen and the app would need to remember these responses so that it doesn't. It's just the same thing over and over again.

I know there's TMDB API which is great for getting movies. I have an API key I can provide. And I'd like to maybe say recommend across all categories just recommend movies. The Netflix thing it's very hard to get recommendations that are geo-sensitive for Netflix but that would probably be the ideal meaning that I'm based in Israel and if stuff isn't available here that should be considered as recommendations. \ No newline at end of file diff --git a/transcripts/uncorrected/89.txt b/transcripts/uncorrected/89.txt new file mode 100644 index 0000000000000000000000000000000000000000..24994713fc006cf39dff6433f341d9e5b812c141 --- /dev/null +++ b/transcripts/uncorrected/89.txt @@ -0,0 +1 @@ +So what I would like to do in this is create an app really for the purpose of demonstrating the capabilities of audio input as a modality because I think it's overlooked and it brings a lot of really interesting use cases.

What I'd like to do for this one is, as one facet of it, the user uploads a recording. It should be a recording of just one speaker. And upon receiving the recording, it'll be ingested to Gemini. and Gemini will analyse it for the following. It will try to categorise the speaker's accent. It will estimate the words per minute at which they speak. And then it will provide a phonetic analysis, basically a linguistic analysis of their speech, how they pronounce certain and many others.

A voice clip, Gemini processes it and then it produces a detailed analysis in a nicely displayed manner. \ No newline at end of file diff --git a/transcripts/uncorrected/9.txt b/transcripts/uncorrected/9.txt index ae3da7506e205016728f0028c258d57cd3e14246..4cf1a9b24541382f74948dfe82af3b41863bf6e2 100644 --- a/transcripts/uncorrected/9.txt +++ b/transcripts/uncorrected/9.txt @@ -1 +1 @@ -I should add integration to Cloudinary also for the website of the world. And environment variables as well, just making sure that they're copied in there.

And I'll see as well for Vercel if you can have the deployment be per branch. So there's a preview branch, you want to set that up as well. \ No newline at end of file +There's also AI APIs for image to 3D model and as well there's a couple of distinct model operators there and it's even a in 3D a lot of really exciting things are happening there is a real world so you can prompt like a game environment which is totally wild and so there's a lot that's emerging as possibilities now in this very exciting space. \ No newline at end of file diff --git a/transcripts/uncorrected/90.txt b/transcripts/uncorrected/90.txt new file mode 100644 index 0000000000000000000000000000000000000000..5eac1414e49e1b8618ce1ba2193d7d10b91f431a --- /dev/null +++ b/transcripts/uncorrected/90.txt @@ -0,0 +1 @@ +I'd like to consider a wee factor and then just give me your thoughts about this so currently it's a file based backend what I was wondering is would it make more sense to have a lightweight database backend SQLite let's say and and the important part of the utility which is the Hugging Face dataset push is what I'm using for the classification model would actually be a job whereby locally it will create the dataset from the local backend.

In other words, rather than having this sit in place as files, it's going to be constructed periodically. Basically when I say okay I've uploaded another batch, let's push, would that be easier and more logical to integrate with the front end? \ No newline at end of file diff --git a/transcripts/uncorrected/91.txt b/transcripts/uncorrected/91.txt new file mode 100644 index 0000000000000000000000000000000000000000..8eb532b0a713565b3b2fae20960656ec0d9e6e2f --- /dev/null +++ b/transcripts/uncorrected/91.txt @@ -0,0 +1 @@ +Okay what I'd like to do is create an application with Gemini. The user will upload their resume and upon receiving the resume the purpose of this application is to ideate and many more. So, I'm going to show you how to create jobs, positions that the user might be suitable for. It could be what they've done previously or an extension of that, but it would also try to suggest alternative directions, as in slide pivots or rigby pig pivots.

They'll frame its suggestions with job title as in if the user uploads their resume they'll say oh you could be an AI product manager, salary range for this position. The user might also maybe the user should provide where they based though that should be obvious from the CV. So try to contextualize that by their area demand who hires for it analysis why this could be a cool job for you. Knowledge gaps slash upskilling, how you might want to upskill to qualify yourself for this job. Keywords that this job might be that you might find opportunities using these keywords. A certification, certifications that I want to pursue.

Then a kind of a Tinder interface, and so on. So, it's a really nice, thumbs up, thumbs down, and those are recorded in memory so that the user can go back through the suggestions that it liked. So it's kind of a career ideation tool really, career pivot ideation tool for the user to explore alternative directions if they're feeling like they might not be thinking very sufficiently widely about what it is that they could be using their skills for. \ No newline at end of file diff --git a/transcripts/uncorrected/92.txt b/transcripts/uncorrected/92.txt new file mode 100644 index 0000000000000000000000000000000000000000..492695d3c04244eba8ee90b40f4d0ed8cbb6793b --- /dev/null +++ b/transcripts/uncorrected/92.txt @@ -0,0 +1 @@ +Here's an idea for a product I had. Tell me if you think it's ridiculous and if something like this has been attempted. So, speech-to-text transcription is amazing and I've become very dependent on it for voice typing. Unfortunately, on Linux and specifically, it's really tricky to find something that works at the operating system level. There are tools for Windows and Mac, and what I really need is something that will do it in any program. Not a browser extension, not an IDE extension, because then you're forever looking for does this tool have voice support. And you end up having, like what I have now, three or four Whisper subscriptions.

And many more. And you free yourself from the keyboard literally, you begin to want to use it at all your computers on my laptop. And some of them, my desktop can run a whisper, my laptop really can't. And you don't want to be spending a bunch of time provisioning separate environments.

So my idea is for a mini PC, think something like the Raspberry Pi or Orange Pi, but not presented as an enthusiast product so much as a little edge device and many more A box for all intents and purposes which runs on device a very efficient speech model like Whisper and it does on hardware local inference. Everything is optimized for this one workload. It has a USB out and the USB out it functions as a HID device and it sends the transcribed text and so on. Influence on the device and straight out USB.

What this means is you can plug your voice keyboard, which I think is obvious name, into anything. You can have it bound to your desktop for most of the time, you go away for traveling for a while, you pack your box. So it's really analogous to a keyboard.

Now what I was thinking to myself as a stupid idea is yes, you could do this stuff on device, you could use Claude, maybe it's too niche. But it could be quite creative for people who are really into voice typing and want a way to. And if it had Bluetooth support, your little box, your voice typing centerpiece could also work with your tablets, your phone and you could sort of extend around it. \ No newline at end of file diff --git a/transcripts/uncorrected/93.txt b/transcripts/uncorrected/93.txt new file mode 100644 index 0000000000000000000000000000000000000000..acadef7c73d2b38c88ec7b03751c008a67eca4fc --- /dev/null +++ b/transcripts/uncorrected/93.txt @@ -0,0 +1 @@ +Another idea for Gemini app. Recipe modifier, you get a recipe. Gemini parses the recipe, structures the data. Then, using a nutritional database, attempts to calculate the total fat per serving and the fat per ingredient.

Then, this is an app for people like me who are trying to adhere to a low-fat diet. It remixes a recipe to either achieve a certain fat amount, as in under X grams of fat, or to just make a general reduction within reasonable bounds while still trying to keep the recipe the recipe. \ No newline at end of file diff --git a/transcripts/uncorrected/94.txt b/transcripts/uncorrected/94.txt new file mode 100644 index 0000000000000000000000000000000000000000..48df2efb7e5f7af2de5f6a9e6f79c4188a1f5e45 --- /dev/null +++ b/transcripts/uncorrected/94.txt @@ -0,0 +1 @@ +Google ID8 to Try would be one of the apps that connects with the Google Workspace services. Which I don't know, maybe they've circumvented their general cautiousness.

Like voice to email. You send an email, you record a voice memo, it transcribes it, it checks your contacts, it generates an email, it shows you a draft, is that okay, and then it sends. \ No newline at end of file diff --git a/transcripts/uncorrected/95.txt b/transcripts/uncorrected/95.txt new file mode 100644 index 0000000000000000000000000000000000000000..353b380ddee0d6134e7cfc905de9171524ef566e --- /dev/null +++ b/transcripts/uncorrected/95.txt @@ -0,0 +1 @@ +I'd like to create an app that does the following. The user will paste an image or multiple images into the image upload feature. It'll run it through Gemini and it will attempt to extract the following fields: Serial Number, Model Number, Manufacturer, in a text field it will OCR readable text, Country of Manufacture.

And then based upon the detected product, the manufacturer and the part number and the serial number, it will provide a one line description, it will provide a multi-line description, it will provide a spec sheet. It will provide a year of first released on the market, age in years based on first release minus the current time, correct to the nearest 8.1, one decimal place.

And deprecation level from almost deprecated, fully deprecated, RRP, still on market, the last of the checkbox. So it'll basically take an image and then extract all these fields based on the initial OCR and then based on the web search complementing that. \ No newline at end of file diff --git a/transcripts/uncorrected/96.txt b/transcripts/uncorrected/96.txt new file mode 100644 index 0000000000000000000000000000000000000000..da218ad130c3c5a5f3ca672509c6c517f4fa87f2 --- /dev/null +++ b/transcripts/uncorrected/96.txt @@ -0,0 +1 @@ +I'd like to create an app that does the following. The user will paste a screenshot from their calendar or there's a text field for calendar entries for a certain time period. Below that there is a voice recorder. The voice recorder will let out the user to record a voice message, record, pause, stop, and or retake.

When the user is instructed to narrate their timesheet for the week, and the user can also select a date for week commencing, just to validate when the first date that they're referring to in this timesheet is. When those three fields are provided by the user they get sent to Gemini and Gemini will then generate a timesheet based upon the user description with activities per day.

The meeting information that was received will be added. So I might diarize specific meetings that were referenced. So combining the two sets of data. And finally based the user might if the user includes a time spent estimate how many hours were spent per day on a certain project or task it will then calculate the estimated total hours spent and then a summary section.

This will be provided as a document which is created in markdown with the user it's rendered in rich text on the screen and the user can click download and if they do that it'll download the timesheet as a markdown file with the title automatically file name timesheet for week commencing in machine readable case. \ No newline at end of file diff --git a/transcripts/uncorrected/97.txt b/transcripts/uncorrected/97.txt new file mode 100644 index 0000000000000000000000000000000000000000..0ec335394a72e80887a3672f290bc5828d8227e0 --- /dev/null +++ b/transcripts/uncorrected/97.txt @@ -0,0 +1 @@ +I'd like to create an app that is a meeting documentation assistant and it can provide three outputs from a voice input. So there's a voice recorder, so the user can record a voice note, pause, stop and retake, and then send. Once the voice note is sent, the user selects whether they want to generate a meeting minutes, an agenda for an upcoming meeting, so meeting agenda, or just those two actually.

And then if they do meeting agenda, it'll also generate a short version that can fit in a calendar description and a suggested meeting title. Upon receiving this from the user it gets sent to Gemini it analyzes the audio parses the audio and then generates a well minute or agenda as according to what the user selects with an automatically generated title a body that formatted in Markdown but renders in rich text so the user can download the original file with an automatically generated title a body that is formatted in Markdown but renders in rich text The user can download the original file and Runs the user would just clear the recording and start again.

It should also be able to automatically detect start time, end time, participants, action items, and it can deliver a... It will put those in organized fields in the output, even though the... and maybe the user can edit those to rectify any mistakes. And then when they click download, it will combine the corrected or uncorrected version as the case may be to generate the actual document for the minutes or the agenda. \ No newline at end of file diff --git a/transcripts/uncorrected/98.txt b/transcripts/uncorrected/98.txt new file mode 100644 index 0000000000000000000000000000000000000000..243f36cf36c052964af7ebe83a792dae9e67d205 --- /dev/null +++ b/transcripts/uncorrected/98.txt @@ -0,0 +1 @@ +I'd like to create an app which will do the following. It's a voice-to-voice app. The user will record a voice message. The voice recording in the app. The voice recording gets sent to Gemini with a transcript. Gemini's task is to create an abbreviated version of the Voice Message, as short as possible. Essentially cleaning it up. This stage is not shown to the user.

But what happens next is that it gets text to speech, it gets synthesized, the user can choose between a male or a female voice. Yeah, and once that, once the generated audio is created, it presents to the user, the user can download it. So it's essentially taking audio from the user, cleaning it, condensing it, synthesizing it, and then download.

Come up with an imaginative name for this use case. \ No newline at end of file diff --git a/transcripts/uncorrected/99.txt b/transcripts/uncorrected/99.txt new file mode 100644 index 0000000000000000000000000000000000000000..35a55fa10abb62fbf49bc2c38d73e8cc53fca620 --- /dev/null +++ b/transcripts/uncorrected/99.txt @@ -0,0 +1 @@ +This is called Impact Report Finder. The objective is that the user will provide the name of a company and the AI tool, Gemini, will attempt to find any voluntary sustainability disclosures, impact disclosures that they've written from the internet and it will send them by year. If they include data about their GSD admissions there will be a tick symbol and there will be a link to the result and there will be a direct link to the PDF. and Jeff.

So after the user provides the name of the company, there can be a... if Gemini needs to disambiguate, it will ask the user in a text box below, can you clarify and then the user can hit submit again, otherwise it's more than an interactive chat app, it just provides those search results in that specific format with the reports chronologically from by year, if there's multiple ones by year, by date of release, and then if they have GSG data, a link to the data sheet if it's separate, or just the PDF, but basically annotated table of links. \ No newline at end of file