diff --git a/audio/.claude/settings.local.json b/audio/.claude/settings.local.json new file mode 100644 index 0000000000000000000000000000000000000000..681e385e457b498fbfe00e10e71d311d1f273680 --- /dev/null +++ b/audio/.claude/settings.local.json @@ -0,0 +1,11 @@ +{ + "permissions": { + "allow": [ + "Bash(git checkout:*)", + "Bash(git branch:*)", + "Bash(git push:*)" + ], + "deny": [], + "ask": [] + } +} \ No newline at end of file diff --git a/audio/106.mp3 b/audio/106.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..fab00f6b3fe239cf16e35aeb0c01f0da71e844c3 --- /dev/null +++ b/audio/106.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc730bd3778d23585dc29230711c9b457fe03bc850048bebe2b64326e2fec867 +size 1195244 diff --git a/audio/107.mp3 b/audio/107.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..f9731c96086702c14484ea89e5fb730ca6b2f4d1 --- /dev/null +++ b/audio/107.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01eed4864f25b24ba884d7301e65f70c79a3976bc15ef802851771a2378d6900 +size 3082796 diff --git a/audio/108.mp3 b/audio/108.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..3b6aa37b9400c89d2fa1ebbb16dd31bc1f870b0b --- /dev/null +++ b/audio/108.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d13d628f866e3b919557a038c086e50ec833178d0ebdace0f82856f89648c55 +size 3555116 diff --git a/audio/109.mp3 b/audio/109.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..085cf340c20e6f1f72013db1e8a1f0cdd78915ca --- /dev/null +++ b/audio/109.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6850c4f107264ce5b76483343ab4b483f2efabe45e74007d1f93e616e61c8244 +size 3589676 diff --git a/audio/110.mp3 b/audio/110.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..91b13bb20cb5b8498efa3d619357f1e9c6d60153 --- /dev/null +++ b/audio/110.mp3 @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:2d00599d3919bb5ae0cade9a3c0466f5c0a4be5c88e063872ab653f41c559586 +size 1840235 diff --git a/audio/111.mp3 b/audio/111.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..ba8afc99b3f9408ea0974c956fa5b20f95615a86 --- /dev/null +++ b/audio/111.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36ab597df831bfd434e6b36268a85bf28577953802e3bedd5eb6c78370eb4d83 +size 1408364 diff --git a/audio/112.mp3 b/audio/112.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..b988f62e7661547ac6e9be93631eddfe78359af9 --- /dev/null +++ b/audio/112.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:407436b368702db090420ede39de07a412427a8744e85bfa153489aa0f184afc +size 2026436 diff --git a/audio/113.mp3 b/audio/113.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..5cf2200375b6cbaf13b015abf1c1e79a8aafa6cf --- /dev/null +++ b/audio/113.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e79f2efe462ce355e52353ada0ec48f2f38a95152b8fc5a2f6d7de9b71be141c +size 1009196 diff --git a/audio/114.mp3 b/audio/114.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..090836bf1c582b716050d9f1c69b20010b54aa4c --- /dev/null +++ b/audio/114.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8837d43c94fae93338ba684eeea8ca34ea2651739175a08540a8e95d7c69f827 +size 1843244 diff --git a/audio/115.mp3 b/audio/115.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..918698cb5ab6eae12cee82d7c9d606d2b45e43d7 --- /dev/null +++ b/audio/115.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44081118b29aab3a48fc05ba9bfeee35c7bdfbc0bc1febaa265f0f8fe527df38 +size 1025324 diff --git a/audio/116.mp3 b/audio/116.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..3f939925ace66acd4192f690f912884e1ebe3719 --- /dev/null +++ 
b/audio/116.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:002da85d30a0a1c3b1170f71bc5accda4048cb340f90b9259e69e57b5dac66a5 +size 940076 diff --git a/audio/117.mp3 b/audio/117.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..a0a4f0ae636b1b7c3401b6d418abd913dbbe6ba6 --- /dev/null +++ b/audio/117.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:163a494deeaf2756086bd0a74d5b4cca53084fc2c49bff7c8fdc71cb9f98190a +size 2008556 diff --git a/audio/118.mp3 b/audio/118.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..4463b2604f24310948da27cfc6aff6ee32f1e208 --- /dev/null +++ b/audio/118.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92d935e6ad7efd69d53eda1811e58f8665003891445d2a2c21655857e65c1f79 +size 1143404 diff --git a/audio/119.mp3 b/audio/119.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..c488809da6fa51c773aa8d209703b26198d3f2cc --- /dev/null +++ b/audio/119.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f0a3053de45eb3af3f550b5537f328eb5eaab17631b86d0a2134a3b4c8417fd +size 5435756 diff --git a/audio/120.mp3 b/audio/120.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..ffef393e911773cb185dcf86890b76c9cf16fe80 --- /dev/null +++ b/audio/120.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb83a519381068b4f94814af61d5faf9cab6b1d938fc0ccb82569450124fd0a7 +size 3452721 diff --git a/audio/121.mp3 b/audio/121.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..2c4f9004669b48a8ba087072a6fbf57f40adbaa5 --- /dev/null +++ b/audio/121.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f00ce052c299be61c91fff279e9a5eb53b98d5ae38106e07b5e50c0438b0cb13 +size 1584044 diff --git a/audio/122.mp3 b/audio/122.mp3 new file mode 100644 index 
0000000000000000000000000000000000000000..95194d09fdc1d8d031b035e72c46fd2f5bcf6f4b --- /dev/null +++ b/audio/122.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9de9bf2dbe8d202d27535e943fbf363b6dd1384679f447c8b85aadc0befe18ae +size 3131756 diff --git a/audio/123.mp3 b/audio/123.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..6f2a272099ff82af4718067ac90ca88f6b054eeb --- /dev/null +++ b/audio/123.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61d1579b37799ad2973d83f25ed219ace0dfd917af59c28cf6b731ba42f4dd02 +size 2294139 diff --git a/audio/124.mp3 b/audio/124.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..9281a9b28ec6c86cbc029734815338fa6d5f13c1 --- /dev/null +++ b/audio/124.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3e8ca6ddc29ffbb4e12e91fec4a62f8d9f10445f07663002827cc3b522dcbd5 +size 2507298 diff --git a/audio/125.mp3 b/audio/125.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..291aaffe3e25450fc2bb01c2d32fbf4862fe1d7c --- /dev/null +++ b/audio/125.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acbdb672d974c2a97f6bd22ce9d279cbb0993160925b8a265b178e582e1690c9 +size 1057188 diff --git a/audio/126.mp3 b/audio/126.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..6405e8d2f49acb0d60960e3590004672e50fbf6c --- /dev/null +++ b/audio/126.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d7d6f0fc438386e1899903d615a17029d2f94ae60ff55de5551c4f52c17483b +size 1047784 diff --git a/audio/127.mp3 b/audio/127.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..e190fdc402747b264f04c51c7355e67f92397a31 --- /dev/null +++ b/audio/127.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0ea8fd0238dc9832f379d59de2d7f3f772c5589dbaa2773d9aefd0941427e29 +size 888236 diff --git a/audio/128.mp3 b/audio/128.mp3 
new file mode 100644 index 0000000000000000000000000000000000000000..f1d3ff3ddeac8bfe62dc369904bde331dffb962e --- /dev/null +++ b/audio/128.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7373d80239298dbd1734034c6698ded0bb532511b0abb4e8cea33bd3c973f01d +size 3327596 diff --git a/audio/129.mp3 b/audio/129.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..0e5aa2520b7ad1a2ca7197fd73955d7cca33a1ac --- /dev/null +++ b/audio/129.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e419c132f8fc0ec652a1ddf879dae4e245208e602d577d9fab9624ecd76abcc0 +size 780524 diff --git a/audio/130.mp3 b/audio/130.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..e19beee6b46b88aad242b197279cf9d18823fbbd --- /dev/null +++ b/audio/130.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a24ef88f2f688b4389215c9a121112c8113f7350983183402f8d6236429b720c +size 1520684 diff --git a/audio/131.mp3 b/audio/131.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..ac61e602ecb816f5a0bbbdbdd8a99cb5e28fe3b0 --- /dev/null +++ b/audio/131.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2872f14a162c2e9a40527401d68f42a41ed24a3854fe7f8188607daa1f07c8b4 +size 1428524 diff --git a/audio/132.mp3 b/audio/132.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..5e4136e7cb35725152f8ccd02aae84724a7fc9f1 --- /dev/null +++ b/audio/132.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7d4c58c17a01dbd7d79b43c198930e145da384869ededa76fba45fd1a71ccd5 +size 2604473 diff --git a/audio/133.mp3 b/audio/133.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..227ff7c362c7e0cdc93fc0976f0bda1272bb5351 --- /dev/null +++ b/audio/133.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2271ff918630384ac932f1ba1ca4d35d72cf123ecdcc1cc9179d07965273aecf +size 1147468 diff --git 
a/audio/134.mp3 b/audio/134.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..64eeb4ad49a0f244270f2f4cec5ea0aae25a46bf --- /dev/null +++ b/audio/134.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63a76711da5b953ebdd78a57925367a16dcbe29a3cef3882b391026dbdfbb229 +size 2319843 diff --git a/audio/135.mp3 b/audio/135.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..faccee864f9b93f0abdd59df621520a083edc9eb --- /dev/null +++ b/audio/135.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5db2fa9f1422c65201eaf1b0452f4dd266d9627b72dd92c496e296c7a9d2ea1c +size 2573036 diff --git a/audio/136.mp3 b/audio/136.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..16d89bfeb620d3ee7e866ec4427f531f7b558bc2 --- /dev/null +++ b/audio/136.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e49c4f843350c56ccb7736eefc09ed71d4e5dfb914845b083df8b3aba7943a4 +size 1386331 diff --git a/audio/137.mp3 b/audio/137.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..7aa69259028f6445828b8fb2065ca0a868183562 --- /dev/null +++ b/audio/137.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27231d2eba0821695e0d991cbabd5579529d6bb87fe31058f6a5d2243ac1ed43 +size 2991295 diff --git a/audio/138.mp3 b/audio/138.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..907bbe1ee79f60ba5ef010137b92dfd0fcae4109 --- /dev/null +++ b/audio/138.mp3 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ca8d96a533a7c93fff1a138553866f54356257d0f279830306577722888ce7c +size 1706156 diff --git a/transcripts/uncorrected/106.txt b/transcripts/uncorrected/106.txt new file mode 100644 index 0000000000000000000000000000000000000000..205a49bfddedd83011954e6f20971ecdd44bdb12 --- /dev/null +++ b/transcripts/uncorrected/106.txt @@ -0,0 +1,15 @@ +Generate one for door opening event detected. 
+ +Door closing event detected. + +Bathroom usage detected. + +All air conditioners are now turning on. + +All air conditioners will now turn off. + +Current temperature in the nursery is, just a placeholder. + +Current humidity in the nursery is. + +Air conditioning advised. \ No newline at end of file diff --git a/transcripts/uncorrected/107.txt b/transcripts/uncorrected/107.txt new file mode 100644 index 0000000000000000000000000000000000000000..1dad181212b75b63cfce347860bd0912a029c9f5 --- /dev/null +++ b/transcripts/uncorrected/107.txt @@ -0,0 +1,7 @@ +So a very powerful agent to add to the library. For the email drafting one, I will gather these in a tag called agent plans and they can be there together. This would be a very big one if it could be pulled off. And again, I come back to the question of if there's a front load transcription that doesn't need to route everything through voice notes. For example, if the N8N form element had a voice capture node, it would solve all of this very elegantly. + +So if I dictate an email and I use a transformation in voice notes, I need to say every time it's for me. It gets the names wrong all the time; it gets the style wrong. So it saves a lot of time but still a lot of nudging. System prompt to an email sending agent is like done very successfully in the past; it works very well. So the note comes in tagged, it goes to this, and the output should be basically ready to send every time. So that's not worth creating just to get to that step. + +What it would be worth creating to do is a contact matching and have to be saved into and so on. So, I'm going to go ahead and start the draft probably initially until it's validated or whatever human in the loop thing they have now. In other words, if I say send this to Ronnie or an email to Ronnie or an email to Stephanie or the people that I'm sending to frequently with the Google contacts integration which exists, I imagine they have set up an MCP. 
+ +It can retrieve the person's email based on the entity match, put them in the to field, and that way I could just dictate emails that basically would be hopefully a queue of emails ready to go with one button of a push. So that would be a really cool one to try depending on if the contacts MCP is mature enough to support this. \ No newline at end of file diff --git a/transcripts/uncorrected/108.txt b/transcripts/uncorrected/108.txt new file mode 100644 index 0000000000000000000000000000000000000000..094c52446d96809e647b3dfdce3ed9a793bb7024 --- /dev/null +++ b/transcripts/uncorrected/108.txt @@ -0,0 +1,11 @@ +Okay, so I have the email header images done. I'm going to add in green invoice. So I just cleaned up the workflows I had created previously just to standardize on the structure. And in the JSON payload for the invoice and for the receipt, you get a detailed breakdown of the MAM capture, which can be very helpful because assuming I write this data out to a table, I can at any point in time see exactly what my position and so on. + +So, the question is against MAM based on the amount. So each time if, let's say my test, tests invoice for one Shackle and 18 Agurot of MAM were written, so that gets written as the MAM owed and then you can top that up or check the position of it. That's number one for invoices and for receipts, I'm just going to create separate workflows for the sake of it, although they could be branched into one. + +I'm going to suppress and Sibghts Billing in green invoice so that they can get a custom email with the pay URL which is also provided. There are four URLs provided in the payload. One is the payment URL for the client. You got the document URL in English, Hebrew, and Source. So you can actually download both and put them into your object storage. + +And then for expenses, the question remains if for Hanna at this point is it worth, probably not worth editing. Check the API docs again. 
It still seems to me as if you can't create, you can create an expense in Green Invoice, and many more. The only problem with this is that you can't have them put a document in your database and then have them do their document parsing. + +You can retrieve stuff, but if you're not using it for actual expense logging, there's no value in retrieving the expenses stored there because there won't be any stored. As far as I know, there's no ingress to the expense, although I should probably clarify with Green Invoice. There's no ingress where you can provide, upload an expense that you got and then collect the information. + +In any event, you have to go in and edit it by hand so it's probably not worth, it's probably worth just using this other system but the subscription is back in order with you because it's certainly a lot more, just better to use basically than the other one. \ No newline at end of file diff --git a/transcripts/uncorrected/109.txt b/transcripts/uncorrected/109.txt new file mode 100644 index 0000000000000000000000000000000000000000..05cdb7ec5f4d865f2ae4e1835c1a16cfef5943ed --- /dev/null +++ b/transcripts/uncorrected/109.txt @@ -0,0 +1,17 @@ +So I have a thought that I look into and which I thought I'd share with a friend working in SEO because it occurred to me that this is I feel what will this is would be a very productive way forward. + +So given the huge rise of AI, people are naturally concerned and aggrieved about IP protection and specifically large language models ingesting their blog content and websites into their training data without their consent, which is very reasonable. + +On the other hand, there's a huge opportunity for building thought leadership and branding by actually making it easy, as easy as possible for bots to scrape up your content. + +On the blocking side, you have companies like Cloudflare which are rolling out very quickly AI blocking features which are basically targeted denialists. 
+ +I am curious to know and to see whether on the other side there are actually companies saying AI traffic is massive. It's a very legitimate referral source. + +When we're dealing with search engine traffic, we don't try to put up walls to make it hard for Google and others to index their sites. Why is that the approach you want to take with AI? + +I mean maybe you want to block your real IP; it could be your image galleries, but there's a big potential advantage in actually making it easy. + +So what does AI like? It likes structured data, it likes very clean metadata, and I'm curious to see if any companies and technologies are targeting this. + +Consultants explicitly branding themselves as optimize your site for AI readability, and if not, I predict that this will be a very big demand for this as people instinctively rush to block and then realize that, hang on, our competitors are getting user referral traffic from ChatGPT, etc. Let's undo that and make it easy. \ No newline at end of file diff --git a/transcripts/uncorrected/110.txt b/transcripts/uncorrected/110.txt new file mode 100644 index 0000000000000000000000000000000000000000..f10acd3d92355d82bfcc1301c90f18425c83cad8 --- /dev/null +++ b/transcripts/uncorrected/110.txt @@ -0,0 +1,13 @@ +I'd be interested to know, looking into AI agent workflows at the moment and beginning to gradually embrace the concept that multi-agent workflows have an important place. + +I say that because I tend to think always that I tried to consolidate, simplify, consolidate, get one agent to do many things. + +But I'm seeing examples where I have in a recent workflow an agent just for optimizing the user's voicemail transcription as a prompt. + +Then one for cleaning it up for a Gmail complaint HTML. + +I'd be curious to know in production use cases, can you give me some credible examples where multi-agent workflows are common and where you might actually have a significant amount of them in a chain? 
+ +Give me a couple of examples of, let's say, workflows where you might need three agents like I have and one where you might credibly need eight or even more AI agents all in a sequential chain. + +Thank you. \ No newline at end of file diff --git a/transcripts/uncorrected/111.txt b/transcripts/uncorrected/111.txt new file mode 100644 index 0000000000000000000000000000000000000000..3df1c9c2664686b308f5a7dfdd0a138eb22a4c6b --- /dev/null +++ b/transcripts/uncorrected/111.txt @@ -0,0 +1,5 @@ +Okay, so I would say that the URLs being gathered are correct. They are valid. But I'm really more interested in coverage from the last three years. And there's a lot more than this. + +So what would you recommend as the most, what's going to give us the most effective results to just get this done? A paid service, a paid API that can get from what we have now, which is a basic selection, to a much more nuanced list of maybe 50 or even 60 stratified really by types, podcast, press coverage, etc. + +The structured output is very good, that part is excellent. It's just a discovery that it's falling down on. \ No newline at end of file diff --git a/transcripts/uncorrected/112.txt b/transcripts/uncorrected/112.txt new file mode 100644 index 0000000000000000000000000000000000000000..a04c21c4d414229b30cb243092e3abc823ee719a --- /dev/null +++ b/transcripts/uncorrected/112.txt @@ -0,0 +1,5 @@ +So a note to self, a reminder that I still, so the voice node export was that I did yesterday had about 1700 notes and it, I put it up in Hugging Set privately of course. This is a private data set, I have it locally and I didn't, my plan was to run Olam over it and say this is a really good and representative data set of all the type of notes that I take over about 3 months. + +What I wanted to do was entity recognition. 
This is a to-do list, this is a shopping list and then if we could firstly determine the top 50 recurring entities and then come up with a schema of let's say here are the top 20, here are the top 50 with the purpose of creating the most effective tagging system and on the back of the tagging system then creating the automations. + +So I need to identify the most recurring entities. Some exactitude. Problem for this workflow is that it's a substantial body of text. So I'm not sure that the way to go would be asking the LLM to iterate over the entire dataset. Or maybe it would just sample a little bit of text from the notes and then it would kind of work gradually. \ No newline at end of file diff --git a/transcripts/uncorrected/113.txt b/transcripts/uncorrected/113.txt new file mode 100644 index 0000000000000000000000000000000000000000..bd68049e31a9c825a325c40ba99af6596624bf36 --- /dev/null +++ b/transcripts/uncorrected/113.txt @@ -0,0 +1,7 @@ +I'm doing a lot of Python development on this computer. + +I want to do a bit of Android development too and who knows what else will come in the future. + +I'd like you to evaluate the packages I have on this computer, the environments I'm set up for in terms of development packages specifically, and see if you can identify any obvious gaps for what I have on the computer. + +And this is Ubuntu of course and install anything that would be primarily better for development including any SDKs that I might be missing, packaging files, that kind of thing. 
\ No newline at end of file diff --git a/transcripts/uncorrected/114.txt b/transcripts/uncorrected/114.txt new file mode 100644 index 0000000000000000000000000000000000000000..594f9aac61f926ff694321d55a0c31f8dc22abc6 --- /dev/null +++ b/transcripts/uncorrected/114.txt @@ -0,0 +1,9 @@ +So I'm going to try to do a kind of evaluation, not a scientific one, but just a kind of back of the hand one, looking at the respective merits of Sonnet, Nano 4.1 or whatever the latest OpenAI one is. + +The contender is the increasingly diverse range of tools that Windsurf is bringing in, and it would be good to pay close attention and see what else is emerging in this landscape. + +Sonnet 4 is the most expensive and seems to be kind of the go-to but frequently seems to really struggle on context. + +I feel like Google are going to eventually catch up with it in the model. + +They don't have the same sort of competitive product as they do, and just seeing really which is probably the least frustrating in turning out stuff actually working. \ No newline at end of file diff --git a/transcripts/uncorrected/115.txt b/transcripts/uncorrected/115.txt new file mode 100644 index 0000000000000000000000000000000000000000..5925644a0f2a40e93f9cf4fbb65454da997292e8 --- /dev/null +++ b/transcripts/uncorrected/115.txt @@ -0,0 +1,13 @@ +One experiment that I think would be very useful to do would be to try with the N8N API. + +I don't know if there is yet an MCP for N8N. + +That would be incredible for a self-hosted N8N server. + +Then I could really connect with Windsurf to either the local or the remote one. + +And say, you know, I want to create this workflow, can you start it? + +And actually develop it, execute the commands to build the workflow basically. + +So that's worth really looking into. 
\ No newline at end of file diff --git a/transcripts/uncorrected/116.txt b/transcripts/uncorrected/116.txt new file mode 100644 index 0000000000000000000000000000000000000000..3142f2b03f69a93221ee5e1fe107755269e9383c --- /dev/null +++ b/transcripts/uncorrected/116.txt @@ -0,0 +1,3 @@ +So a DevOps agent would be really interesting to try. I'm going to see if something's been made for this. The way I'm using Windsurf at the moment, which is, you know, have it on my local, then connect to the 1.2 VM, then connect to this environment, and then have it just run stuff on the command line. + +I've seen if anything's actually been made to do this, as in intentionally that's the purpose. And if so, it might be. If not, it might be an idea, but we just have to create a proof of concept firstly and to validate it. But, you know, it could be nice. \ No newline at end of file diff --git a/transcripts/uncorrected/117.txt b/transcripts/uncorrected/117.txt new file mode 100644 index 0000000000000000000000000000000000000000..4e4b11d5bfaf292e39fc2bfe4c948cdc20712382 --- /dev/null +++ b/transcripts/uncorrected/117.txt @@ -0,0 +1,9 @@ +So I'm currently using windsurf IDE in order to work on a large variety of projects, particularly including using it to actually do repairs on the local file system, which is a sort of unofficial off-label application for an agentic code editor, but I find this highly, highly effective. + +And the other agent code IDEs which can be used with a cloud LLM might be more cost effective. That's the only problem with Windsurf; especially if you use Sonnet 4, it becomes very expensive very quickly. + +I don't think agentic on a local, with a locally run LLM is an option. But it struck me that VS Code can be paired with just about any Cloud LLM, any extension. 
+ +Among what's currently out there for agentic assistance, is there any that offers a truly different value proposition to Windsurf in the sense of being really affordable for almost truly unlimited usage? + +With Windsurf, it's just the usage caps, as well as the APIs they use frequently seem to run into exhaustion due to the sheer volume of users that they have. \ No newline at end of file diff --git a/transcripts/uncorrected/118.txt b/transcripts/uncorrected/118.txt new file mode 100644 index 0000000000000000000000000000000000000000..f9d2e662caef7bd697a4f1dd91098710e2bc794c --- /dev/null +++ b/transcripts/uncorrected/118.txt @@ -0,0 +1,3 @@ +I want to work today find out some for Contentful if there's Sanity Studio the pros and cons of that exactly but it would be good to see what they because the only thing that troubles me about this one is that the they're pushing the hire account and it's unaffordable for some people. + +I could I've asked them if there's for private users if they do a deal or something but that's the only thing is vendor lock if they ever pull this free tier I don't find myself screwed basically. \ No newline at end of file diff --git a/transcripts/uncorrected/119.txt b/transcripts/uncorrected/119.txt new file mode 100644 index 0000000000000000000000000000000000000000..f9b0c24f9a48c07cb938e2e8f5e9f3b937eeea58 --- /dev/null +++ b/transcripts/uncorrected/119.txt @@ -0,0 +1,7 @@ +So I'm currently using N8N quite extensively for automations. It is an excellent platform, very powerful, learning curve for sure. The issue I find is that, so I'm using Windsurf IDE for a lot of things in general, especially for automation. What I find with N8N is that it can take a very long time to configure a workflow when you're creating each step manually in an automation chain. But if I can, on the other hand, prompt an AI agent in Windsurf to generate a Python script to achieve the same thing, it could take me minutes rather than potentially even hours. 
So it's a lot more efficient to do it at the code level just in Python. + +What I'm thinking is that as basically all I need is a server with Python script running, which is the core of what probably N8N is under the hood, it might be more efficient to begin migrating some of these scripts or creating some of these scripts deployed directly on a server in a code environment. This would give me the ability to have an AI agent connect directly to the server, edit my workflows, edit the, etc. + +So my question is as follows: if I wanted to take that approach, one of the useful things in N8N, of course, is the ability to save credentials which can be used across your scripts for the different integrations. Is there any platform? I'm always trying to avoid reinventing the wheel. Is there any platform that is intended for this? I keep thinking to myself that if a business comes along and wants to create different workflows for integrating different services, whether it's, you know, it could be relatively mundane back office operations, I can't imagine that they're going to go to N8N. Maybe they are. But I wonder if there's a platform that is intended to provide the overall framework for holding together a bunch of automation. + +There might be a GUI for actually managing the Python scripts, managing the environment variables, and that would provide the code frontend to N8N. It would still, however, be important to be able to edit them locally, in other words, to edit or deploy scripts that then get synced up to the deployment environment. But what would you say is the sort of code-first approach here? Is there a framework that is the equivalent of N8N for this? Or would most people just deploy their own Python script library to a server, have it run, and that's how they manage automation scripts in production? 
\ No newline at end of file diff --git a/transcripts/uncorrected/120.txt b/transcripts/uncorrected/120.txt new file mode 100644 index 0000000000000000000000000000000000000000..d851e5b8723fc4ab93e27bc02f0d936c5e6ec80e --- /dev/null +++ b/transcripts/uncorrected/120.txt @@ -0,0 +1,7 @@ +So I came across recently there's a I didn't realize that there's already the first class of sensor processing units that are available at a consumer price point and in a consumer form factor. I'm referring to the Google Coral series for about $60 etc. They're most popular for computer vision workflows. + +I came across them looking at motion detection for IP cameras, and what I'm unclear about, I'm not sure about, is to what extent, because they're so small and relatively cheap, they actually going to make, let's say you had a computer that wasn't really very well equipped for running stuff with AI. Let's just say a very basic GPU. If running one of these or doing the more conventional upgrade of putting in a better GPU, would you expect something significant from this small addition? + +It seems to me that it's doubtful that it would really bring that much more inference versus a more traditional hardware upgrade. But the other question that I have is for speech to text, which is something that I'm looking at for a long time. I know that Google's Pixel phones for on-device STT are considered the best in class. + +So from a hardware standpoint, is there anything like, why is it, if you wanted to get something, if the Coral is an example of something that you can achieve big things for relatively cheap by focusing on the right hardware for the task, is there anything comparable for speech to text? 
\ No newline at end of file diff --git a/transcripts/uncorrected/121.txt b/transcripts/uncorrected/121.txt new file mode 100644 index 0000000000000000000000000000000000000000..559124e4a5dc8b2e3625a8dddd6f1a2d83e9f053 --- /dev/null +++ b/transcripts/uncorrected/121.txt @@ -0,0 +1,3 @@ +So if we're struggling with Gemini, it seems to me that this may be a good task for a model like Gemini in conjunction with a search engine API of some kind. Try it one more time with the fix. And if you can think of an external help, maybe Perplexity I was thinking. Really, whatever could have a good pool of historical news mentions or just general scraping, to be honest. Firecrawl, if that's an option. + +If the next attempt fails, try to think of more diverse approaches to gather this list. I think it should definitely be feasible. There's also a lot of interviews of him on YouTube and on podcasts, so something that could scrape YouTube and add those to the array would be helpful as well. \ No newline at end of file diff --git a/transcripts/uncorrected/122.txt b/transcripts/uncorrected/122.txt new file mode 100644 index 0000000000000000000000000000000000000000..d2c58fc11c7d461951ac401225f8af5278a2678f --- /dev/null +++ b/transcripts/uncorrected/122.txt @@ -0,0 +1,3 @@ +So we have an AI agent workflow which is currently triggered by a voice note that I tag. The workflow works very well, I validated it. And I'm trying to think about other ways that workflows like this can be a little bit easier to use. So this is, it's a single turn agent. The user will send in a question, a parenting question. And then it comes out to a note, to an email, to a shared inbox so that we can both get the receipts. So that aspect works very well and I think it has to be conversational because the other, a single turn because the only way that it could be conversational would be you'd have to get involved in managing chats and having the agent summarize the chat would actually be a lot more complicated. 
+ +And so on and the other way around. So the way that I go turn agents from the user is you can do a telegram bot. A form is very simple to set up but it kind of awkward to use because who wants to complete a form to prompt an AI tool? The voice message app that I'm using, which is Voice Notes and it's a tag is probably for me, it's my favorite way I've found so far. It's very easy. And so on. And so on. \ No newline at end of file diff --git a/transcripts/uncorrected/123.txt b/transcripts/uncorrected/123.txt new file mode 100644 index 0000000000000000000000000000000000000000..7c400cf754697c9807ec7c299b698b31848cd2c5 --- /dev/null +++ b/transcripts/uncorrected/123.txt @@ -0,0 +1,3 @@ +I'm currently storing AI outputs in Gmail because it's a workflow that I've built and I'm eager to stick with it. I tend to like when you're using a prompting workflow that then delivers to the user. It always seems to me that there's a little bit of a vacuum of thought into what the most obvious delivery model should be. I personally like email because a well-formatted email is kind of nice to read. I've tried Slack, which tends to have horrible restrictions around formatting. + +I'm looking into saving to a node editor, but to be honest, the only negative about Gmail is that I can see if I run like 100 prompts per day it would really clog up my inbox. I'm thinking about using a dedicated Google group to obviate this problem so long as you can read it easily on an Android. But just looking at it purely from a capacity standpoint, how many prompts per day could I store if I just wanted to keep the notifications this way without overwhelming my Google Workspace storage? I imagine the volume might be quite significant in terms of how many messages you can retain and etc. Give me an example where I could credibly keep this many and have my consumption usage and any other limits not really be significantly adversely affected. 
\ No newline at end of file diff --git a/transcripts/uncorrected/124.txt b/transcripts/uncorrected/124.txt new file mode 100644 index 0000000000000000000000000000000000000000..41b769c666909652edafca4258285af128085915 --- /dev/null +++ b/transcripts/uncorrected/124.txt @@ -0,0 +1,7 @@ +So for an AI agent, let's say I have an app, or it's currently a custom app, I'm just experimenting with different configurations. It's our parenting assistant agent, so the system prompt is pretty good. It emails us the output after the agent gets the prompt, and it also saves it to a note. So that end of it is good, and there's a webhook trigger. + +So really, I'm just trying out different input methods to get the prompt to the webhook. The app is good, but I would actually prefer not to rely on something that's custom made. Two ways of messaging the AI agent would be very, very convenient. + +The first would be sending a voice note through like a messaging app, either WhatsApp or Telegram. And less convenient would be something like Slack. I think that Telegram, knowing the input channels that are feasible, I think Telegram is actually the best bet. + +So if I wanted to create a Telegram bot that whenever it received either a text or a voice and so on. \ No newline at end of file diff --git a/transcripts/uncorrected/125.txt b/transcripts/uncorrected/125.txt new file mode 100644 index 0000000000000000000000000000000000000000..668b21fff236e8df63f193a6fde9b0904d46d6b1 --- /dev/null +++ b/transcripts/uncorrected/125.txt @@ -0,0 +1,3 @@ +I'm going to see if there's any migration path from Home Assistant OS through to Home Assistant on a VM, what any downsides might be. 
+ +The reason I'm thinking about this is that if you do it at the HAOS level, while it seems at first like the most flexible option, the restrictions — the permissions, the SSH control — all that would probably not be required if it was running on top of a multibox where you can always mount and edit the connected file systems. \ No newline at end of file diff --git a/transcripts/uncorrected/126.txt b/transcripts/uncorrected/126.txt new file mode 100644 index 0000000000000000000000000000000000000000..a0b3bb8e6aad30bda734c185ee669999b307a4ed --- /dev/null +++ b/transcripts/uncorrected/126.txt @@ -0,0 +1,5 @@ +I got an email from a guy at a company that I want to check out tomorrow. It's called Mirai and it is on-device AI. + +It says that it's 50% more efficient than llama.cpp. + +So I want to look into it and see if there's any use case for running speech-to-text models specifically at the edge. \ No newline at end of file diff --git a/transcripts/uncorrected/127.txt b/transcripts/uncorrected/127.txt new file mode 100644 index 0000000000000000000000000000000000000000..0ca4629ad72096ac064a06070ce1a8a61855f6c5 --- /dev/null +++ b/transcripts/uncorrected/127.txt @@ -0,0 +1,5 @@ +So I'm trying to create a network of speakers in the apartment for the purpose of sending out alerts in Home Assistant, basically text-to-speech alerts. + +How does MPD fare for this purpose? What I noticed when I was using Snapcast and Snapserver was that it was very hard to set up and actually it didn't achieve good sync at all, to my surprise. + +My question is, is it reasonable to just use MPD, one on each speaker? Or should I try to find a better solution for syncing the playback through a server? 
\ No newline at end of file diff --git a/transcripts/uncorrected/128.txt b/transcripts/uncorrected/128.txt new file mode 100644 index 0000000000000000000000000000000000000000..28a1d1f6735490390b4ab1e53dec86adb7e5e3bd --- /dev/null +++ b/transcripts/uncorrected/128.txt @@ -0,0 +1,9 @@ +One interesting idea from my voice note thing, idea, or system as it's coming to fruition. In the last note, which I could have articulated a bit more carefully had I thought of this, and the other one is a single-turn prompt and a prompt for an autonomous workflow. So if I said I want to do salary research in Israel, you have my background in context, and so on. + +So, that could actually be sent to a multi-step research agent such as the ones that Zapier do very well but I'm sure could be articulated. For that reason, I'll create a tag for prompts — just the regular, standard single-turn prompts: I need this information, give it back to me, I'm recording it now — and then there can be a second one for deep research basically. + +And I'm sure if I look in the N8N workflow directory — speaking of which, I should go through it and add templates that are trending or promising or top rated. When you bring them in, you do need to have the credentials mapped. So if they use something that you can swap out, you still need to, that's the only limitation to it. + +But I'm sure that's one that I've seen people targeting and which could be very useful. So instead of just coming back with a single turn, I guess the difference, and it's a bit of a gray area, what is the difference? And it's probably tool usage and multi-step. + +So in this example for salary research, it goes to an agent with maybe SERP API, certainly probably Perplexity, and maybe even autonomous tool usage. So it would say maybe there's a jobs API, etc. 
\ No newline at end of file diff --git a/transcripts/uncorrected/129.txt b/transcripts/uncorrected/129.txt new file mode 100644 index 0000000000000000000000000000000000000000..d781a980c67e28b7f625b9f578b9d2427efca0c2 --- /dev/null +++ b/transcripts/uncorrected/129.txt @@ -0,0 +1,7 @@ +What might be useful in terms of the other AI components I'm thinking about besides vector database? + +Persistent memory. + +Are there any self-hostable projects that could fit onto this Docker network for persistent memory? + +And other specific tools emerging for that. \ No newline at end of file diff --git a/transcripts/uncorrected/130.txt b/transcripts/uncorrected/130.txt new file mode 100644 index 0000000000000000000000000000000000000000..5910841b1b461d6103c7ce7210e16c2bd3760eca --- /dev/null +++ b/transcripts/uncorrected/130.txt @@ -0,0 +1,5 @@ +I'm going to look in today as well into the question of the routing of the phone. It seems like it's better to do it sooner rather than later because it just gets harder. + +The first thing to see is if with ADB I can pull out an extract of everything on the phone in such a way that it can just be quickly reinstalled as it's a good version of apps. And the second one is to check out quickly. I have a feeling that this OnePlus is very good and well supported. The only question is just to verify with the exact build number and model number if there is a ROM that works. + +Assuming that there is, validate and check for real what the actual process is in terms of how long it'll take, what to set, how complicated it is, if it is an easy thing, and then just flash it and so on. \ No newline at end of file diff --git a/transcripts/uncorrected/131.txt b/transcripts/uncorrected/131.txt new file mode 100644 index 0000000000000000000000000000000000000000..74c0da70573a35bb2944ee3f64dc55c5ee361e69 --- /dev/null +++ b/transcripts/uncorrected/131.txt @@ -0,0 +1,5 @@ +So Restreamer is doing very well. 
It's driving some excellent streams that I'm able to integrate into other programs now. The self-created app EITS can be taken down. + +And I'm wondering if Restreamer firstly has a web UI of its own for administration that might be useful to have access to. And secondly, if there is an NVR that you can recommend that would be able to play these camera streams. I need one grid sometimes and the other thing is just being able to toggle between individual cameras, nothing too complicated. + +But as the restreaming is already working well, it seems to me that it's better to work with what the restreaming produces than with the source RTSP. If you know of any NVRs that would be appropriate, you can add them to the stack and then we'll get back to the file browser at a later point. \ No newline at end of file diff --git a/transcripts/uncorrected/132.txt b/transcripts/uncorrected/132.txt new file mode 100644 index 0000000000000000000000000000000000000000..9df2e0976ca8649506ed27a81bbdb48bcba521ae --- /dev/null +++ b/transcripts/uncorrected/132.txt @@ -0,0 +1,5 @@ +Question for Snapcast, sending things in to it. The server, or Snap server I should say. And then connecting that to multiple speakers in different rooms. + +What I have in the current architecture is MPD in front of Snapcast. And that was chosen. Firstly, Snapcast is quite an impressive system. I have the Snap clients running on the devices and I see that it could be really useful for stuff like home cinema or just creating kind of cool soundscapes in your house. + +Let's say I put the Home Assistant use to the side. Let's say that I wanted to maybe have something running here. And I'm driving out to multiple speakers. What would be the best way to do that? And, whatever role MPD plays in the stack, are there other more versatile or more modern stream intake layers that could do a better job? 
\ No newline at end of file diff --git a/transcripts/uncorrected/133.txt b/transcripts/uncorrected/133.txt new file mode 100644 index 0000000000000000000000000000000000000000..3d65707131f02e2a52754f7669f6aeb4ed0bf416 --- /dev/null +++ b/transcripts/uncorrected/133.txt @@ -0,0 +1,7 @@ +I'm using Suno AI to generate songs and it's quite a lot of fun. I'm wondering, does Suno have an API? Because I want to generate a little kind of songbook for our son. And I've created a few, but I'd like to do a few more variations. + +Is there anyone in that, these song generators? I know that Suno is kind of restrictive in that respect. + +And I know that there are generative AI APIs coming on board a lot. What is the technology that Suno does in which they create music from prompts? + +I think I saw before what was actually powering it and who else is doing this? \ No newline at end of file diff --git a/transcripts/uncorrected/134.txt b/transcripts/uncorrected/134.txt new file mode 100644 index 0000000000000000000000000000000000000000..8ce1e197c614a65743058179a4c94d84ffcae881 --- /dev/null +++ b/transcripts/uncorrected/134.txt @@ -0,0 +1,5 @@ +My wife and I both find this remarkable that it's still considered a not essential but good thing to do to take a parenting course. When we had our first boy, it occurred to me shortly after birth that there are so many basic things like how to safely hold a baby, how to change a diaper that we thought was the last minute to learn, but even then there was really no sort of structure for doing so besides watching YouTube videos. + +I'd be interested to know if my sister tells me who lives in London that parenting courses are becoming an increasingly popular thing, often sponsored by the National Health Service. In Israel, where I live, it's a marginal dynamic, if at all. + +The second thing I'm wondering is, I would like the idea of taking one of these courses through video, remotely, including aspects of First Aid. 
Are there any reputable courses or apps you can recommend for people in my situation who feel after the fact that it would have been useful to do something like your course but we now have our newborn? What would you recommend? And I'd love to get your thoughts on the question regarding the prevalence and popularity of these parenting courses. \ No newline at end of file diff --git a/transcripts/uncorrected/135.txt b/transcripts/uncorrected/135.txt new file mode 100644 index 0000000000000000000000000000000000000000..c3e20c2765c52eae158be9fef1ff3e5093856b5a --- /dev/null +++ b/transcripts/uncorrected/135.txt @@ -0,0 +1,5 @@ +So I've been recording voice notes for a number of months now, and there's a feature in the client I use called voice notes, which allows you to download the audio—in other words, the original audio recording that you submitted that then got transcribed. I don't really like this feature because I never want, I never use those audio recordings. I don't like the idea that I have them and the rest of the team. So I have a bunch of these audio recordings out there in the cloud, but I am interested to know if there's any use I can make of them. + +So I have something I've really thought about doing for a while, which I'd love to do, is creating a fine-tuned speech-to-text model optimized for my own voice recognition performance and the words that I use. The issue is this: the transcripts from voice notes are going to be imperfect. And I know that for training speech-to-text models, you typically have a source of truth file and a transcript. And that's how they're trained. I don't have the source of truth because I haven't manually written any transcripts. Without that, is there anything that I can still do? + +Let's say that I have several hundred hours aggregated of these recordings. 
It's quite a substantial bank of data of me speaking in different audio environments, with different levels of background noise, different times of the day, that I feel like could be useful. So I can download the MP3s, put them into a repository pretty easily. But I'd be curious if you can think of any actual uses I can make in the speech-to-text thing that I've mentioned. \ No newline at end of file diff --git a/transcripts/uncorrected/136.txt b/transcripts/uncorrected/136.txt new file mode 100644 index 0000000000000000000000000000000000000000..1a3fb7b526b7459b5dd0816c67a8227af5f46954 --- /dev/null +++ b/transcripts/uncorrected/136.txt @@ -0,0 +1,3 @@ +I find watching the early development of our newborn son to be really fascinating. It's made me wonder things about what the experience is of being a baby that I never thought of before. + +I'd be curious to know whether there's been any good documentaries you can recommend or books from the past few years which might touch upon our evolving understanding of this period of life. What kind of experiences newborns are going through, how they develop, especially with especially valuable explanations of the scientific literature intended for a layperson audience and written over the past number of years, whether it's documentaries or books, make some informed recommendations. \ No newline at end of file diff --git a/transcripts/uncorrected/137.txt b/transcripts/uncorrected/137.txt new file mode 100644 index 0000000000000000000000000000000000000000..cf53a83669832fb8373d7326d43985fb68c3bbec --- /dev/null +++ b/transcripts/uncorrected/137.txt @@ -0,0 +1,5 @@ +I have a long-standing interest in the paranormal. I've always found the literature around dying to be morbidly fascinating, and many more. Melting of the division between this world and the world which may be beyond it for those who believe, of course. + +I often watch with curiosity how our newborn seems fixated on something in a specific part of the room. 
It doesn't really have a rational explanation, but they seem to be engaging in a very animated fashion with something or someone. + +Has any literature been written about this, and is there any legitimate research into it? \ No newline at end of file diff --git a/transcripts/uncorrected/138.txt b/transcripts/uncorrected/138.txt new file mode 100644 index 0000000000000000000000000000000000000000..a09f0f50f1f98676ad28ff49de3ffd37082c33e5 --- /dev/null +++ b/transcripts/uncorrected/138.txt @@ -0,0 +1,5 @@ +Write a feature request to the Windsurf team. Say that I've had a couple of instances in which the app I'm using on Ubuntu has run into corruption issues and I've had to delete the user data directory. + +I would ideally like to never be in a place in which I lose my Windsurf rules or I lose my list of extensions which are installed. As Windsurf is already a cloud service, I was wondering if you might consider integrating some kind of backup functionality into the product in which the user's MCP servers, configurations, list of installed extensions, and very importantly, default Windsurf rules are all aggregated into a backup package that the user might be able to export. + +This backup package could, at the least, be synced to the cloud so that in the event that the user had to delete their user directory, for example, all this data could be quickly repopulated to get them back to their previous working configuration.