question_slug stringlengths 3 77 | title stringlengths 1 183 | slug stringlengths 12 45 | summary stringlengths 1 160 ⌀ | author stringlengths 2 30 | certification stringclasses 2
values | created_at stringdate 2013-10-25 17:32:12 2025-04-12 09:38:24 | updated_at stringdate 2013-10-25 17:32:12 2025-04-12 09:38:24 | hit_count int64 0 10.6M | has_video bool 2
classes | content stringlengths 4 576k | upvotes int64 0 11.5k | downvotes int64 0 358 | tags stringlengths 2 193 | comments int64 0 2.56k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
minimum-number-of-operations-to-convert-time | [Java] Greedy Solution | java-greedy-solution-by-maddetective-6alc | \n// Greedy Solution\n// Time complexity: O(1), O(T) where T is a constant\n// Space complexity: O(1)\nclass Solution {\n private static final int[] TIMES = | maddetective | NORMAL | 2022-04-15T04:01:51.716153+00:00 | 2022-04-15T04:02:44.214609+00:00 | 82 | false | ```\n// Greedy Solution\n// Time complexity: O(1), O(T) where T is a constant\n// Space complexity: O(1)\nclass Solution {\n private static final int[] TIMES = {60, 15, 5, 1};\n \n public int convertTime(String current, String correct) {\n int t1 = Integer.parseInt(current.substring(0, 2)) * 60 + Integer.parseInt(current.substring(3));\n int t2 = Integer.parseInt(correct.substring(0, 2)) * 60 + Integer.parseInt(correct.substring(3));\n int ops = 0;\n for (int time : TIMES) {\n while (t1 + time <= t2) {\n t1 += time;\n ops++;\n }\n }\n return ops;\n }\n}\n``` | 1 | 0 | ['Java'] | 0 |
minimum-number-of-operations-to-convert-time | Java solution | java-solution-by-nishant7372-7usa | ```java\nclass Solution {\n public int convertTime(String current, String correct) {\n int sum=0;\n sum+=Integer.parseInt(correct.substring(0,2 | nishant7372 | NORMAL | 2022-04-10T04:26:31.967574+00:00 | 2022-04-10T04:28:00.249294+00:00 | 58 | false | ```java\nclass Solution {\n public int convertTime(String current, String correct) {\n int sum=0;\n sum+=Integer.parseInt(correct.substring(0,2))-Integer.parseInt(current.substring(0,2));\n int min = Integer.parseInt(correct.substring(3,5))-Integer.parseInt(current.substring(3,5));\n if(min<0)\n {\n sum--;\n min+=60;\n }\n if(min>=15)\n {\n sum+=min/15;\n min-=15*(min/15);\n }\n if(min>=5)\n {\n sum+=min/5;\n min-=5*(min/5);\n }\n sum+=min;\n return sum;\n }\n} | 1 | 0 | [] | 0 |
minimum-number-of-operations-to-convert-time | simple javascript | simple-javascript-by-pdd1986-3bo0 | \nconst convertTime = function(current, correct) {\n const [currentHH, currentMM] = current.split(\':\').map((val) => Number(val));\n const [correctHH, co | PDD1986 | NORMAL | 2022-04-10T00:49:45.919137+00:00 | 2022-04-10T00:49:45.919162+00:00 | 123 | false | ```\nconst convertTime = function(current, correct) {\n const [currentHH, currentMM] = current.split(\':\').map((val) => Number(val));\n const [correctHH, correctMM] = correct.split(\':\').map((val) => Number(val));\n \n let total = 0;\n \n total += correctHH - currentHH;\n \n let diff = correctMM - currentMM;\n \n // for negative where needed minutes are lower than the current minutes\n if (correctMM < currentMM) {\n total -= 1;\n diff += 60;\n }\n\n\n if (diff >= 15) {\n total += Math.floor(diff / 15);\n diff = diff % 15;\n }\n\n if (diff >= 5) {\n total += Math.floor(diff / 5);\n diff = diff % 5;\n }\n\n if (diff > 0) {\n total += diff;\n }\n\n return total;\n};\n``` | 1 | 0 | ['JavaScript'] | 0 |
minimum-number-of-operations-to-convert-time | Simple greedy solution || O(1) time | simple-greedy-solution-o1-time-by-decode-p705 | In this problem greedy strategy worked because given values {60,15,5,1} are divisible with each other.\n\n\nclass Solution\n{\npublic:\n int convertTime(stri | decoder_j | NORMAL | 2022-04-09T14:35:19.218836+00:00 | 2022-04-09T14:35:50.562743+00:00 | 125 | false | **In this problem greedy strategy worked because given values {60,15,5,1} are divisible with each other.**\n\n```\nclass Solution\n{\npublic:\n int convertTime(string current, string correct)\n {\n // if current is same as correct we return 0\n if (current == correct)\n return 0;\n\n // converting the string into int using stoi() function\n int h1, h2, m1, m2;\n h1 = stoi(current.substr(0, 2));\n h2 = stoi(correct.substr(0, 2));\n m1 = stoi(current.substr(3, 2));\n m2 = stoi(correct.substr(3, 2));\n\n // storing the difference between current and correct\n int time = 0;\n time += (m2 - m1);\n time += (h2 - h1) * 60;\n\n // using greedy method to find the minimum moves\n int ans = 0;\n ans += time / 60, time = time % 60;\n ans += time / 15, time = time % 15;\n ans += time / 5, time = time % 5;\n ans += time;\n\n return ans;\n }\n};\n```\n\n**Time Complexity: O(1)**\n**Space Comlexity: O(1)** | 1 | 0 | ['C'] | 1 |
minimum-number-of-operations-to-convert-time | Java Solution | java-solution-by-12arpit-u75q | \nclass Solution {\n public int convertTime(String current, String correct) {\n int rem=time(correct)-time(current);\n int[] ops={60,15,5,1};\n | 12arpit | NORMAL | 2022-04-09T13:40:17.706496+00:00 | 2022-04-09T13:41:39.022687+00:00 | 38 | false | ```\nclass Solution {\n public int convertTime(String current, String correct) {\n int rem=time(correct)-time(current);\n int[] ops={60,15,5,1};\n \n int res=0;\n for(int op:ops){\n res+=rem/op;\n rem=rem%op;\n }\n \n return res;\n }\n \n private int time(String time){\n int h=Integer.parseInt(time.split(":")[0]);\n int m=Integer.parseInt(time.split(":")[1]);\n \n return h*60+m;\n }\n}\n``` | 1 | 0 | [] | 0 |
minimum-number-of-operations-to-convert-time | Java O(1) simple approach | java-o1-simple-approach-by-kcstark-uie1 | \nclass Solution {\n public int convertTime(String c, String co) {\n String[] s1=c.split(":") ;\n String[] s2=co.split(":") ;\n int ct= | KCstark | NORMAL | 2022-04-06T09:53:20.811980+00:00 | 2022-04-06T09:53:20.812016+00:00 | 124 | false | ```\nclass Solution {\n public int convertTime(String c, String co) {\n String[] s1=c.split(":") ;\n String[] s2=co.split(":") ;\n int ct= Integer.parseInt(s1[0])*60 + Integer.parseInt(s1[1]);\n int rt= Integer.parseInt(s2[0])*60 + Integer.parseInt(s2[1]);\n // System.out.println(ct+" "+rt);\n int rem=rt-ct;ct=0;\n if(rem==0)\n return ct;\n ct+=rem/60;\n rem=rem%60;\n if(rem==0)\n return ct;\n ct+=rem/15;\n rem%=15;\n if(rem==0)\n return ct;\n ct+=rem/5;\n rem%=5;\n if(rem==0)\n return ct;\n ct+=rem/1;\n rem%=1;\n \n \n return ct;\n }\n}\n``` | 1 | 0 | ['Java'] | 0 |
minimum-number-of-operations-to-convert-time | Python Simple Solution | Easy to understand | Begginners | python-simple-solution-easy-to-understan-2blt | \nclass Solution(object):\n def convertTime(self, current, correct):\n """\n :type current: str\n :type correct: str\n :rtype: in | AkashHooda | NORMAL | 2022-04-04T12:59:53.039684+00:00 | 2022-04-04T12:59:53.039716+00:00 | 80 | false | ```\nclass Solution(object):\n def convertTime(self, current, correct):\n """\n :type current: str\n :type correct: str\n :rtype: int\n """\n l = current.split(":")\n m = correct.split(":")\n c = 0 \n c+=int(m[0])-int(l[0])\n x = int(m[1])-int(l[1])\n if int(m[1])<int(l[1]):\n c-=1\n x = int(m[1])\n x+=60-int(l[1])\n while x>0:\n if x>=15:\n c+=x//15 \n x=x%15 \n\n elif x>=5:\n c+=x//5\n x = x%5\n else:\n c+=x \n x=0 \n return c\n``` | 1 | 0 | ['Python', 'Python3'] | 0 |
minimum-number-of-operations-to-convert-time | greedy and dp solutions in python | greedy-and-dp-solutions-in-python-by-var-peic | \n# \t\tdp \n\t\t\n\t\t@cache\n def solve(i,rem):\n if rem<0:\n return INF\n if i==N:\n if rem==0:\n | dragon469 | NORMAL | 2022-04-04T12:09:47.533208+00:00 | 2022-04-04T12:14:13.062922+00:00 | 111 | false | \n# \t\tdp \n\t\t\n\t\t@cache\n def solve(i,rem):\n if rem<0:\n return INF\n if i==N:\n if rem==0:\n return 0\n return INF\n remcurrent=1+solve(i,rem-nums[i])\n othermove=solve(i+1,rem)\n return min(remcurrent,othermove)\n \n def f(value):\n hh,mm=[int(x) for x in value.split(\':\')]\n return hh*60+mm\n diff=f(correct)-f(current)\n nums=[60,15,5,1]\n INF=10**10\n N=4\n return solve(0,diff)\n# \t\tgreedy\n \n\t\t\n def f(value):\n hh,mm=[int(x) for x in value.split(\':\')]\n return hh*60+mm\n\t\tdiff=f(correct)-f(current)\n cnt=0\n for i in [60,15,5,1]:\n cnt+=diff//i\n diff=diff%i\n return cnt\n | 1 | 0 | ['Dynamic Programming', 'Greedy', 'Python'] | 0 |
minimum-number-of-operations-to-convert-time | Python | Greed towards mins | python-greed-towards-mins-by-sonyd4d-zhfa | \nclass Solution:\n def convertTime(self, current: str, correct: str) -> int:\n cur_hr,cur_min = map(int,current.split(":"))\n cor_hr,cor_min = | sonyD4d | NORMAL | 2022-04-03T23:34:15.542449+00:00 | 2022-04-03T23:34:15.542556+00:00 | 25 | false | ```\nclass Solution:\n def convertTime(self, current: str, correct: str) -> int:\n cur_hr,cur_min = map(int,current.split(":"))\n cor_hr,cor_min = map(int,correct.split(":"))\n \n cur_min = cur_min + cur_hr*60\n cor_min = cor_min + cor_hr*60\n \n req = abs(cur_min-cor_min)\n ans = 0\n \n for i in [60,15,5,1]:\n ans+= req//i\n req %= i\n \n return ans\n``` | 1 | 0 | [] | 0 |
minimum-number-of-operations-to-convert-time | Go - 0 ms | go-0-ms-by-marcdidom-eojr | \nfunc convertTime(current, correct string) (op int) {\n diff := minutesConversion(correct) - minutesConversion(current)\n for _, v := range []int{60, 15 | marcdidom | NORMAL | 2022-04-03T22:08:48.488178+00:00 | 2022-04-03T22:08:48.488210+00:00 | 25 | false | ```\nfunc convertTime(current, correct string) (op int) {\n diff := minutesConversion(correct) - minutesConversion(current)\n for _, v := range []int{60, 15, 5, 1} {\n op += diff/v\n diff %= v\n }\n return\n}\n\nfunc minutesConversion(s string) int {\n hours, _ := strconv.Atoi(s[:2])\n minutes, _ := strconv.Atoi(s[3:5])\n hours *= 60\n return hours+minutes\n}\n``` | 1 | 0 | [] | 0 |
minimum-number-of-operations-to-convert-time | Java| Beginner Friendly| Greedy | java-beginner-friendly-greedy-by-emsugan-rouz | This method involves converting the given time into mins. Then we find target = correct - correct. We run a loop to greedily make target as 0 using 60,15,5,1 su | emsugandh | NORMAL | 2022-04-03T18:10:03.033289+00:00 | 2022-04-03T18:21:51.600598+00:00 | 67 | false | This method involves converting the given time into mins. Then we find target = correct - correct. We run a loop to greedily make target as 0 using 60,15,5,1 subtractions. Here the order of subtractions is important as we want to get min operation count. I write a custom function toMins() which converts given time to mins of type int. I dont use split method or fancy java methods in conversion. \nHere is the complete code\n```\nclass Solution {\n public int convertTime(String current, String correct) {\n //current<=correct -> correct-current\n int x = toMins(current);\n int y = toMins(correct);\n int target = y - x;\n int ops = 0;\n while(target>0) {\n if(target>=60)\n target = target - 60;\n else if(target>=15 && target<60)\n target = target - 15;\n else if(target>=5 && target<15)\n target = target - 5;\n else \n target = target - 1;\n ops++;\n }\n return ops;\n \n }\n private int toMins(String s) {\n int i = 0;\n while(s.charAt(i)!=\':\') {\n i++;\n }\n String hours = s.substring(0, i);\n String mins = s.substring(i+1, s.length());\n return Integer.valueOf(hours) * 60 + Integer.valueOf(mins);\n \n }\n}\n``` | 1 | 0 | [] | 2 |
minimum-number-of-operations-to-convert-time | PythonTimeConverter | Tc:O(1) | Sc:O(4) | | pythontimeconverter-tco1-sco4-by-satishn-551l | \nclass Solution:\n def convertTime(self, current: str, correct: str) -> int:\n currentTime = 60 * int(current[:2]) + int(current[3:]) # Current time | satishnaidu400 | NORMAL | 2022-04-03T15:15:47.522288+00:00 | 2022-04-03T15:15:47.522329+00:00 | 34 | false | ```\nclass Solution:\n def convertTime(self, current: str, correct: str) -> int:\n currentTime = 60 * int(current[:2]) + int(current[3:]) # Current time in minute\n targetTime = 60 * int(correct[:2]) + int(correct[3:]) # Current time in minutes\n diff = targetTime - currentTime\n count = 0\n for i in [60, 15, 5, 1]:\n count += diff // i # add number of operations needed with i to count\n diff %= i #subtracting the i from diff after counting the steps\n return count\n```\nreference from\n[https://leetcode.com/anCoderr/](http://) | 1 | 0 | ['Math', 'Python3'] | 0 |
minimum-number-of-operations-to-convert-time | Convert everything to minutes | convert-everything-to-minutes-by-shanush-pbvr | Convert hours to minutes.\n Add minutes together and find difference.\n Calculate greedy answer.\n\n\nclass Solution:\n def convertTime(self, current: str, c | shanushan | NORMAL | 2022-04-03T08:08:16.667494+00:00 | 2022-04-04T03:07:58.920936+00:00 | 31 | false | * Convert hours to minutes.\n* Add minutes together and find difference.\n* Calculate greedy answer.\n\n```\nclass Solution:\n def convertTime(self, current: str, correct: str) -> int:\n current = list(map(int, current.split(\':\')))\n correct = list(map(int, correct.split(\':\')))\n \n current = current[0] * 60 + current[1]\n correct = correct[0] * 60 + correct[1]\n \n diff = correct - current\n ans = 0\n for val in (60, 15, 5, 1):\n while diff >= val:\n ans += 1\n diff -= val\n return ans\n```\n | 1 | 0 | ['Math', 'Greedy', 'Python'] | 0 |
minimum-number-of-operations-to-convert-time | C++ || greedy | c-greedy-by-kr_sk_01_in-89us | \n \n int h1=(curr[1]-\'0\');\n h1=h1+ (curr[0]-\'0\')*10;\n \n int min1=(curr[4]-\'0\');\n min1=min1+ (curr[3]-\'0\')*10;\ | KR_SK_01_In | NORMAL | 2022-04-03T07:32:32.264813+00:00 | 2022-04-03T07:32:32.264844+00:00 | 92 | false | ```\n \n int h1=(curr[1]-\'0\');\n h1=h1+ (curr[0]-\'0\')*10;\n \n int min1=(curr[4]-\'0\');\n min1=min1+ (curr[3]-\'0\')*10;\n \n int min2=(right[4]-\'0\');\n min2=min2+ (right[3]-\'0\')*10;\n \n int h2=(right[1]-\'0\');\n h2= h2 + (right[0]-\'0\')*10;\n \n int ans=0;\n int min;\n if(min2>=min1)\n {\n ans+=(h2-h1);\n min=min2-min1;\n }\n else\n {\n ans+=(h2-h1)-1;\n min=(min2-min1 + 60);\n \n }\n \n while(min!=0)\n {\n if(min>=15)\n {\n min-=15;\n ans++;\n }\n else\n {\n if(min<15 && min>=5)\n {\n min-=5;\n ans++;\n }\n else\n {\n min-=1;\n ans++;\n }\n }\n }\n \n return ans;\n \n }\n``` | 1 | 0 | ['C', 'C++'] | 0 |
minimum-number-of-operations-to-convert-time | Convert to minutes | | Short and Clear | convert-to-minutes-short-and-clear-by-ra-1a18 | \nclass Solution {\npublic:\n int convertTime(string current, string correct) {\n int start_min = ((current[0]-\'0\')*10+(current[1]-\'0\'))*60+(curre | Rai_Utkarsh | NORMAL | 2022-04-03T07:08:02.610692+00:00 | 2022-04-03T07:08:02.610725+00:00 | 68 | false | ```\nclass Solution {\npublic:\n int convertTime(string current, string correct) {\n int start_min = ((current[0]-\'0\')*10+(current[1]-\'0\'))*60+(current[3]-\'0\')*10+(current[4]-\'0\');\n int end_min = ((correct[0]-\'0\')*10+(correct[1]-\'0\'))*60+(correct[3]-\'0\')*10+(correct[4]-\'0\');\n int diff = end_min-start_min, tot=0;\n for(auto x:{60,15,5,1})\n {\n tot+=diff/x;\n diff=max(0,diff-(diff/x)*x);\n }\n return tot;\n }\n};\n``` | 1 | 0 | ['C'] | 1 |
minimum-number-of-operations-to-convert-time | C++ Easy with Explanation | c-easy-with-explanation-by-akshatprogram-8pn1 | \n int convertTime(string current, string correct) {\n int ans = 0,currH=0,currM=0,finH=0,finM=0,diff_min,diff_hr;\n currH = stoi(current.subst | akshatprogrammer | NORMAL | 2022-04-03T07:07:33.459923+00:00 | 2022-04-03T07:07:33.459971+00:00 | 40 | false | \n int convertTime(string current, string correct) {\n int ans = 0,currH=0,currM=0,finH=0,finM=0,diff_min,diff_hr;\n currH = stoi(current.substr(0,2)); // extracting the current hour\n currM = stoi(current.substr(3,2)); // extracting the current minute\n finH = stoi(correct.substr(0,2)); // extracting final/correct hour\n finM = stoi(correct.substr(3,2)); // extracting final/correct minute\n if(currM > finM){\n diff_min = 60 - abs(currM - finM); // if curr minutes are more than calculating diff in minutes by moving 1 hour back\n }\n else{\n diff_min = abs(currM - finM); // else normal diff\n }\n if(currM > finM) currH++; \n diff_hr = abs(currH - finH); \n while(diff_min){ // calculating how much steps to make minutes correct\n if(diff_min >= 15) diff_min -= 15;\n else if(diff_min >= 5) diff_min -= 5;\n else diff_min -= 1;\n ans++;\n }\n \n while(diff_hr){ // calculating how much steps to make hour correct\n diff_hr -= 1;\n ans++;\n }\n \n return ans;\n }\n\n``` | 1 | 0 | ['Math', 'Greedy', 'C'] | 0 |
minimum-number-of-operations-to-convert-time | C++ Convert to Minutes | c-convert-to-minutes-by-slow_code-lwlm | \n* Convert to minutes and find the difference\n* check for every minute and store in result\n\t\n\tclass Solution {\n\tpublic:\n\n\t\tint get(string &s){\n\t\t | Slow_code | NORMAL | 2022-04-03T05:17:27.163972+00:00 | 2022-04-03T05:17:27.164019+00:00 | 19 | false | ```\n* Convert to minutes and find the difference\n* check for every minute and store in result\n```\t\n\tclass Solution {\n\tpublic:\n\n\t\tint get(string &s){\n\t\t\treturn stoi(s.substr(0,2))*60 + stoi(s.substr(3));\n\t\t}\n\n\t\tint convertTime(string current, string correct) {\n\t\t\tint res = 0,diff = get(correct)-get(current);\n\t\t\tfor(auto num:{60,15,5,1}){\n\t\t\t\tres+= diff/num;\n\t\t\t\tdiff%=num;\n\t\t\t}\n\t\t\treturn res;\n\t\t}\n\t}; | 1 | 0 | [] | 0 |
minimum-number-of-operations-to-convert-time | Easy C++ Solution | Converting Time to Minutes | easy-c-solution-converting-time-to-minut-mabu | First Convert the time format to minutes\n2. then try to minimize the no. of operations\n\n\nclass Solution {\npublic:\n int removecol(string s)\n {\n | nik2000 | NORMAL | 2022-04-03T04:42:57.310999+00:00 | 2022-04-03T04:42:57.311028+00:00 | 33 | false | 1. First Convert the time format to minutes\n2. then try to minimize the no. of operations\n\n```\nclass Solution {\npublic:\n int removecol(string s)\n {\n if(s.size() == 4)\n s.replace(1,1,"");\n if(s.size() == 5)\n s.replace(2,1,"");\n \n return stoi(s);\n }\n int convertTime(string current, string correct) {\n \n int time1 = removecol(current);\n int time2 = removecol(correct);\n \n int min1 = (time1/100)*60 + time1%100;\n int min2 = (time2/100)*60 + time2%100; \n \n int op = 0;\n \n vector<int> timarr = {60,15,5,1};\n int diff = min2 - min1;\n for(int i=0; i<timarr.size(); i++)\n {\n op += (diff) / timarr[i];\n diff = diff % timarr[i];\n }\n \n return op;\n }\n};\n```\n\nPLS UPVOTE ME!!! | 1 | 0 | ['C'] | 0 |
minimum-number-of-operations-to-convert-time | 2 lines simple solution || C++ || explanation | 2-lines-simple-solution-c-explanation-by-huxy | No need to convert string characters to integer. Just subtract the corresponding pairs of digits\n Convert time into minutes\n Apply the operations using 60, 15 | arihantjain01 | NORMAL | 2022-04-03T04:37:16.332728+00:00 | 2022-04-03T04:48:58.664218+00:00 | 39 | false | * No need to convert string characters to integer. Just subtract the corresponding pairs of digits\n* Convert time into minutes\n* Apply the operations using 60, 15, 5, 1 as described\n```\nclass Solution {\npublic:\n int convertTime(string current, string correct) {\n int n = ((correct[0]-current[0])*600) + ((correct[1]-current[1])*60) + ((correct[3]-current[3])*10) + correct[4]-current[4];\n return (n / 60) + (n % 60 / 15) + (n % 60 % 15 / 5) + (n % 60 % 15 % 5);\n }\n};\n``` | 1 | 0 | ['C'] | 0 |
minimum-number-of-operations-to-convert-time | Python easy and clear solution | python-easy-and-clear-solution-by-kruzer-uhgq | First we find difference between the hours and minutes as h and m\n2. Convert the difference in hours to minutes by h*60, then add it with the m.\n1. Use a coun | kruzerknight | NORMAL | 2022-04-03T04:22:12.124566+00:00 | 2022-04-03T04:26:49.852506+00:00 | 54 | false | 1. First we find difference between the hours and minutes as ```h``` and `m`\n2. Convert the difference in hours to minutes by `h*60`, then add it with the `m`.\n1. Use a counter variable, here taking it as `count=0` \n1. Given we can add `60,15,5,1 `minutes in a a single step, so to find minimum no steps we check from 60 minutes then 15 then 5 and atlast 1 minute.\n1. If the total difference in minutes is larger than 60, we add the quotient `dit//60` of to count to see how many 60 minutes we have added, and store the remainder in `dit` .\n1. Ex: If we have 75, `75//60 =1 `, so count is increased by 1, then the remainder `15` is stored in dit.\n1. if we have dit as 120, count would be increased by 2, since `120//60=2`.\n1. Same happens for 15, 5 and 1 only the remaining minutes is passed through dit, thus minimizing no:of operations to convert.\nLiked the Solution, Give a Upvote :)\n```\ndef convertTime(self, current: str, correct: str) -> int:\n if current==correct:\n return 0\n h=int(correct[:2])-int(current[:2])\n m=int(correct[3:])-int(current[3:])\n dit=h*60+m\n count=0\n if dit>=60:\n count+=dit//60\n dit=dit%60\n if dit>=15:\n count+=dit//15\n dit=dit%15 \n if dit>=5:\n count+=dit//5\n dit=dit%5 \n if dit>=1:\n count+=dit//1\n dit=dit%1 \n return count\n``` | 1 | 0 | ['Python'] | 0 |
minimum-number-of-operations-to-convert-time | EASY | PYTHON | TWO LINES| | easy-python-two-lines-by-akshayj2-80fg | \nclass Solution:\n def convertTime(self, current: str, correct: str) -> int:\n final = (int(correct[0:2]) - int(current[0:2]))*60 + int(correct[3:]) | akshayj2 | NORMAL | 2022-04-03T04:19:18.647921+00:00 | 2022-04-03T04:20:10.584248+00:00 | 37 | false | ```\nclass Solution:\n def convertTime(self, current: str, correct: str) -> int:\n final = (int(correct[0:2]) - int(current[0:2]))*60 + int(correct[3:]) - int(current[3:])\n return final//60 + (final%60)//15 + ((final%60)%15)//5 + (((final%60)%15)%5)//1\n```\n\nBasically convert into minutes and count. | 1 | 0 | [] | 0 |
minimum-number-of-operations-to-convert-time | Easy Java solution by converting to minutes | 2ms | beats 100% | easy-java-solution-by-converting-to-minu-yj4c | Convert the time to minutes and then take the difference.\nDivide difference starting with the highest increment and then take the mod of of difference by incre | annonnym | NORMAL | 2022-04-03T04:11:57.946806+00:00 | 2022-04-03T04:17:17.660282+00:00 | 34 | false | Convert the time to minutes and then take the difference.\nDivide difference starting with the highest increment and then take the mod of of difference by increment to find the next possible increase. \n\n```\n public static int convertTime(String current, String correct) {\n int currentMinute = Integer.parseInt(current.split(":")[0]) * 60 + Integer.parseInt(current.split(":")[1]);\n int correctMinute = Integer.parseInt(correct.split(":")[0]) * 60 + Integer.parseInt(correct.split(":")[1]);\n int diff = correctMinute - currentMinute;\n int count = 0;\n int[] increments = new int[]{60, 15, 5, 1};\n\n for (int increment : increments){\n count += diff / increment;\n diff %= increment;\n }\n return count;\n }\n\n``` | 1 | 0 | ['Java'] | 0 |
minimum-number-of-operations-to-convert-time | C++ easy minutes | c-easy-minutes-by-prilily-taia | \nclass Solution {\npublic:\n int convertTime(string current, string correct) {\n int h1=(int)(current[0]*10+current[1]);\n int h2=(int)(correc | prilily | NORMAL | 2022-04-03T04:04:43.070913+00:00 | 2022-04-03T04:04:43.070955+00:00 | 92 | false | ```\nclass Solution {\npublic:\n int convertTime(string current, string correct) {\n int h1=(int)(current[0]*10+current[1]);\n int h2=(int)(correct[0]*10+correct[1]);\n int m1=(int)(current[3]*10+current[4]);\n int m2=(int)(correct[3]*10+correct[4]);\n \n int hour_diff=h2-h1-1;\n int min_diff=m2+(60-m1);\n \n if(min_diff>=60){\n hour_diff++;\n min_diff=min_diff-60;\n }\n \n if(hour_diff==0 && min_diff==0)return 0;\n \n int ans=0;\n if(hour_diff>0)\n {\n ans+=hour_diff; //add 60 minutes\n }\n while(min_diff!=0){\n if(min_diff-15>=0){\n ans++;\n min_diff-=15;\n }\n else if(min_diff-5>=0){\n ans++;\n min_diff-=5;\n }\n else if(min_diff-1>=0){\n ans++;\n min_diff-=1;\n }\n } \n \n return ans;\n }\n};\n```\n\nPlease let me know how to improve my solution.\n | 1 | 0 | ['C'] | 0 |
minimum-number-of-operations-to-convert-time | Convert to Minutes || Time-O(n) || C++ | convert-to-minutes-time-on-c-by-shishir_-zcf6 | \nclass Solution {\npublic:\n int convertTime(string current, string correct) {\n int ch=0;\n int cm=0;\n int crh=0;\n int crm=0; | Shishir_Sharma | NORMAL | 2022-04-03T04:03:50.499661+00:00 | 2022-04-03T04:09:13.671412+00:00 | 129 | false | ```\nclass Solution {\npublic:\n int convertTime(string current, string correct) {\n int ch=0;\n int cm=0;\n int crh=0;\n int crm=0;\n int count=0;\n ch=stoi(current.substr(0,2));\n cm=stoi(current.substr(3,2));\n \n int csec=ch*60+cm;\n crh=stoi(correct.substr(0,2));\n crm=stoi(correct.substr(3,2));\n int crsec=crh*60+crm;\n \n int req=crsec-csec;\n if(req<0)\n {\n req+=60*24;\n }\n \n while(req>0)\n {\n if(req>=60)\n {\n int left=req%60;\n count+=req/60;\n req=left;\n }\n else if(req>=15)\n {\n int left=req%15;\n count+=req/15;\n req=left;\n }\n else if(req>=5)\n {\n int left=req%5;\n count+=req/5;\n req=left;\n }\n else if(req>=1)\n { count+=req/1;\n req=0;\n }\n }\n return count;\n \n }\n};\n```\n**Like it? Please Upvote ;-)** | 1 | 0 | ['C', 'C++'] | 0 |
minimum-number-of-operations-to-convert-time | [JavaScript] 2224. Minimum Number of Operations to Convert Time | javascript-2224-minimum-number-of-operat-vkes | \n---\n\nWeekly Contest 287\n\n- Q1 answer\n - https://leetcode.com/problems/minimum-number-of-operations-to-convert-time/discuss/1908839/JavaScript-2224.-Mini | pgmreddy | NORMAL | 2022-04-03T04:03:11.468508+00:00 | 2022-04-03T06:49:07.033979+00:00 | 205 | false | \n---\n\n**Weekly Contest 287**\n\n- Q1 answer\n - https://leetcode.com/problems/minimum-number-of-operations-to-convert-time/discuss/1908839/JavaScript-2224.-Minimum-Number-of-Operations-to-Convert-Time\n - **below**\n - difficult, for an easy problem\n- Q2 answer\n - https://leetcode.com/problems/find-players-with-zero-or-one-losses/discuss/1908871/JavaScript-2225.-Find-Players-With-Zero-or-One-Losses\n- Q3 answer\n - https://leetcode.com/problems/maximum-candies-allocated-to-k-children/discuss/1908890/JavaScript-2226.-Maximum-Candies-Allocated-to-K-Children\n\n---\n\nHope it is simple to understand.\n\n---\n\n```\nvar convertTime = function (curr, target) {\n const getmins = (str) => +str.slice(0, 2) * 60 + +str.slice(3, 5); // hh:mm to hh*60 + mm\n\n curr = getmins(curr);\n target = getmins(target);\n let minsDiff = target - curr;\n\n let count = 0;\n for (const mins of [60, 15, 5, 1])\n while (minsDiff >= mins) {\n minsDiff -= mins;\n count++;\n }\n return count;\n};\n```\n\n---\n | 1 | 1 | ['JavaScript'] | 0 |
minimum-number-of-operations-to-convert-time | Python intuitive easy solution | python-intuitive-easy-solution-by-aaryan-3cq5 | \nclass Solution:\n def convertTime(self, current: str, correct: str) -> int:\n current = current.split(\':\')\n correct = correct.split(\':\') | aaryan13g | NORMAL | 2022-04-03T04:02:38.292839+00:00 | 2022-04-03T04:02:38.292864+00:00 | 85 | false | ```\nclass Solution:\n def convertTime(self, current: str, correct: str) -> int:\n current = current.split(\':\')\n correct = correct.split(\':\')\n minute_diff = int(correct[1]) - int(current[1])\n ops = 0\n if minute_diff >= 0:\n ops += int(correct[0]) - int(current[0])\n else:\n ops += int(correct[0]) - int(current[0]) - 1\n minute_diff += 60\n ops += minute_diff // 15\n minute_diff = minute_diff % 15\n ops += minute_diff // 5\n minute_diff = minute_diff % 5\n ops += minute_diff\n return ops\n``` | 1 | 0 | ['Python'] | 0 |
minimum-number-of-operations-to-convert-time | [Python] Convert to minutes and reduce | python-convert-to-minutes-and-reduce-by-hhpiw | \ndef convertTime(self, current: str, correct: str) -> int:\n x = int(current[:2]) * 60 + int(current[3:])\n y = int(correct[:2]) * 60 + int(corre | SailorMoons | NORMAL | 2022-04-03T04:00:49.231418+00:00 | 2022-04-03T04:01:05.142114+00:00 | 107 | false | ```\ndef convertTime(self, current: str, correct: str) -> int:\n x = int(current[:2]) * 60 + int(current[3:])\n y = int(correct[:2]) * 60 + int(correct[3:])\n res = 0\n diff = abs(x - y)\n while diff > 0:\n if diff >= 60:\n diff -= 60\n elif diff >= 15:\n diff -= 15\n elif diff >= 5:\n diff -= 5\n elif diff >= 1:\n diff -= 1\n res += 1\n return res\n``` | 1 | 0 | [] | 0 |
minimum-number-of-operations-to-convert-time | Better then other. 100% | better-then-other-100-by-dj_crush-sh9q | Complexity
Time complexity:
O(1)
Space complexity:
O(1)
Code | dj_crush | NORMAL | 2025-04-08T18:28:29.670354+00:00 | 2025-04-08T18:28:29.670354+00:00 | 3 | false | # Complexity
- Time complexity:
O(1)
- Space complexity:
O(1)
# Code
```cpp []
class Solution {
public:
int convertTime(string current, string correct) {
int t1 = ConvertTimeToNumber(current);
int t2 = ConvertTimeToNumber(correct);
std::vector<int> t = { 60, 15, 5, 1 };
int c = 0;
for (int i = 0; i < t.size();)
{
t1 += t[i];
++c;
if (t1 > t2)
{
--c;
t1 -= t[i];
++i;
}
}
return c;
}
int ConvertTimeToNumber(const std::string& s)
{
return ((s[0] - '0') * 10 + s[1] - '0') * 60 + (s[3] - '0') * 10 + (s[4] - '0');
}
};
``` | 0 | 0 | ['C++'] | 0 |
minimum-number-of-operations-to-convert-time | Minimum Number of Operations to Convert Time | minimum-number-of-operations-to-convert-vv92x | IntuitionApproachComplexity
Time complexity:
Space complexity:
Code | jyenduri | NORMAL | 2025-04-01T21:19:49.558149+00:00 | 2025-04-01T21:19:49.558149+00:00 | 2 | false | # Intuition
<!-- Describe your first thoughts on how to solve this problem. -->
# Approach
<!-- Describe your approach to solving the problem. -->
# Complexity
- Time complexity:
<!-- Add your time complexity here, e.g. $$O(n)$$ -->
- Space complexity:
<!-- Add your space complexity here, e.g. $$O(n)$$ -->
# Code
```javascript []
/**
* @param {string} current
* @param {string} correct
* @return {number}
*/
var convertTime = function(current, correct) {
let [firstCurrent, secondCurrent] = current.split(':')
firstCurrent= parseInt(firstCurrent)*60
secondCurrent= parseInt(secondCurrent)
const currentTimeInMin =firstCurrent +secondCurrent
let [firstCorrect, secondCorrect] = correct.split(':')
firstCorrect= parseInt(firstCorrect)*60
secondCorrect= parseInt(secondCorrect)
const correntTimeInMin =firstCorrect +secondCorrect
let minutesDifference = correntTimeInMin- currentTimeInMin
let numOfOp = 0
while(minutesDifference != 0){
if(minutesDifference >= 60){
minutesDifference= minutesDifference-60
++numOfOp
}else if(minutesDifference >= 15){
minutesDifference= minutesDifference-15
++numOfOp
}else if(minutesDifference >= 5){
minutesDifference= minutesDifference-5
++numOfOp
}else if(minutesDifference >= 1){
minutesDifference= minutesDifference-1
++numOfOp
}
}
return numOfOp
};
``` | 0 | 0 | ['JavaScript'] | 0 |
minimum-number-of-operations-to-convert-time | Python Solution Most easy to Understand, Beats 100% Time = O(1), Space = O(1), Video Solution 4 min | python-solution-most-easy-to-understand-86r2e | IntuitionSimply convert the given time (both current and correct) to minutes from 00:00 hours as reference. For example, 02:30 is 150 minutes.ApproachNow take t | Parth_Modi24 | NORMAL | 2025-04-01T14:52:32.793674+00:00 | 2025-04-01T14:52:32.793674+00:00 | 4 | false | # Intuition
<!-- Describe your first thoughts on how to solve this problem. -->
Simply convert the given time (both `current` and `correct`) to minutes from `00:00` hours as reference. For example, `02:30` is 150 minutes.
# Approach
<!-- Describe your approach to solving the problem. -->
Now take the difference of `current` and `correct` times. For our testcase 1, we get `diff = 125`. Now our task is to make 125 to 0 using only steps of either `1, 5, 15, 60` and we need to return minimum number of steps needed to achieve this.
Simply we will go greedy and start with maximum step `60` followed by `15`, `5`, `1` in descending order.
For example: `diff`= 125, we can use `60` twice so we reach `120`, now we use simply `5` once and we are done. So steps needed are `3`.
We use `ops = [60, 15, 5, 1]`, and iterate over it and updated our `ans` variable as follows:
- Iter 1: ans = ans + 125//60 # ans = 2
diff = 125%60 = 5
- Iter 2: ans = 2 + 5//15 = 2 # ans = 2 (no change)
diff = 5%15 = 5 (no change)
- Iter 3: ans = 2 + 5//5 = 3
diff = 5%5 = 0
- Iter 4: ans = 3 + 0//1 = 3(no change)
diff = 0%1 = 0 (no change)
At the end we return `ans = 3`
# Complexity
- Time complexity:
<!-- Add your time complexity here, e.g. $$O(n)$$ -->
O(1): we always iterate 4 times no matter what the input is!
- Space complexity:
<!-- Add your space complexity here, e.g. $$O(n)$$ -->
O(1): we use only constant space (one list of 4 elements for the ops array and one `diff` variable). Note that the space for the `ans` (output) variable is not counted in space complexity calculations.
# Code
```python3 []
class Solution:
    def convertTime(self, current: str, correct: str) -> int:
        """Return the fewest additions of 60/15/5/1 minutes needed to turn
        `current` into `correct` (both "HH:MM", with correct >= current)."""
        remaining = self.getMinutes(correct) - self.getMinutes(current)
        operations = 0
        # Greedy: for each step size (largest first) take as many whole
        # steps as fit, then continue with the leftover minutes.
        for step in (60, 15, 5, 1):
            taken, remaining = divmod(remaining, step)
            operations += taken
        return operations
    def getMinutes(self, s: str) -> int:
        # Convert an "HH:MM" string to total minutes past midnight.
        return 60 * int(s[:2]) + int(s[3:])
``` | 0 | 0 | ['Python3'] | 0 |
delete-duplicate-folders-in-system | C++ Tree building and trimming | c-tree-building-and-trimming-by-lzl12463-jf4z | See my latest update in repo LeetCode\n\n## Solution 1. DFS\n\n1. Build Tree: Build a folder tree based on the paths. The process is similar to the Trie buildin | lzl124631x | NORMAL | 2021-07-25T04:03:30.784170+00:00 | 2021-07-25T07:05:09.855018+00:00 | 6,746 | false | See my latest update in repo [LeetCode](https://github.com/lzl124631x/LeetCode)\n\n## Solution 1. DFS\n\n1. **Build Tree**: Build a folder tree based on the `paths`. The process is similar to the Trie building process.\n2. **Dedupe**: Use post-order traversal to visit all the nodes. If we\'ve seen the subfolder structure before, mark the node as deleted.\n3. **Generate Paths**: DFS to generate the output. We skip the nodes that have been deleted.\n\nUpdate 1: To ensure we visit the subfolders in the same order, changed `Node::next` to `unordered_map` to `map`. (Testcase: `[["a"],["a","a"],["a","b"],["a","b","a"],["b"],["b","a"],["b","a","a"],["b","b"]]`. Thanks @wshack)\n\nUpdate 2: To ensure the subfolder structure string only map to a unique tree structure, changed the encoding to use parenthesis instead, e.g. `(root(firstChild)(secondChild)...)`. (Testcase: `[["r","x"],["r","x", "b"],["r","x","b","a"],["r", "y"],["r","y", "a"],["r","y", "b"],["r"]]`. Thanks @Asagi)\n\n**Complexity Analysis**\n\nAssume `N` is the number of folders, `W` is the maximum length of folder name, `D` is the deepest folder depth, and `C` is the maximum number of direct **c**hild folders.\n\n1. **Build Tree**: We need to add all the `N` folders, each of which takes `O(DWlogC)` time. So overall it takes `O(NDWlogC)` time, and `O(NW)` space.\n2. **Dedupe**: We traverse the `N` folders in post-order. The maximum length of `subfolder` structure string is roughly `O(NW)`, so each node visit need `O(NW)` time to check if it\'s a duplicate. 
The overall time complexity is `O(N^2 * W)` and the space complexity is `O(N^2 * W)`.\n3. **Generate Paths**: In the worst case we traverse the `N` nodes again. Each visit takes `O(W)` time to update the current `path` and `O(DW)` time to update the answer. So overall the time complexity is `O(NDW)` and space complexity is `O(DW)` for the temporary `path`.\n\n```cpp\n// OJ: https://leetcode.com/contest/weekly-contest-251/problems/delete-duplicate-folders-in-system/\n// Author: github.com/lzl124631x\n// Time: O(NDWlogC + N^2 * W) where `N` is the number of folders, `W` is the maximum length of folder name, \n// `D` is the deepest folder depth, and `C` is the maximum number of direct child folders.\n// Space: O(N^2 * W)\nstruct Node {\n string name;\n map<string, Node*> next; // mapping from folder name to the corresponding child node.\n bool del = false; // whether this folder is deleted.\n Node(string n = "") : name(n) {}\n};\nclass Solution {\n void addPath(Node *node, vector<string> &path) { // Given a path, add nodes to the folder tree. This is similar to the Trie build process.\n for (auto &s : path) {\n if (node->next.count(s) == 0) node->next[s] = new Node(s);\n node = node->next[s];\n }\n }\n unordered_map<string, Node*> seen; // mapping from subfolder structure string to the first occurrence node.\n string dedupe(Node *node) { // post-order traversal to dedupe. 
If we\'ve seen the subfolder structure before, mark it as deleted.\n string subfolder;\n for (auto &[name, next] : node->next) {\n subfolder += dedupe(next);\n }\n if (subfolder.size()) { // leaf nodes should be ignored\n if (seen.count(subfolder)) { // if we\'ve seen this subfolder structure before, mark them as deleted.\n seen[subfolder]->del = node->del = true;\n } else {\n seen[subfolder] = node; // otherwise, add the mapping\n }\n }\n return "(" + node->name + subfolder + ")"; // return the folder structure string of this node.\n }\n vector<vector<string>> ans;\n vector<string> path;\n void getPath(Node *node) {\n if (node->del) return; // if the current node is deleted, skip it.\n path.push_back(node->name);\n ans.push_back(path);\n for (auto &[name, next] : node->next) {\n getPath(next);\n }\n path.pop_back();\n }\npublic:\n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& A) {\n Node root;\n for (auto &path : A) addPath(&root, path);\n dedupe(&root);\n for (auto &[name, next] : root.next) getPath(next);\n return ans;\n }\n};\n``` | 78 | 0 | [] | 20 |
delete-duplicate-folders-in-system | Java Solution with Explanation | java-solution-with-explanation-by-profch-ftj5 | First build a tree of folders as shown in the problem description\n\nNext generate a key for each node in the tree\nThe key should be a concatenation of the key | profchi | NORMAL | 2021-07-25T04:01:47.287387+00:00 | 2021-07-25T04:01:47.287435+00:00 | 4,418 | false | First build a tree of folders as shown in the problem description\n\nNext generate a key for each node in the tree\nThe key should be a concatenation of the keys of all it\'s children. It is important to sort all it\'s children before concatenating the key. If the insertion order is a/x , a/y , b/y, b/x. This would ensure that the key generated for both a and b is key(x) + key(y) in that order;\nWhile genrating the key we also add the keys to a global map\n\nAfter that perform another transveral and set the delete flag of nodes that are not leafs and their keys are duplicate\n\nFinally for each path initially given, if it\'s possible to go to the end of the path without passing a node whose deleted flag has been set, add it to the result\n\nPlease upvote if you like the solution :) \n```\n\nclass Solution {\n Folder root = new Folder("");\n Map<String, Integer> keys = new HashMap<>();\n \n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n for (List<String> path : paths){\n addPath(path);\n }\n \n for (Folder f : root.list){\n generateKey(f);\n }\n \n for (Folder f : root.list){\n updateDeleteStatus(f);\n }\n \n List<List<String>> results = new ArrayList<>();\n \n for (List<String> path : paths){\n if (isValid(path))\n results.add(path);\n }\n \n return results;\n }\n \n private boolean isValid(List<String> path){\n Folder current = root;\n \n for (String f : path){\n current = current.map.get(f);\n \n if (current.del)\n return false;\n }\n \n return true;\n }\n \n private void updateDeleteStatus(Folder f){\n if (f.list.size() > 0 && keys.get(f.key) > 1){\n 
f.del = true;\n return;\n }\n \n for (Folder fold : f.list){\n updateDeleteStatus(fold);\n }\n }\n \n private String generateKey(Folder fold){\n StringBuilder sb = new StringBuilder();\n \n if (fold.list.size() == 0)\n return sb.toString();\n \n // sort to order matches\n Collections.sort(fold.list, (a, b) -> a.name.compareTo(b.name));\n \n for (Folder f : fold.list){\n sb.append(\'(\');\n sb.append(f.name + generateKey(f));\n sb.append(\')\');\n }\n \n String key = sb.toString();\n fold.key = key;\n keys.put(key, keys.getOrDefault(key, 0) + 1);\n \n return key;\n }\n \n private void addPath(List<String> path){\n Folder current = root;\n \n for (String f : path){\n if (!current.map.containsKey(f)){\n Folder fold = new Folder(f);\n current.map.put(f, fold);\n current.list.add(fold);\n }\n \n current = current.map.get(f);\n }\n }\n}\nclass Folder{\n String name;\n Map<String, Folder> map;\n List<Folder> list;\n String key;\n boolean del;\n \n Folder(String name){\n this.name = name;\n map = new HashMap<>();\n list = new ArrayList<>();\n key = "";\n del = false;\n }\n}\n``` | 71 | 1 | [] | 8 |
delete-duplicate-folders-in-system | [Python] Serialize subtrees + complexity analysis, explained | python-serialize-subtrees-complexity-ana-k23z | In this problem we need to use problem constrains, especially this one 1 <= sum(paths[i][j].length) <= 2 * 10^5, which is in fact quite powerful: it means that | dbabichev | NORMAL | 2021-07-25T08:12:27.529760+00:00 | 2021-07-26T07:22:56.543415+00:00 | 3,461 | false | In this problem we need to use problem constrains, especially this one `1 <= sum(paths[i][j].length) <= 2 * 10^5`, which is in fact quite powerful: it means that we can not have very long level tree, so tree in some sense balanced more or less. The idea is to hash all possible subtrees.\n\n1. `dfs1(node)` is recursive function, which will create serialization of tree in special form, for example for tree ` paths = [["a"],["c"],["d"],["a","b"],["c","b"],["d","a"]]`, for root we will have `(a(b())c(b())d(a()))`: the structure is the following: we have `(a(b())` + `c(b())` + `d(a()))`, where each part is serialization of subtree. This function will return us serialization of **all** non-empty subtrees (if it is empty, we do not need it).\n2. Also in our `Node` structure we have flag `dl`, which says us if node was deleted or not: instead of deleting node it is easier for me to change this flag. So, function `dfs2` used to traverse tree and collect all paths in recursive way. Note, that we collect only paths which were not deleted.\n3. Also we need to create tree (actually we do it in the beginning): we iterate over all `path in sorted(paths)` and attach nodes, like we did in classical trie. Note that we need to sort our paths, so the data for the same folder will be hashed in the same way.\n4. Then we run `dfs1(root)`.\n5. After we run `dfs1`, we have dictionaries of hashes `pattern`, so if we see that for some pattern it happens more than two times, we mark `.dl` field in our trie as `True`: it means, that this node was deleted (lazily)\n6. 
Finally, we just run `dfs2(root, [])` and return `ans`.\n\n#### Compleixity\nDefine by `M` is total number of elements in `paths[i][j]` for every `i` and `j`. Then if we for the moment we assume that each folder name has `O(1)` length, then we can show that total length of hashes is `O(M)` as well! The logic is the following: Imagine some node **X** in our tree. Then how many times we will meet it in our pathes is equal to number of nodes in subtree for this node! Now, if we remember than each folder name can be upto `k <= 10` size, then we have complexity `O(M*k)` to build hashes. Also we have the same complexity to run `dfs2`: total length of paths can not be more that it was in the beginning, we removed some of them. Also we have a part where we sort `paths`, it has complexity `O(X*log(T))`, where `X = sum(paths[i][j].length)` and `T` is length of paths. However it is quite loose bound, and in practice it works very fast, due to fast implementation of sort function in python.\n\nThanks @617280219 for pointing out my error in previous analysis. Indeed, if we consider the case of tree:\n`[a1] -> [a2] -> ... -> [a_sqrt(M)] -> [b1b2...bk]`, then:\n1. Sum of all numbers of elements in `paths[i][j]` will be `1 + 2 + .. + (sqrt(M)+1) = O(M)`.\n2. All hashes will take `k + k + 1 + ... 
+ k + sqrt(M) = M + k*sqrt(M)`, so we have extra `k*sqrt(M)` factor here.\n\nWe can make this example even worse, where we have example for `Omega(M*k)` complexity.\n\n\n\n\n#### Code\n```python\nclass Node:\n def __init__(self):\n self.child = defaultdict(Node)\n self.dl = False\n\nclass Solution:\n def deleteDuplicateFolder(self, paths):\n def dfs1(node):\n key = "(" + "".join(c + dfs1(node.child[c]) for c in node.child) + ")"\n if key != "()": pattern[key].append(node)\n return key\n \n def dfs2(node, path):\n for c in node.child:\n if not node.child[c].dl:\n dfs2(node.child[c], path + [c])\n if path: ans.append(path[:])\n \n pattern, root, ans = defaultdict(list), Node(), []\n \n for path in sorted(paths):\n node = root\n for c in path: node = node.child[c]\n \n dfs1(root)\n \n for nodes in pattern.values():\n if len(nodes) > 1:\n for i in nodes: i.dl = True\n \n dfs2(root, [])\n return ans\n```\n\nIf you have any questions, feel free to ask. If you like solution and explanations, please **Upvote!** | 64 | 3 | ['Depth-First Search'] | 4 |
delete-duplicate-folders-in-system | [Python] Trie & Serialize subtrees - Clean & Concise | python-trie-serialize-subtrees-clean-con-ir1d | Idea\n- Use TrieNode to create the graph of folder structure.\n- Then dfs on our graph and serialize subtrees as strings, then add nodes which has the same seri | hiepit | NORMAL | 2021-07-27T04:43:17.042906+00:00 | 2021-07-27T05:53:43.675219+00:00 | 2,180 | false | **Idea**\n- Use `TrieNode` to create the graph of folder structure.\n- Then dfs on our graph and serialize subtrees as strings, then add nodes which has the same serialize string into a dictionary, let name it `seen`.\n- Traverse the `seen` dictionary, if nodes has the same serialize string, then it\'s duplicated subtree, we mark those nodes as `deleted`.\n- Traverse the whole graph again, if the node is not deleted, we add the paths to our result `ans`.\n\n```python\nclass TrieNode:\n def __init__(self):\n self.child = defaultdict(TrieNode)\n self.deleted = False\n def addWord(self, word):\n curr = self\n for c in word: curr = curr.child[c]\n\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n def serialize(root, seen):\n if not root.child: return ""\n keys = []\n for c, child in root.child.items():\n keys.append(c + ":" + serialize(child, seen))\n key = "(" + "".join(keys) + ")"\n seen[key].append(root)\n return key\n\n def dfsGetValidPaths(root, path, out):\n for c, child in root.child.items():\n if not child.deleted:\n out.append(path + [c])\n dfsGetValidPaths(child, path + [c], out)\n\n trieRoot, seen = TrieNode(), defaultdict(list)\n # Create graph\n for path in sorted(paths): # Sort paths to get correct orders of children of a subtree\n trieRoot.addWord(path)\n\n # Find duplicated subtrees\n serialize(trieRoot, seen)\n\n # Mark duplicated subtree nodes as deleted\n for nodes in seen.values():\n if len(nodes) >= 2:\n for node in nodes: # Mark duplicated subtrees as deleted\n node.deleted = True\n\n ans = []\n 
dfsGetValidPaths(trieRoot, [], ans)\n return ans\n``` | 39 | 0 | [] | 2 |
delete-duplicate-folders-in-system | C++ | Short step by step easy to understand | Build a Trie | c-short-step-by-step-easy-to-understand-gnlw4 | Note: I could not solve this completely during contest. Try to grasp it one step at a time.\n\nAlgorithm\n1. We will be building a trie. Every node of the trie | shourabhpayal | NORMAL | 2021-07-25T06:47:44.645363+00:00 | 2021-07-27T07:42:46.034115+00:00 | 1,641 | false | **Note**: I could not solve this completely during contest. Try to grasp it one step at a time.\n\n**Algorithm**\n1. We will be building a trie. Every node of the trie will store :\n\t```\n\tend -> Is the path ending here\n\texclude -> should this be excluded/pruned while constructing the final answer\n\tname -> name of the directory\n\tchild -> the children we have for this node\n\t```\n1. Using ```paths``` contruct the Trie with help of ```insert``` function.\n2. Perform ```dfs``` on Trie and obtain ```childPath``` which is a serialization of the subtree rooted at current node excluding current node.\n3. Record the current node as parent of ```childPath``` in ```parentMap```.\n4. For every key in ```parentMap```, mark ```exclude = true``` for all nodes if size of value (vector of parents) > 1 (Their subtrees have occured more than once and hence need to be excluded).\n5. Use ```getans``` to build our answer. (Here we check if ```root->end = true```)\n\n**Note:** We use ```map<string, node*> child``` instead of ```unordered_map<string, node*> child``` to guarantee correct serialization. 
Thanks @chenjun15\n\n**Code**\n```\nclass node{\n public: \n map<string, node*> child;\n bool end;\n bool exclude;\n string name;\n node(string fname){\n exclude = false;\n end = false;\n name = fname;\n }\n};\n\nclass Solution {\npublic:\n node *root;\n unordered_map<string, vector<node*>> parentMap;\n vector<vector<string>> ans;\n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n root = new node("/");\n for(int i = 0 ; i < paths.size(); i++) insert(root, paths[i], 0);\n dfs(root);\n for(auto &e : parentMap)\n if(e.second.size() > 1)\n for(auto &parent : e.second)\n parent->exclude = true;\n vector<string> temp;\n getans(root, temp);\n return ans;\n }\n \n void getans(node *root, vector<string> &temp){\n if(root->exclude) return;\n if(root->name != "/") temp.push_back(root->name);\n if(root->end) ans.push_back(temp);\n for(auto &e : root->child)\n getans(e.second, temp);\n if(root->name != "/") temp.pop_back();\n }\n \n string dfs(node *root){\n string childPath = "";\n for(auto &e : root->child){\n childPath += dfs(e.second);\n childPath += "|";\n }\n if(childPath != "") parentMap[childPath].push_back(root);\n return root->name + "|" + childPath;\n }\n \n void insert(node *root, vector<string> &path, int pos){\n if(pos < path.size()){\n if(root->child.count(path[pos]) == 0) root->child[path[pos]] = new node(path[pos]);\n insert(root->child[path[pos]], path, pos+1);\n }\n else if(pos == path.size()) root->end = true;\n }\n};\n``` | 18 | 0 | ['Trie', 'C'] | 1 |
delete-duplicate-folders-in-system | Clean Java | clean-java-by-rexue70-qis9 | please see @Ericxxt \'s answer in the comment, lc has added more testcases and below answer cannot pass.\n\nwe have 4 steps to do this task\n\n1. add all the pa | rexue70 | NORMAL | 2021-07-25T04:53:37.027137+00:00 | 2021-08-04T09:21:49.343627+00:00 | 1,264 | false | please see @Ericxxt \'s answer in the comment, lc has added more testcases and below answer cannot pass.\n\nwe have 4 steps to do this task\n\n1. add all the path to Trie (if you are not familiar with this, please refer to https://leetcode.com/problems/implement-trie-prefix-tree/)\n2. we generate hashkey for each Trie Node, hashKey is all children name this node has, we just comcatenate all its children\'s name together\n3. we use hashmap to see if anyNode hashKey appear more than once, remove it from the Trie\n4. convert back Trie to individual path list, this use normal backtracking\n\n```\nclass Solution {\n Trie root = new Trie();\n Map<String, Integer> map = new HashMap<>();\n List<List<String>> res = new ArrayList<>();\n \n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n\n //add to Trie\n addToTrie(root, paths);\n\n //add hashKey use suffix\n generateHash(root);\n \n //remove duplicate path\n remove(root);\n \n //convert back\n convertBack(root, new ArrayList<>());\n \n return res;\n }\n \n private void addToTrie(Trie root, List<List<String>> paths) {\n for (List<String> path : paths) {\n Trie cur = root;\n for (String s : path) {\n if (!cur.children.containsKey(s)) cur.children.put(s, new Trie(s));\n cur = cur.children.get(s);\n }\n }\n }\n \n private void convertBack(Trie cur, List<String> level) {\n if (level.size() != 0) res.add(new ArrayList<>(level));\n for (String childName : cur.children.keySet()) {\n level.add(childName);\n Trie child = cur.children.get(childName);\n convertBack(child, level);\n level.remove(level.size() - 1);\n }\n }\n \n private void remove(Trie cur) {\n if 
(cur.children.size() == 0) return;\n List<String> toDelete = new ArrayList<>();\n for (String childName : cur.children.keySet()) {\n Trie child = cur.children.get(childName);\n remove(child);\n if (map.containsKey(child.key) && map.get(child.key) > 1) toDelete.add(childName);\n }\n for (String s : toDelete) cur.children.remove(s);\n }\n \n private String generateHash(Trie cur) {\n StringBuilder sb = new StringBuilder();\n if (cur.children.size() == 0) return sb.toString();\n for (String childName : cur.children.keySet()) {\n Trie child = cur.children.get(childName);\n generateHash(child);\n sb.append(childName).append(child.key);\n }\n cur.key = sb.toString();\n map.put(cur.key, map.getOrDefault(cur.key, 0) + 1);\n return cur.key;\n }\n}\n\nclass Trie {\n String name;\n String key;\n Map<String, Trie> children = new HashMap<>();\n public Trie(){};\n public Trie(String name) {\n this.name = name;\n }\n}\n``` | 13 | 1 | [] | 1 |
delete-duplicate-folders-in-system | Python3. Hash every subtree | python3-hash-every-subtree-by-yaroslav-r-bc0x | \nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n tree = {}\n child_hashes = defaultdict(list)\ | yaroslav-repeta | NORMAL | 2021-07-25T04:06:22.047835+00:00 | 2021-07-26T04:35:08.335921+00:00 | 1,485 | false | ```\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n tree = {}\n child_hashes = defaultdict(list)\n\t\t\n\t\t# build directories tree\n for path in paths:\n node = tree\n for folder in path:\n if folder not in node:\n node[folder] = {}\n node = node[folder]\n \n\t\t# iterate all children in sorted order and hash them as tuple,\n\t\t# then add current node key to hash and return\n def dfs(node, node_key, parent):\n child_tuple = tuple(dfs(node[key], key, node) for key in sorted(node.keys()))\n child_hash = hash(child_tuple)\n if child_tuple:\n child_hashes[child_hash].append((parent, node_key))\n return hash((child_hash, node_key))\n\n dfs(tree, None, None)\n\n\t\t# find all subtree with the same hash and delete them from the tree\n for duplicates in child_hashes.values():\n if len(duplicates) > 1:\n for parent, node_key in duplicates:\n del parent[node_key]\n\n\t\t# simple dfs to collect all "root -> ... -> leaf" paths\n def dfs_collect_paths(node, current, res):\n for key in node.keys():\n res.append(current + [key])\n dfs_collect_paths(node[key], current + [key], res)\n return res\n return dfs_collect_paths(tree, [], [])\n```\n\nActually it works even faster without hashing:\n```\ndef dfs(node, node_key, parent):\n child_tuple = tuple(dfs(node[key], key, node) for key in sorted(node.keys()))\n if child_tuple:\n child_hashes[child_tuple].append((parent, node_key))\n return (child_tuple, node_key)\n```\nThough I was afraid to use huge tuples during contest. | 13 | 1 | [] | 1 |
delete-duplicate-folders-in-system | Hash Map of Serialized Tree Traversals | hash-map-of-serialized-tree-traversals-b-y0y8 | I think this can be solved using a suffix match, but it did not work - we need to account for the tree topology.\n\nSo, we build a tree first. Now, we can recur | votrubac | NORMAL | 2021-07-25T05:37:09.634271+00:00 | 2021-07-30T06:04:07.703578+00:00 | 1,881 | false | I think this can be solved using a suffix match, but it did not work - we need to account for the tree topology.\n\nSo, we build a tree first. Now, we can recursively compare every node, but that would be tedious and, perhaps, too long. \n\nInstead we can traverse every node (DFS) and build serialized version of this traversal (e.g. "a/b/c/d"). We can then use a hashmap to lookup nodes by their serialized traversals. If we encounter the same traversal again - we can mark both nodes (the current one, and the one in the hash map) as deleted.\n\n> Note: the solution below was updated to pass the newly added test cases. 
We need to use a `map` instead of `unordered_map` to guarantee the same order while traversing.\n\n> The credit for the idea goes to [lzl124631x](https://leetcode.com/lzl124631x/).\n\n**C++**\n```cpp\nstruct Node {\n bool deleted = false;\n string name;\n map<string, Node*> ch;\n Node(string name = "") : name(name) {};\n void insert(vector<string> &path, int i = 0) {\n if (i < path.size()) {\n auto it = ch.find(path[i]);\n if (it == end(ch))\n it = ch.insert({path[i], new Node(path[i])}).first;\n it->second->insert(path, i + 1);\n }\n }\n string trim(unordered_map<string, Node*> &m) {\n string path;\n for (auto [name, node] : ch)\n path += node->trim(m);\n if (!path.empty()) {\n auto it = m.find(path);\n if (it != end(m))\n it->second->deleted = deleted = true;\n else\n m[path] = this;\n }\n return "(" + name + path + ")";\n }\n void build(vector<vector<string>>& paths, vector<string> &path) {\n if (!deleted) {\n if (!path.empty())\n paths.push_back(path);\n for (auto [name, node] : ch) {\n path.push_back(name);\n node->build(paths, path);\n path.pop_back();\n }\n }\n }\n};\nvector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n Node r;\n for (auto &path : paths)\n r.insert(path);\n paths.clear();\n r.trim(unordered_map<string, Node*>() = {});\n r.build(paths, vector<string>() = {});\n return paths;\n}\n``` | 11 | 2 | [] | 4 |
delete-duplicate-folders-in-system | [Python3] serialize sub-trees | python3-serialize-sub-trees-by-ye15-z1gv | \n\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n paths.sort()\n \n tree = {"#": -1}\n | ye15 | NORMAL | 2021-07-25T04:01:52.512739+00:00 | 2021-07-26T18:56:13.923684+00:00 | 1,049 | false | \n```\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n paths.sort()\n \n tree = {"#": -1}\n for i, path in enumerate(paths): \n node = tree\n for x in path: node = node.setdefault(x, {})\n node["#"] = i\n \n seen = {}\n mark = set()\n \n def fn(n): \n """Return serialized value of sub-tree rooted at n."""\n if len(n) == 1: return "$" # leaf node \n vals = []\n for k in n: \n if k != "#": vals.append(f"${k}${fn(n[k])}")\n hs = "".join(vals)\n if hs in seen: \n mark.add(n["#"])\n mark.add(seen[hs])\n if hs != "$": seen[hs] = n["#"]\n return hs\n \n fn(tree)\n \n ans = []\n stack = [tree]\n while stack: \n n = stack.pop()\n if n["#"] >= 0: ans.append(paths[n["#"]])\n for k in n: \n if k != "#" and n[k]["#"] not in mark: stack.append(n[k])\n return ans \n``` | 6 | 1 | ['Python3'] | 2 |
delete-duplicate-folders-in-system | [Java] Trie + PostOrder + DFS | java-trie-postorder-dfs-by-nickavenger-lp9u | Three steps process:\n1. Construct a Trie-like structure. Using map for children node since path is a string, not just a single character.\n\n2. PostOrder - Not | NickAvenger | NORMAL | 2021-08-10T01:29:20.900469+00:00 | 2021-08-10T01:29:20.900511+00:00 | 624 | false | Three steps process:\n1. Construct a Trie-like structure. Using map for children node since path is a string, not just a single character.\n\n2. PostOrder - Note it down. For duplication detection in tree, postOrder is the key to get substructure. \n\ta. The substructure can be encoded any way you want. The easiest one is to use \'(\' and \')\' to denote levels. a (b, c,d (...),) (Note: my solution has an ending \',\' in the children list but not really matter.)\n\tb. The important here is the order of node\'s children. For example \'a(b,c)\' vs \'a(c,b)\'. Use TreeMap for node.children to work around it. \n\tc. Record substructure int a map<String, Node> with key is a serialized structure string.\n\tc. Mark the node.dup = true if its substructure has seen earlier. \n\n3. 
DFS and stop at nodes which node.dup=true.\n\n\n```\nclass Solution {\n Node root = new Node("*");\n Map<String, Node> map= new HashMap<>();\n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n constructTrie(paths);\n postOrder(root);\n List<List<String>> res = new ArrayList<>();\n dfs(res, root, new ArrayList<String>());\n return res;\n }\n \n public String postOrder(Node node) {\n StringBuffer s = new StringBuffer();\n for (Node cn : node.children.values()) {\n s.append(postOrder(cn));\n s.append(",");\n }\n\n String key = s.toString();\n if (s.length() == 0) return node.val + key; // leaf node\n if (map.containsKey(key)) {\n Node orig = map.get(key);\n orig.dup = true;\n node.dup = true;\n } else {\n map.put(key, node);\n }\n \n return node.val + "(" + key + ")";\n }\n \n public void dfs(List<List<String>> res, Node node, List<String> str) {\n if (node == null) return;\n for(Node cn : node.children.values()) {\n if (cn.dup) continue;\n str.add(cn.val);\n List<String> result = new ArrayList<>();\n result.addAll(str);\n res.add(result);\n dfs(res, cn, str);\n str.remove(str.size()-1);\n }\n }\n \n public void constructTrie(List<List<String>> paths) {\n for (List<String> path : paths) {\n Node cur = root;\n for (String ss : path) {\n cur = cur.children.computeIfAbsent(ss, s -> new Node(s));\n }\n }\n }\n}\n\nclass Node {\n Map<String, Node> children;\n String val;\n boolean dup;\n public Node(String val) {\n children = new TreeMap<String, Node>();\n this.val = val;\n dup = false;\n }\n}\n\n``` | 5 | 0 | [] | 0 |
delete-duplicate-folders-in-system | C++ || Trie || HashMap || OOPs || Serialization | c-trie-hashmap-oops-serialization-by-luc-5oww | \n#define F first\n#define S second\n\nclass Trie {\nprivate:\n struct trieNode{\n map<string,trieNode*> child;\n string word="";\n }; \n | lucifer_99 | NORMAL | 2021-07-28T13:38:35.082202+00:00 | 2021-07-28T13:38:35.082344+00:00 | 762 | false | ```\n#define F first\n#define S second\n\nclass Trie {\nprivate:\n struct trieNode{\n map<string,trieNode*> child;\n string word="";\n }; \n trieNode* root;\n vector<vector<string>> res;\n unordered_map<string,vector<trieNode*>> m1;\n unordered_set<trieNode*> vis;\npublic: \n Trie() {\n root=new trieNode();\n }\n \n /** Inserts a word into the trie. */\n void insert(vector<string> &word) {\n trieNode* temp=root;\n for(auto &i: word){\n string ch=i;\n if(temp->child.find(ch)== temp->child.end()){\n temp->child[ch]= new trieNode();\n }\n temp=temp->child[ch];\n }\n \n }\n void serialize() {\n trieNode* temp=root;\n \n for(auto &i:temp->child){\n serialize(i.S,i.F);\n }\n }\n \n //Encode the subtree . 
Just make sure to use different delimiters i.e \'$\' for combining encoded string of childs and \'#\' for combining encoded string of subtree with parent \n string serialize(trieNode* temp,const string &par) {\n \n string subtree="";\n \n for(auto &i:temp->child){\n\n string child=serialize(i.S,i.F);\n if(child!=""){\n subtree+=child;\n subtree+=\'$\';\n }\n }\n if(!subtree.empty()){\n temp->word=subtree;\n m1[subtree].push_back(temp);\n }\n \n string tt=par;\n tt+=\'#\';\n if(!subtree.empty())\n tt+=subtree;\n return tt;\n \n } \n void mark(){\n for(auto &i:m1){\n if((int)i.S.size()>1){\n for(auto &j:i.S){\n vis.insert(j);\n }\n }\n }\n }\n void solve(trieNode* temp,const string &ch,vector<string> &ans){\n if(vis.find(temp)!=vis.end()){\n return;\n }\n ans.push_back(ch);\n res.push_back(ans);\n for(auto &i: temp->child){\n solve(i.S,i.F,ans);\n }\n ans.pop_back();\n }\n void solve(){\n vector<string> ans;\n trieNode* temp=root;\n for(auto &i:temp->child){\n solve(i.S,i.F,ans);\n }\n }\n \n \n vector<vector<string>> answer(){\n return res;\n }\n \n};\nclass Solution {\npublic:\n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n Trie* obj= new Trie();\n for(auto &i:paths){\n obj->insert(i);\n }\n obj->serialize();\n obj->mark();\n obj->solve();\n return obj->answer();\n \n }\n};\n``` | 5 | 1 | [] | 1 |
delete-duplicate-folders-in-system | [python] using dictionary of dictionaries (build tree and remove duplicates) | python-using-dictionary-of-dictionaries-j0886 | Approach\nFollowing are the steps to achieving what we need:\n1. Build the directory tree using dictionaries.\n2. For all the input paths which correspond to no | 495 | NORMAL | 2021-07-31T00:28:55.132811+00:00 | 2021-07-31T00:28:55.133278+00:00 | 1,329 | false | ## Approach\nFollowing are the steps to achieving what we need:\n1. Build the directory tree using dictionaries.\n2. For all the input paths which correspond to non-leaf directories, build a subtree signature\n3. Store the parent and the subtree key against this signature in a list so that paths having same subtree signatures will go in the same list. Any such list with length more than 1 are duplicates\n4. Remove these duplicates by popping the child key from the parent dictionary.\n5. Re-construct the paths by flattening the directory tree.\n\n```\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n \n # build dir tree as a dictionary\n root = {}\n for path in paths:\n node = root\n for p in path: \n if p not in node: node[p] = {}\n node = node[p]\n \n # subtree signature... 
note that subtrees\' order doesn\'t matter\n # so same signature should be generated irrespective of the ordering of subtrees\n sign = lambda node: \'(\' + \'|\'.join([key + sign(node[key]) for key in sorted(node.keys())]) + \')\'\n\t\t\n # derive subdir signature for all non-leaf nodes in the tree\n\t\t# put them in a map of signature to list of nodes\n # every node is represented by a parent and its subtree key\n subtree2node = defaultdict(list)\n for path in paths:\n parent = node = root\n for segment in path: \n parent, node = node, node[segment]\n if node:\n subtree2node[sign(node)].append((segment, parent))\n \n # delete duplicate subtrees\n\t\t# duplicates will have same signature so the corresponding list will have more than one nodes\n for subtree in subtree2node.values():\n if len(subtree) > 1:\n for segment, parent in subtree:\n parent.pop(segment)\n \n # recursively construct all paths from the remaining tree\n def flatten(node, prefix):\n answer = []\n for key in node:\n path = prefix + [key]\n answer.append(path)\n answer.extend(flatten(node[key], path))\n return answer\n \n return flatten(root, [])\n``` | 4 | 0 | [] | 0 |
delete-duplicate-folders-in-system | Twenty-ish LOCs 🤩 | twenty-ish-locs-by-linqmafia-d1ll | "Clean code should read like well-written prose" \u2013 Robert C. Martin\n\nc#\npublic class Solution {\n public IList<IList<string>> DeleteDuplicateFolder(I | linqmafia | NORMAL | 2021-08-01T18:04:49.215387+00:00 | 2021-08-14T08:55:34.701472+00:00 | 373 | false | _"Clean code should read like well-written prose" \u2013 Robert C. Martin_\n\n```c#\npublic class Solution {\n public IList<IList<string>> DeleteDuplicateFolder(IList<IList<string>> paths) {\n var root = new TrieNode();\n var path2node = paths.ToDictionary(p => p, p => root.NodeByPath(p));\n var uniqueNodes = root.Flatten().GroupBy(n => n.Hash).Where(g => g.Count() == 1).SelectMany(g => g).ToHashSet();\n return paths.Where(p => uniqueNodes.Contains(path2node[p].ClosestNotLeaf)).ToList();\n }\n \n class TrieNode {\n private string hash;\n private TrieNode parent;\n private Dictionary<string, TrieNode> children = new Dictionary<string, TrieNode>();\n public TrieNode ClosestNotLeaf => children.Any() ? this : parent;\n public string Hash => hash ?? (hash = string.Concat(from c in children orderby c.Key select (c.Key, c.Value.Hash)));\n public IEnumerable<TrieNode> Flatten() => children.Values.SelectMany(n => n.Flatten()).Prepend(this);\n public TrieNode NodeByPath(IList<string> path, int i = 0) {\n if (i == path.Count) return this;\n children.TryAdd(path[i], new TrieNode { parent = this });\n return children[path[i]].NodeByPath(path, i + 1);\n }\n }\n}\n``` | 3 | 0 | [] | 0 |
delete-duplicate-folders-in-system | C++ solution with hash tree (Merkle tree) | c-solution-with-hash-tree-merkle-tree-by-qfi4 | Suprisingly this idea comes not from any algorithm books, but from a system design interview book where they talk about using hash tree to efficiently sync data | tommytsang308 | NORMAL | 2021-07-25T19:58:55.685570+00:00 | 2021-07-25T19:58:55.685617+00:00 | 779 | false | Suprisingly this idea comes not from any algorithm books, but from a system design interview book where they talk about using hash tree to efficiently sync databases. They use has tree to find which data block differs between databases and sync only those data blocks, instead of updating the entire database.\n\nIt is the first time I\'ve ever wrote a hash tree. There is no way I could have finished this in time during the competition\nand I do not handle collisions. It works for now but in the future when more test cases are included then my hash won\'t work anymore.\n\n\n\n class Solution {\n public:\n\n typedef long long ll;\n const ll POWER = 31;\n const ll MOD = 1000000007;\n struct Node{\n vector<string> path;\n vector<Node*> fChildren;\n int id = -1;\n int nNodes = 0;\n ll hashChildren = 0;\n };\n\t\n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n sort(paths.begin(), paths.end()); // e.g. 
prevent ["a", "x", "y"] from appearing before ["a", "x"]\n unordered_map<ll, Node*> hashToNodes;\n auto root = new Node;\n for(int i = 0; i < paths.size(); ++i) {\n ll radix = 1;\n ll hashParent = 0, hash;\n \n // hash of parent\'s path (excluding itself)\n for(int j = 0; j < paths[i].size() - 1; ++j) \n for(auto ch : paths[i][j]) \n hashParent = (hashParent*POWER + (ch - \'a\' + 1)) % MOD; \n \n // hash if the entire path (including itself)\n hash = hashParent;\n for(auto ch : paths[i].back())\n hash = (hash*POWER + (ch - \'a\' + 1)) % MOD; \n \n auto node = new Node{paths[i]};\n node -> id = i;\n // find parent node\n if(hashToNodes.find(hashParent) != hashToNodes.end()) {\n hashToNodes[hashParent] -> fChildren.push_back(node);\n } else root -> fChildren.push_back(node); // if parent node is not found, append to root\n \n // save node tables\n hashToNodes[hash] = node;\n }\n \n // fill the "hashChildren" field of each node\n fillHash(root);\n root = removeNode(root);\n vector<vector<string>> ans;\n getElements(root, ans);\n return ans;\n }\n \n void getElements(Node* node, vector<vector<string>>& ans) {\n if(node -> path.size() > 0) ans.push_back(node -> path);\n for(auto child : node -> fChildren)\n if(child) getElements(child, ans);\n }\n \n ll fillHash(Node* node) {\n ll power = 1;\n ll hash = 0;\n node -> nNodes = 1;\n for(auto child : node -> fChildren) {\n ll temp = fillHash(child);\n hash = (hash + temp*power) % MOD;\n node -> nNodes += child -> nNodes;\n power = (power*powMod(POWER, child -> nNodes)) % MOD;\n }\n // sum of hashes from all children\n node -> hashChildren = hash;\n hashCount[hash]++;\n \n // sum of hashes from all children + itself\n if(node -> path.size() > 0)\n for(auto& ch : node -> path.back())\n hash = (hash*POWER + (ch - \'a\' + 1)) % MOD;\n return hash;\n }\n \n Node* removeNode(Node *node) {\n if(!node) return nullptr;\n // remove nodes where the hash appears more than once\n // node -> hashChildren > 0 because all leaves has 
hashChildren = 0. Don\'t want to remove that\n if(node -> hashChildren > 0 && hashCount[node -> hashChildren] > 1) return nullptr;\n for(int i = 0; i < node -> fChildren.size(); ++i) {\n node -> fChildren[i] = removeNode(node -> fChildren[i]);\n }\n return node;\n }\n \n ll powMod(int pow, int ind) {\n if(ind == 0) return 1;\n if(ind == 1) return pow;\n ll res = powMod(pow, ind/2) % MOD;\n if(ind % 2 == 0) return (res*res) % MOD;\n else return (((res*res) % MOD)*pow) % MOD;\n }\n \n unordered_map<ll, int> hashCount;\n };\n | 3 | 0 | [] | 1 |
delete-duplicate-folders-in-system | Serialized Tree Traversal + Hashmap | Clean and clear solution | serialized-tree-traversal-hashmap-clean-8673y | Main idea is simple: \n\n1) Build: Build the tree using Trie. Since the nodes are not simply characters but strings so use unordered_map instead of array.\n2) | divyalok_20 | NORMAL | 2021-07-25T06:02:43.582786+00:00 | 2021-07-27T03:53:00.438839+00:00 | 472 | false | Main idea is simple: \n\n1) **Build:** Build the tree using Trie. Since the nodes are not simply characters but strings so use unordered_map instead of array.\n2) **Count:** Run a counter (dfs) function to store the count of similar subtrees.\n3) **Destroy:** Do another dfs to mark the same hash (which occur more than once) as isCleared to not count them while building the answer.\n4) **Collect:** Run a final dfs to store the paths in final answer.\n\nThis implementation is pretty naive but its easy to understand so thought maybe I should post. Any improvements are welcome :)\n\n```\nclass Solution {\npublic:\n class Node {\n public:\n unordered_map<string, Node*> children;\n string val;\n bool isEnd;\n bool isCleared;\n Node(): isEnd(false), isCleared(false) {}\n Node(const string& s): val(s), isEnd(false), isCleared(false) {}\n };\n \n unordered_map<string, int> store;\n string counter(Node* root) { //counter function to count the subtrees\n if(!root) return "";\n \n string hash;\n for(auto& nex: root->children) {\n string temphash = counter(nex.second);\n temphash += nex.first;\n hash += "#" + temphash;\n }\n \n // cout << "In collect | hash: " << hash << endl;\n \n if(hash.size()) // just to get rid of empty hashes\n store[hash]++;\n return hash;\n }\n \n string destroy(Node* root) {\n if(!root) return "";\n \n string hash;\n for(auto& nex: root->children) {\n string temphash = destroy(nex.second);\n temphash += nex.first;\n hash += "#" + temphash;\n }\n \n if(store[hash] > 1) { // mark the root as "isCleared" if the same subtree occurs more than 
once\n // cout << "In destroy | hash: " << hash << " removing: " << root->val << endl;\n root->isCleared = true;\n }\n \n return hash;\n }\n \n void collect(Node* root, vector<string>& temp, vector<vector<string>>& ans) {\n if(!root) return;\n if(root->isCleared) return;\n \n if(root->children.size() == 0) {\n if(temp.size())\n ans.push_back(temp);\n return;\n }\n \n if(root->isEnd) //only push the current path in if this path exists in paths\n ans.push_back(temp);\n \n for(auto& nex: root->children) {\n temp.push_back(nex.first);\n collect(nex.second, temp, ans);\n temp.pop_back();\n }\n }\n \n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n Node* root = new Node("\\\\");\n \n\t\t//build the tree\n for(auto& path: paths) {\n Node* temp = root;\n for(auto& val: path) {\n if(temp->children[val] == NULL)\n temp->children[val] = new Node(val);\n temp = temp->children[val];\n }\n temp->isEnd = true;\n }\n \n counter(root);\n destroy(root);\n \n vector<vector<string>> ans;\n vector<string> temp;\n \n collect(root, temp, ans);\n \n return ans;\n }\n};\n``` | 3 | 0 | ['Depth-First Search', 'Trie', 'C'] | 0 |
delete-duplicate-folders-in-system | c++ | easy | short | c-easy-short-by-venomhighs7-wcxc | \n\n# Code\n\nstruct Node {\n string name;\n map<string, Node*> next; \n bool del = false;\n Node(string n = "") : name(n) {}\n};\nclass Solution {\ | venomhighs7 | NORMAL | 2022-10-14T04:15:15.774248+00:00 | 2022-10-14T04:15:15.774282+00:00 | 661 | false | \n\n# Code\n```\nstruct Node {\n string name;\n map<string, Node*> next; \n bool del = false;\n Node(string n = "") : name(n) {}\n};\nclass Solution {\n void addPath(Node *node, vector<string> &path) { \n for (auto &s : path) {\n if (node->next.count(s) == 0) node->next[s] = new Node(s);\n node = node->next[s];\n }\n }\n unordered_map<string, Node*> seen; \n string dedupe(Node *node) { \n string subfolder;\n for (auto &[name, next] : node->next) {\n subfolder += dedupe(next);\n }\n if (subfolder.size()) { \n if (seen.count(subfolder)) { \n seen[subfolder]->del = node->del = true;\n } else {\n seen[subfolder] = node;\n }\n }\n return "(" + node->name + subfolder + ")"; \n }\n vector<vector<string>> ans;\n vector<string> path;\n void getPath(Node *node) {\n if (node->del) return; \n path.push_back(node->name);\n ans.push_back(path);\n for (auto &[name, next] : node->next) {\n getPath(next);\n }\n path.pop_back();\n }\npublic:\n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& A) {\n Node root;\n for (auto &path : A) addPath(&root, path);\n dedupe(&root);\n for (auto &[name, next] : root.next) getPath(next);\n return ans;\n }\n};\n``` | 2 | 0 | ['C++'] | 0 |
delete-duplicate-folders-in-system | Good and Naively bad hashes: analysis | good-and-naively-bad-hashes-analysis-by-ty97q | If you went through a number of solutions, you\'ll notice that they mostly use the same hash for a folder:\n\n\n "(" + "".join(folder + serialize(s) for s in su | optimizeMyDay | NORMAL | 2022-06-26T15:36:35.452408+00:00 | 2022-06-26T15:36:35.452453+00:00 | 461 | false | If you went through a number of solutions, you\'ll notice that they mostly use the same hash for a folder:\n\n```\n "(" + "".join(folder + serialize(s) for s in subfolders) + ")"\n ```\n \n However, if you previously solved [Find duplicate subtrees](https://leetcode.com/problems/find-duplicate-subtrees/), you\'ll know that in that case, we were dealing with binary trees (rather than n-ary like here) and the serialisation for a node could look like:\n \n ```\nstr(node.val) + serialize(node.left) + serialize(node.right)\n ```\n or\n ```\n serialize(node.left) + "," + serialialize(node.right) + "," + str(node.val)\n ```\n \n or really any other combo of simply appending serialization of 2 children to the node value itself.\n\n**So what\'s different now?**\n\n \n1. We have multiple children and we cannot trust that the input will be sorted for us. Since we are keeping subfolders/children in a hashmap, the order of keys is not guaranteed\n\nfor input like\n```\n[[\'x\'], [\'y\'] [\'x\',\'a\'], [\'x\',\'b\'], [\'x\',\'c\'], [\'y\',\'c\'], [\'y\',\'b\'], [\'y\',\'a\']]\n```\nIf \'x\' has a serialisation of (\'abc\') and \'y\' will have (\'cba\'), then we will miss the case \n\n\n\neven though the two folders are clearly duplicates\n\n\n\n\nthat means that *as you are building* the Tree/Node/Trie, you could sort the paths list first\u044E\n\n2. 
Why do we add parentheses?\n\nI decided not to add them and failed on this test case:\n\n\n\nWe can see that they are pretty different, but without parentheses, here are:\n\nserialisation of A:\n\n```\nC + serialize(C\'s children) + W + serialise(W\'s children) = C + B + W +Y = CBWY\n```\n\n\nserialisation of Z:\n\n```\nC + serialize(C\'s children) = C + B + W + serialize(W\'s children) = C + B + W + Y = CBWY\n```\n\n\nBut in realiaty they are more like\n```\n[C->B][W->Y]\n```\nand \n```\nC->[B][W->Y]\n```\nor any other groupping you can come up with to distinguish the different subtrees.\n | 2 | 0 | ['Python', 'Java'] | 0 |
delete-duplicate-folders-in-system | DFS | TRIE with TriePool | Marking nodes | dfs-trie-with-triepool-marking-nodes-by-x1wqr | First we create the Trie using the given path array.\nSince the constraints mention that If a node at any level is inserted, then it is made sure that all the o | ajay_5097 | NORMAL | 2021-11-05T18:55:20.601825+00:00 | 2021-11-05T18:55:20.601866+00:00 | 297 | false | First we create the Trie using the given path array.\nSince the constraints mention that If a node at any level is inserted, then it is made sure that all the other nodes before this node will be also be inserted.\n\nOnce the Trie is created, we do the dfs on the trie and try to serialize the subtrees each node. We map the subtree string to the parent node. While traversing if we find any node/string, which has already been visited then we mark the current node as deleted and the node which is present in the map also deleted.\n\nWe again do the dfs on the Trie tree in post order traversal fashion. And if any of the node is deleted then we don\'t visit the nodes under the current node.\n```\nclass Node {\npublic:\n map<string, Node*> child;\n bool isDeleted;\n string name;\n};\nconst int nax = 1e6 + 100;\nNode pool[nax], *root;\nint nodeCnt = 0;\nmap<string, Node*> seen;\nvector< vector<string> > result;\n//factory method to get the Trie node\nNode *getNode(string data) {\n Node *cur = &pool[++nodeCnt];\n cur->isDeleted = false;\n cur->child.clear();\n cur->name = data;\n return cur;\n}\n\n//Trie Like insert Tree Function\nvoid insert(Node *root, vector<string> &path) {\n Node *cur = root;\n for (auto x: path) {\n if (cur->child[x] == nullptr) {\n cur->child[x] = getNode(x);\n }\n cur = cur->child[x];\n }\n}\n\n//update/mark the nodes to be deleted\nstring mark(Node *root) {\n string subtree = "";\n for (auto [chilldName, childPtr]: root->child) {\n if (childPtr)\n subtree += mark(childPtr);\n }\n //current node is leaf node\n if (subtree.size() == 0) {\n return "(" + 
root->name + ")";\n }\n //This subtree is present in some other part of the tree\n if (seen.find(subtree) != seen.end()) {\n seen[subtree]->isDeleted = root->isDeleted = true;\n }\n seen[subtree] = root;\n return "(" + root->name + subtree + ")";\n}\n\nvoid dfs(Node *root, vector<string> path) {\n if (root == nullptr || root->isDeleted) {\n return ;\n }\n if (path.size() > 0) {\n result.push_back(path);\n }\n for (auto [childName, childPtr]: root->child) {\n path.push_back(childName);\n dfs(childPtr, path);\n path.pop_back();\n }\n}\n\nclass Solution {\npublic:\n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n nodeCnt = 0;\n seen.clear();\n result.clear();\n //build Trie Like tree\n root = getNode("");\n for (auto path: paths) {\n insert(root, path);\n }\n //Traverse the treee and delete the nodes\n mark(root);\n //again extract the nodes from the tree\n dfs(root, {});\n return result;\n }\n};\n``` | 2 | 0 | [] | 0 |
delete-duplicate-folders-in-system | LeetCoders seem to rarely care about memory leak | leetcoders-seem-to-rarely-care-about-mem-j3me | As far as I observe, LeetCoders care mostly about speed and memory usage. Who wants to bother releasing heap memory?\nHowever, it does not mean one should be un | cqbaoyi | NORMAL | 2021-07-31T16:15:32.228310+00:00 | 2021-10-10T15:14:55.190723+00:00 | 506 | false | As far as I observe, LeetCoders care mostly about speed and memory usage. Who wants to bother releasing heap memory?\nHowever, it does not mean one should be unaware of it. If you use `new`, keep in mind to use `delete`. At least in a real interview, mention it.\n\nThe credit of solution itself goes to @lzl124631x.\n```\nstruct Node{\n Node(string s = ""): name(s) {};\n \n string name;\n map<string, Node*> children;\n bool toDelete = false;\n};\n\nclass Solution {\nprivate:\n unordered_map<string, Node*> seen; // substructure string to the first met Node\n vector<vector<string>> res;\npublic:\n string mark(Node* root)\n {\n string subStructure = "";\n for (auto [name, node]: root->children)\n subStructure += mark(node);\n \n if (!subStructure.empty())\n {\n if (seen.find(subStructure) != seen.end())\n {\n seen[subStructure]->toDelete = true;\n root->toDelete = true;\n }\n else\n seen[subStructure] = root;\n }\n return "(" + root->name + subStructure + ")";\n }\n \n void collect(Node* root, vector<string>& cur)\n {\n if (!root)\n return;\n for (auto [name, node]: root->children)\n {\n if (node->toDelete)\n continue;\n cur.push_back(name);\n res.push_back(cur);\n collect(node, cur);\n cur.pop_back();\n }\n }\n \n void clean(Node* root)\n {\n if (!root)\n return;\n for (auto [_, node]: root->children)\n clean(node);\n delete root;\n }\n \n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n // build the tree\n Node* root = new Node();\n for (auto& path: paths)\n {\n Node* cur = root;\n for (auto& folder: path)\n {\n if (cur->children.find(folder) 
== cur->children.end())\n cur->children[folder] = new Node(folder);\n cur = cur->children[folder];\n }\n }\n \n // mark the to-delete nodes\n string dummy = mark(root);\n \n // collect qualified nodes to result\n vector<string> cur;\n collect(root, cur);\n \n // release the heap memory\n clean(root);\n \n return res;\n }\n};\n``` | 2 | 0 | ['C'] | 1 |
delete-duplicate-folders-in-system | Cached hash & Merkle Tree | cached-hash-merkle-tree-by-maristie-afqb | There are 2 major points worthy of attention in this problem.\n1. Efficient traversal of all folders (and corresponding paths)\n2. Efficient elimination of dupl | maristie | NORMAL | 2021-07-28T11:00:19.408566+00:00 | 2021-07-28T11:24:38.247398+00:00 | 702 | false | There are 2 major points worthy of attention in this problem.\n1. Efficient traversal of all folders (and corresponding paths)\n2. Efficient elimination of duplicates\n\nThe problem\'s description hints at a *trie*, but I think it is simply a general tree with possibly multiple children, which is natural to use to model a file system structure.\n\nFor point 1, it\'s obvious that we need to perform a DFS. One non-trivial thing is that subfolder names are guaranteed to be unique under the same parent folder, which enables us to use a dictionary to store its children and iterate over the `dict` during DFS.\n\nPoint 2 is what deserves elaboration. There are two sub-points to mention.\n1. In which cases are two folders *equal*?\nEquality testing is based on its subfolders (including names and substructures), but **NOT** the name of itself. To compare *substructures*, the idea of [Merkle Tree](https://en.wikipedia.org/wiki/Merkle_tree), which takes the aggregated hash of respective subfolders to decide equality of two parent folders, just fits in the problem.\n\n2. How to efficiently compute the hash value?\nMany solutions use strings. But the lengths of strings are unbounded, which may cause exceedingly large time and space costs. A better alternative is to implement a simple hash function. But such a function may be called multiple times. Instead we only compute the hashes after the tree will be no longer mutated, and store the computed hashes as an attribute of tree nodes to avoid redundant computations, which is the same idea as [652. 
Find Duplicate Subtrees](https://leetcode.com/problems/find-duplicate-subtrees/).\n\n```python\nclass Tree:\n\n class Node:\n def __init__(self, name: str):\n self.name: str = name\n self.children: Dict[str, \'Tree.Node\'] = {}\n\n def __eq__(self, other: \'Tree.Node\'):\n return self.children == other.children\n\n def __hash__(self):\n if not hasattr(self, \'_cached_hash\'):\n self._cached_hash = hash(frozenset(self.children.items()))\n return self._cached_hash\n\n def __bool__(self):\n return True if self.children else False\n\n def __iter__(self):\n """Return an iterator of pre-order traversal."""\n yield self\n for child in self.children.values():\n yield from iter(child)\n\n def gen_paths(self, cond: Callable[[\'Tree.Node\'], Any], path: List[str]) -> Generator[List[str], None, None]:\n """Return an iterator of paths whose nodes all satisfy cond."""\n if cond(self):\n yield path\n for child in self.children.values():\n path.append(child.name)\n yield from child.gen_paths(cond, path)\n path.pop()\n\n _ROOT_FOLDER = \'_ROOT_FOLDER\'\n\n def __init__(self):\n self._root = Tree.Node(Tree._ROOT_FOLDER)\n\n def __iter__(self):\n yield from self._root\n\n def insert(self, path: List[str]):\n cur = self._root\n for folder in path:\n cur = cur.children.setdefault(folder, Tree.Node(folder))\n\n def paths(self, cond: Callable[[\'Tree.Node\'], Any]) -> Generator[List[str], None, None]:\n yield from self._root.gen_paths(cond, [])\n\n\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n # Initialize file structure tree\n tree = Tree()\n for p in paths:\n tree.insert(p)\n\n # Count duplicates and mark them\n counter = Counter()\n for node in tree:\n counter[node] += 1\n for node in tree:\n node.marked = True if node and counter[node] > 1 else False\n\n # Collect non-empty paths\n return [p.copy() for p in tree.paths(lambda x: not x.marked) if p]\n``` | 2 | 0 | [] | 0 |
delete-duplicate-folders-in-system | [Python] Tree Hashing: assign unique ID for each unique subtree to save space cost | python-tree-hashing-assign-unique-id-for-2bcg | \nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n """\n n = len(path)\n d = len(path[i]) | oystermax | NORMAL | 2021-07-28T01:30:24.949820+00:00 | 2021-07-28T01:40:40.187152+00:00 | 288 | false | ```\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n """\n n = len(path)\n d = len(path[i])\n l = len(path[i][j])\n L = sum(l)\n """\n # build trie\n # Time: O(n*d)\n # Space: O(n)\n root = {}\n for path in paths:\n curr = root\n for d in path:\n if d not in curr:\n curr[d] = {}\n curr = curr[d]\n \n # hash and find duplicates\n # Time: O(n * avg_n_children)\n # Space: O(n + n_unique_id)\n\t\t## each time we find a unique subtree, assign a unique ID\n subtree_id = defaultdict(None)\n subtree_id.default_factory = subtree_id.__len__\n\t\t## group subtrees by ID. 
\n duplicates = defaultdict(list)\n def hash_(node):\n if not node:\n return None\n\t\t\t# sort keys and get hash code for each child\n\t\t\t# concatenate them for the hash code of current node\n hash_code = \'\'.join([\n f\'({child}:{hash_(node[child])})\'\n for child in sorted(node.keys())\n ])\n\t\t\t# get ID of the hash code, if not found, assign new ID as the length\n id_ = subtree_id[hash_code]\n\t\t\t# update duplicate list by ID\n duplicates[id_].append(node)\n return id_\n\n hash_(root)\n \n # mark duplicate subtrees in trie\n for subtrees in duplicates.values():\n if len(subtrees) == 1:\n continue\n for subtree in subtrees:\n subtree[\'is_dup\'] = True\n\t\t\t\t\n # dfs for output: generate paths for each non-duplicate folder\n # Time: O(n)\n # Space: O(1)\n res = []\n def dfs(node, path):\n\t\t\t# if current folder is makred duplicate, terminate the path\n if \'is_dup\' in node:\n return\n\t\t\t# if a non-empty path is found, put in result\n if path:\n res.append(path)\n\n for k, v in node.items():\n dfs(v, path + [k])\n return \n\t\t\t\n dfs(root, [])\n return res\n \n \n``` | 2 | 1 | ['Depth-First Search', 'Trie', 'Python'] | 0 |
delete-duplicate-folders-in-system | (C++) 1948. Delete Duplicate Folders in System | c-1948-delete-duplicate-folders-in-syste-og15 | \n\nclass Node {\npublic: \n bool mark = false; \n int index = -1; \n unordered_map<string, Node*> next; \n};\n\nclass Solution {\npublic:\n vector< | qeetcode | NORMAL | 2021-07-26T19:50:52.517713+00:00 | 2021-07-26T19:50:52.517757+00:00 | 282 | false | \n```\nclass Node {\npublic: \n bool mark = false; \n int index = -1; \n unordered_map<string, Node*> next; \n};\n\nclass Solution {\npublic:\n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n sort(paths.begin(), paths.end());\n \n Node* tree = new Node(); \n for (int i = 0; i < paths.size(); ++i) {\n Node* node = tree; \n for (auto& x : paths[i]) {\n if (node->next.count(x) == 0) node->next[x] = new Node(); \n node = node->next[x]; \n }\n node->index = i; \n }\n \n unordered_map<string, vector<int>> mp; \n \n function<string(Node*)> fn = [&](Node* node) -> string {\n if (node->next.size() == 0) return "$"; \n string ans; \n for (auto& [k, nn] : node->next) \n ans += "$" + k + "$" + fn(nn); \n mp[ans].push_back(node->index); \n return ans; \n };\n \n fn(tree); \n \n unordered_set<int> mark; \n for (auto& [k, v] : mp) \n if (v.size() > 1) \n for (auto& vv : v) mark.insert(vv); \n \n vector<vector<string>> ans; \n stack<Node*> stk; \n stk.push(tree); \n \n while(stk.size()) {\n Node* node = stk.top(); stk.pop(); \n if (node->index >= 0) ans.push_back(paths[node->index]); \n for (auto& [k, nn] : node->next) {\n if (mark.find(nn->index) == mark.end()) \n stk.push(nn); \n }\n }\n return ans; \n }\n};\n``` | 2 | 0 | ['C'] | 0 |
delete-duplicate-folders-in-system | Tree + Hashing [C++ clean and clear Code] | tree-hashing-c-clean-and-clear-code-by-a-ek05 | idea is, we will calculate the hash of all the subtree and if the count of hash value of any subtree is greater than 1, then will mark that subtree as not incl | abhishek201202 | NORMAL | 2021-07-25T06:38:34.335775+00:00 | 2021-07-25T06:42:24.439114+00:00 | 392 | false | idea is, we will calculate the hash of all the subtree and if the count of hash value of any subtree is greater than 1, then will mark that subtree as not included.\n\n```\nconst int mod = 1e9 + 7;\nconst int p = 163;\nmap<string, int> id;\nmap<int, string> Rid;\nint sz;\nstruct Node{\n map<int, Node*> child;\n int hash, not_include;\n int val;\n\n Node(){\n hash = 0;\n not_include = 0;\n val = -1;\n }\n};\n\n\nvoid insert(Node *root, vector<string> a){\n for(string s : a){\n int idx = id[s];\n if(!root -> child.count(idx)){\n root -> child[idx] = new Node();\n }\n root = root -> child[idx];\n root -> val = idx;\n }\n}\n \n \nvoid pre(Node *root){\n function<int(Node*)> dfs = [&](Node *root){\n if(root == NULL){\n return 0;\n }\n int h = 0;\n for(auto [i, v] : root -> child){\n h = ((h * 1ll * p)%mod + ((dfs(root -> child[i]) * 1ll * p)%mod + i + 1)%mod )%mod;\n }\n root -> hash = h;\n return h;\n };\n dfs(root);\n}\n\n \nvoid remove(Node *root){\n map<int, int> fr;\n function<void(Node*)> dfs = [&](Node *root){\n if(root == NULL) return;\n fr[root -> hash]++;\n for(auto [i, v] : root -> child){\n if(root -> child[i]){\n dfs(root -> child[i]);\n }\n }\n };\n dfs(root);\n function<void(Node*)> dfs2 = [&](Node* root){\n if(root == NULL) return;\n if(fr[root -> hash] > 1 && root -> hash){\n root -> not_include = 1;\n return;\n }\n for(auto [i, v] : root -> child){\n if(root -> child[i]){\n dfs2(root -> child[i]);\n }\n }\n };\n dfs2(root);\n}\n \n \nvector<vector<string>> collect(Node *root){\n vector<vector<string>> res;\n function<void(Node*, vector<string>)> dfs = 
[&](Node *root, vector<string> a){\n if(root == NULL || root -> not_include) return;\n if(a.size()){\n res.push_back(a);\n }\n for(auto [i, v] : root -> child){\n if(root -> child[i]){\n vector<string> b = a;\n b.push_back(Rid[root -> child[i] -> val]);\n dfs(root -> child[i], b);\n }\n }\n };\n dfs(root, {});\n return res;\n}\n \n\n\n\nclass Solution {\npublic:\n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n ios_base::sync_with_stdio(false);cin.tie(NULL);cout.tie(NULL);\n id.clear();\n Rid.clear();\n int cnt = 1;\n for(vector<string> v : paths){\n for(string s : v){\n if(!id.count(s)){\n id[s] = cnt;\n Rid[cnt] = s;\n cnt++;\n }\n }\n }\n sz = id.size() + 1;\n Node *root = new Node();\n for(vector<string> v : paths){\n insert(root, v);\n }\n pre(root);\n remove(root);\n vector<vector<string>> res = collect(root);\n return res;\n }\n};\n``` | 2 | 0 | [] | 1 |
delete-duplicate-folders-in-system | Java Tire , DFS, HashMap :: 100% faster | java-tire-dfs-hashmap-100-faster-by-maha-f6i3 | Here first we have build trie tree insert() .\nThen we have dfs the whole tree and stored the child part into String dfsTrie() .\nIf the string is repeating we | mahafuzzaman02 | NORMAL | 2021-07-25T05:01:39.794881+00:00 | 2021-07-25T05:06:23.725922+00:00 | 808 | false | Here first we have build trie tree *insert()* .\nThen we have dfs the whole tree and stored the child part into String *dfsTrie()* .\nIf the string is repeating we have set the the value to true. and we have used hashmap to to detect the repetation.\nThen we have used the remove node method there we check is the if the string is repetaing if yes then we remove the node else we dfs to next node .*removeNode()*\nThen we have use simple dfs on whole tree to generate the answer *getAnswer()*.\n\n```\nclass Solution {\n \n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n Trie node = new Trie("/");\n for (List<String> val : paths) {\n insert(node, val);\n }\n HashMap<String, Boolean> map = new HashMap<>();\n dfsTrie(node, map);\n removeNode(node, map);\n List<List<String>> ans = new ArrayList<>();\n getAnswer(node, new LinkedList<>(), ans);\n return ans;\n\n }\n\n void insert(Trie node, List<String> paths) {\n for (String a : paths) {\n if (node.next.get(a) == null) {\n node.next.put(a, new Trie(a));\n }\n node = node.next.get(a);\n }\n }\n\n String dfsTrie(Trie node, HashMap<String, Boolean> map) {\n StringBuilder sb = new StringBuilder();\n for (Trie val : node.next.values()) {\n if (val != null) {\n String rec = dfsTrie(val, map);\n if (rec.length()>0 && map.containsKey(rec)) {\n map.put(rec, true);\n } else {\n map.put(rec, false);\n }\n sb.append(rec + val.a);\n }\n }\n node.dirName = sb.toString();\n return sb.toString();\n }\n\n void removeNode(Trie node, HashMap<String, Boolean> map) {\n for (Map.Entry<String,Trie> val : node.next.entrySet() ) 
{\n if (val.getValue().dirName != null && map.get(val.getValue().dirName)) {\n node.next.put(val.getKey(), null);\n }\n if (node.next.get(val.getKey()) != null) {\n removeNode(node.next.get(val.getKey()), map);\n }\n }\n }\n\n void getAnswer(Trie node, LinkedList<String> res, List<List<String>> ans) {\n for (Trie val : node.next.values()) {\n if (val != null) {\n res.add(val.a + "");\n ans.add(new ArrayList<>(res));\n getAnswer(val, res, ans);\n res.removeLast();\n }\n }\n }\n\n class Trie {\n String a;\n HashMap<String, Trie> next;\n String dirName;\n\n Trie(String a) {\n this.a = a;\n this.next = new HashMap<>();\n dirName = null;\n }\n }\n\n}\n``` | 2 | 0 | ['Tree', 'Depth-First Search', 'Trie', 'Java'] | 2 |
delete-duplicate-folders-in-system | [Python] Directory tree | Content hash | python-directory-tree-content-hash-by-9r-rvot | Create directory tree.\nCalculate hash of each directory.\nConstruct a dictionary which maps the hash to the list of nodes that have this hash.\nFor every hash | 9rib-on-the-grind | NORMAL | 2021-07-25T04:10:23.597619+00:00 | 2021-07-25T08:33:07.899704+00:00 | 619 | false | Create directory tree.\nCalculate hash of each directory.\nConstruct a dictionary which maps the hash to the list of nodes that have this hash.\nFor every hash pointing to the list with more than 1 element, mark nodes as deleted.\nRecursively construct all valid paths that were not pruned.\n\n\n```python\nclass Node:\n def __init__(self):\n self.children = defaultdict(Node)\n self.hash = \'\'\n self.deleted = False\n \n def add(self, path, i=0):\n if i != len(path):\n self.children[path[i]].add(path, i + 1)\n \n def calc_hash(self, hashes, name=\'root\'):\n for child_name, child in sorted(self.children.items()):\n self.hash += f\'{child.calc_hash(hashes, child_name)}+\'\n if self.hash:\n hashes[self.hash].append(self)\n return f\'{name}({self.hash})\'\n \n def to_list(self, lst, path=[]):\n for name, node in self.children.items():\n if not node.deleted:\n lst.append(path + [name])\n node.to_list(lst, path + [name])\n\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n root = Node()\n for path in paths:\n root.add(path)\n hash_to_nodes = defaultdict(list)\n root.calc_hash(hash_to_nodes)\n for nodes in hash_to_nodes.values():\n if len(nodes) > 1:\n for node in nodes:\n node.deleted = True\n res = []\n root.to_list(res)\n return res\n``` | 2 | 0 | ['Tree', 'Python'] | 1 |
delete-duplicate-folders-in-system | Trie Tree + hash | trie-tree-hash-by-chenyuanqin826-g07x | \nclass Trie{\n Map<String, Trie> children;\n boolean leaf;\n boolean del;\n Trie(){\n children = new TreeMap<>();\n | chenyuanqin826 | NORMAL | 2021-07-25T04:07:14.288890+00:00 | 2021-07-25T04:07:14.288936+00:00 | 386 | false | ```\nclass Trie{\n Map<String, Trie> children;\n boolean leaf;\n boolean del;\n Trie(){\n children = new TreeMap<>();\n }\n void insert(List<String> word){\n Trie root = this;\n for (String c : word){\n root = root.children.compute(c, (k, v) -> v == null ? new Trie() : v);\n }\n root.leaf = true;\n }\n Trie next(String c){\n return this.children.get(c);\n }\n}\n\nclass Solution {\n \n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n Trie t = new Trie();\n for (List<String> path : paths){\n t.insert(path);\n }\n Map<String, Trie> map = new HashMap<>();\n dfs(t, map);\n List<List<String>> ans = new ArrayList<>();\n travel(t, new ArrayList<>(), ans);\n return ans;\n }\n String dfs(Trie t, Map<String, Trie> map){\n if (t == null){\n return "";\n }\n StringBuilder sb = new StringBuilder();\n for (Map.Entry<String, Trie> entry : t.children.entrySet()){\n String s = entry.getKey()+ ","+ dfs(entry.getValue(), map);\n sb.append("(" + s + ")");\n }\n if (sb.toString().length() > 0 && map.containsKey(sb.toString())){\n t.del = true;\n map.get(sb.toString()).del = true;\n }else{\n map.put(sb.toString(), t);\n }\n return sb.toString();\n }\n void travel(Trie t, List<String> path, List<List<String>> ans){\n if (t == null || t.del){\n return;\n }\n if (path.size() > 0){\n ans.add(new ArrayList<String>(path));\n }\n for (Map.Entry<String, Trie> entry : t.children.entrySet()){\n path.add(entry.getKey());\n travel(entry.getValue(), path, ans);\n path.remove(path.size() - 1);\n }\n }\n}\n``` | 2 | 0 | [] | 0 |
delete-duplicate-folders-in-system | DFS traversal and encode tree's values and tree's structure | dfs-traversal-and-encode-trees-values-an-ktol | \n class Solution {\n \n class Trie {\n \n int val;\n Map<Integer, Trie> next; \n String encoded;\n boolean isDeleted;\ | cucmai | NORMAL | 2021-07-25T04:02:39.238653+00:00 | 2021-07-25T04:02:39.238687+00:00 | 339 | false | ```\n class Solution {\n \n class Trie {\n \n int val;\n Map<Integer, Trie> next; \n String encoded;\n boolean isDeleted;\n \n Trie(int val) {\n this.val = val;\n this.next = new HashMap<Integer, Trie>();\n this.encoded = "";\n this.isDeleted = false;\n }\n }\n \n public String dfsToEncode(Trie root, Map<String, Trie> treeMap) {\n \n for(Integer key : root.next.keySet()) {\n root.encoded = (root.encoded.length() > 0) ? root.encoded + \',\' + dfsToEncode(root.next.get(key), treeMap) : dfsToEncode(root.next.get(key), treeMap);\n }\n \n if (root.encoded.length() > 0) {\n if (!treeMap.containsKey(root.encoded)) {\n treeMap.put(root.encoded, root); \n } else {\n root.isDeleted = true;\n treeMap.get(root.encoded).isDeleted = true;\n }\n \n if (root.encoded.length() == 0) {\n return String.valueOf(root.val);\n } \n }\n \n return String.valueOf(root.val) + "," + root.encoded;\n }\n \n public static List<String> clone(List<String> list) {\n \n List<String> clonedList = new ArrayList<String>();\n \n for(String s : list) {\n clonedList.add(s);\n }\n \n return clonedList;\n }\n \n public void dfs(Trie root, Map<Integer, String> intToFolder, List<List<String>> ans, List<String> pre) {\n \n if (!root.isDeleted) {\n \n List<String> cur = clone(pre);\n \n if (root.val != -1) {\n cur.add(intToFolder.get(root.val));\n ans.add(cur);\n }\n \n for(Integer key : root.next.keySet()) {\n // System.out.println(key + " " + intToFolder.get(root.val));\n dfs(root.next.get(key), intToFolder, ans, cur);\n }\n }\n }\n \n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n \n Map<String, Integer> 
folderToInt = new HashMap<String, Integer>();\n Map<Integer, String> intToFolder = new HashMap<Integer, String>();\n \n int cnt = 0;\n \n Trie root = new Trie(-1);\n for(List<String> path : paths) {\n Trie cur = root;\n for(String folder : path) {\n \n if (!folderToInt.containsKey(folder)) {\n folderToInt.put(folder, cnt);\n intToFolder.put(cnt, folder);\n cnt++;\n }\n \n int num = folderToInt.get(folder);\n \n if (!cur.next.containsKey(num)) {\n cur.next.put(num, new Trie(num));\n }\n cur = cur.next.get(num);\n }\n }\n \n Map<String, Trie> treeMap = new HashMap<String, Trie>();\n dfsToEncode(root, treeMap);\n \n List<List<String>> ans = new ArrayList<List<String>>();\n dfs(root, intToFolder, ans, new ArrayList<String>());\n \n return ans;\n }\n}\n``` | 2 | 0 | [] | 0 |
delete-duplicate-folders-in-system | Python solution + explanation | python-solution-explanation-by-yrnzbfr-n4dx | Intuition\n1. Use Trie for Optimal Storage\n2. Serialize Trie Nodes\n3. Detect and Mark Duplicates\n4. Construct Result Paths\n\n# Approach\n1. Use Trie for Opt | YRnzBfR | NORMAL | 2024-05-29T13:49:51.445767+00:00 | 2024-05-29T13:49:51.445796+00:00 | 91 | false | # Intuition\n1. Use Trie for Optimal Storage\n2. Serialize Trie Nodes\n3. Detect and Mark Duplicates\n4. Construct Result Paths\n\n# Approach\n1. Use Trie for Optimal Storage:\n**Reason**: A Trie efficiently stores hierarchical paths, where each node represents a directory and paths share common prefixes.\n**Alternative**: While a general tree or graph could also represent the paths, Tries are efficient for prefix-based operations and hierarchical data.\n\n1. Serialize Trie Nodes:\n**HashKey**: Serialize each Trie node into a unique string representation. The format used is "<parent_name>(serialize(child_subtrie))".\n**Consistency**: Ensure children are sorted lexicographically during serialization to maintain consistency.\n**Storage**: Store serialized nodes in a `children_hash` HashMap. Each unique serialized string points to a list of parent nodes.\n\n1. Detect and Mark Duplicates:\nIterate over the `children_hash` and mark nodes for deletion if their serialized subtree appears more than once (i.e., if the list of parents has more than one node).\n\n1. 
Construct Result Paths:\n**Backtracking**: For each non-deleted node, append the folder name to the current path and add it to the results list.\n\n# Complexity\n- Time complexity: `O(N*Llog(L))`\n\nInsertion: `O(L)`, where `L` is the average length of the paths.\nSerialization: `O(N*Llog(L))`\n`N` - the number of nodes\n`L` - max(length of a path).\n`L*log(L)` - sorting the children for each node during serialization \n\n- Space complexity: `O(N+L)`\n`N` - the number of nodes\n`O(L)` - maximum depth of the recursive call stack during serialization\n\n# Code\n```python3\nclass TrieNode():\n def __init__(self):\n self.children = {}\n self.to_delete = False\n\nclass Trie:\n def __init__(self):\n self.root = TrieNode()\n self.children_hash = defaultdict(list)\n\n def insert_paths(self, paths: List[List[str]]):\n for path in paths:\n cur = self.root\n for folder in path:\n if folder not in cur.children:\n cur.children[folder] = TrieNode()\n cur = cur.children[folder]\n \n def serialise_children(self, node):\n hash_key = []\n for name, child in sorted(node.children.items()):\n hash_key.append(f"{name}({self.serialise_children(child)})")\n \n hash_key_str = "".join(hash_key)\n if hash_key_str:\n self.children_hash[hash_key_str].append(node)\n return hash_key_str\n \n def mark_duplicates(self):\n for hash_key in self.children_hash:\n if len(self.children_hash[hash_key]) > 1:\n for node in self.children_hash[hash_key]:\n node.to_delete = True\n\n def _construct_result_path(self, node, path, result):\n for ch, child in node.children.items():\n if not child.to_delete:\n path.append(ch)\n result.append(path[:])\n self._construct_result_path(child, path, result)\n path.pop()\n\n def get_folders_without_duplicates(self):\n result = []\n self._construct_result_path(self.root, [], result)\n return result\n\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n trie = Trie()\n trie.insert_paths(paths)\n trie.serialise_children(trie.root)\n 
trie.mark_duplicates()\n result = trie.get_folders_without_duplicates()\n return result\n``` | 1 | 0 | ['Hash Table', 'Backtracking', 'Trie', 'Python3'] | 0 |
delete-duplicate-folders-in-system | Java optimized solution (beast 100%) | java-optimized-solution-beast-100-by-ale-yun4 | Intuition\nTo store full info about folder\'s content in a huge string and use HashMap to check for duplicates\n Describe your first thoughts on how to solve th | alex-yer | NORMAL | 2024-02-02T09:44:04.484653+00:00 | 2024-02-02T09:44:04.484683+00:00 | 200 | false | # Intuition\nTo store full info about folder\'s content in a **huge** string and use HashMap to check for duplicates\n<!-- Describe your first thoughts on how to solve this problem. -->\n\n# Approach\nTo be efficient, we sort incoming array by path string length, and then iterate through it forward (to make sure all parent folders already iterated before) or backward (to make sure all children folders are already iterated before).\n\nWe use TreeMap to store children - although I haven\'t noticed any difference in peformance.\n<!-- Describe your approach to solving the problem. -->\n\n# Complexity\n- Time complexity:\n<!-- Add your time complexity here, e.g. $$O(n)$$ -->\n\n- Space complexity:\n<!-- Add your space complexity here, e.g. 
$$O(n)$$ -->\n\n# Code\n```\nclass Solution {\n class Node {\n Map<String, Node> subNodes = new TreeMap<>();\n\n String content = "";\n\n boolean remove = false;\n\n void markRemove() {\n if (remove) {\n return;\n }\n remove = true;\n if (subNodes != null) {\n for (Node value : subNodes.values()) {\n value.markRemove();\n }\n }\n }\n }\n\n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n paths.sort(Comparator.comparingInt(List::size));\n List<Node> nodes = new ArrayList<>(paths.size());\n Node rootNode = new Node();\n for (List<String> pathList : paths) {\n Node current = rootNode;\n int last = pathList.size() - 1;\n for (int i = 0; i < last; i++) {\n String s = pathList.get(i);\n current = current.subNodes.get(s);\n }\n String name = pathList.get(last);\n Node node = new Node();\n current.subNodes.put(name, node);\n nodes.add(node);\n }\n StringBuilder content = new StringBuilder();\n Map<String, Node> nodeByContent = new HashMap<>();\n for (int i = nodes.size() - 1; i >= 0; i--) {\n Node node = nodes.get(i);\n if (node.subNodes.isEmpty()) {\n continue;\n }\n for (Map.Entry<String, Node> entry : node.subNodes.entrySet()) {\n content.append(entry.getKey()).append(\'{\').append(entry.getValue().content).append(\'}\');\n }\n node.content = content.toString();\n content.delete(0, content.length());\n Node similar = nodeByContent.putIfAbsent(node.content, node);\n if (similar != null) {\n node.markRemove();\n similar.markRemove();\n }\n }\n List<List<String>> ans = new ArrayList<>();\n for (int i = 0; i < paths.size(); i++) {\n if (!nodes.get(i).remove) {\n ans.add(paths.get(i));\n }\n }\n return ans;\n }\n}\n``` | 1 | 0 | ['Java'] | 0 |
delete-duplicate-folders-in-system | C++ | Passes all tests | Explanation | 95% Faster | 80% less mem | c-passes-all-tests-explanation-95-faster-fcdg | \n/*\nImportant to note:\n1) Here we use Map in Node as to unordered_map. This is due to the fact that when you will\n\trun dfs or serialize on the subfolders a | gkaran | NORMAL | 2022-09-04T00:21:37.279193+00:00 | 2022-09-04T00:21:37.279227+00:00 | 336 | false | ```\n/*\nImportant to note:\n1) Here we use Map in Node as to unordered_map. This is due to the fact that when you will\n\trun dfs or serialize on the subfolders althrough they have exact same folder structure. \n\tThe key in the duplicates map might be different when you use unordered_map.\n2) It is very important to define the structure of root->child i.e. by adding ( and ) as necessary\n\tas different folder structure might also lead to same keys in duplicates again causing\n\tproblems.\n*/\nstruct Node {\n map<string, Node*> child;\n bool toDelete = false;\n};\n\nclass Solution {\n unordered_map<string, vector<Node*>> duplicates;\n vector<vector<string>> ans;\npublic:\n string dfs(Node* root) {\n string currValue = "";\n if (root->child.size() > 0) {\n currValue += "(";\n } else {\n return "";\n }\n for (auto& child : root->child) {\n currValue += child.first + dfs(child.second);\n }\n currValue += ")";\n \n if (currValue != "") {\n duplicates[currValue].push_back(root);\n }\n \n return currValue;\n }\n \n void insert(Node* root, vector<string>& path, int i) {\n if (i >= path.size()) return;\n if (root->child.find(path[i]) == root->child.end()) {\n root->child[path[i]] = new Node();\n }\n insert(root->child[path[i]], path, i + 1);\n }\n \n void removeUnwantedNodes(Node* root, vector<string>& temp) {\n if (root->toDelete) return;\n for(auto& child : root->child) {\n temp.push_back(child.first);\n removeUnwantedNodes(child.second, temp);\n temp.pop_back();\n }\n if (temp.size() > 0) {\n ans.push_back(temp);\n }\n }\n \n vector<vector<string>> 
deleteDuplicateFolder(vector<vector<string>>& paths) {\n Node* root = new Node();\n for (auto& path : paths) {\n insert(root, path, 0);\n }\n dfs(root);\n \n for (auto& dup : duplicates) {\n if (dup.second.size() > 1) {\n for (auto& node : dup.second) {\n node->toDelete = true;\n }\n }\n }\n \n vector<string> temp;\n removeUnwantedNodes(root, temp);\n \n return ans;\n }\n};\n``` | 1 | 0 | ['Trie', 'C'] | 0 |
delete-duplicate-folders-in-system | Javascript remove duplicated subtrees | javascript-remove-duplicated-subtrees-by-5ik7 | \nfunction Node (val, parent) {\n this.val = val\n this.parent = parent\n this.children = {}\n}\n\n/**\n * @param {string[][]} paths\n * @return {strin | yellowduckyugly | NORMAL | 2022-07-19T10:45:56.340818+00:00 | 2022-07-19T10:45:56.340857+00:00 | 163 | false | ```\nfunction Node (val, parent) {\n this.val = val\n this.parent = parent\n this.children = {}\n}\n\n/**\n * @param {string[][]} paths\n * @return {string[][]}\n */\nvar deleteDuplicateFolder = function(paths) {\n \n //Approach: We can imagine this question as a tree. We want to find all the duplicated subtrees but the root of this subtree can be any value\n \n // 1. construct a tree for file structure\n // 2. traverse the tree and create a subTreeMap of subtree \n // 2.1. The key is bfsTraversal of the subtree. We replace the current node value in the key with *\n // 2.2. the value to the key of this map will be array of nodes that have the same preorderTraversal\n // 3. subTreeMap value has more than 1 element, the subtree structure is the same, we remove this node from it\'s parent\n // 4. traverse the tree to construct the tree as path array for ans\n \n \n // 1. Construct the tree\n const root = new Node(null,null)\n for (const path of paths) {\n let current = root\n for (const file of path) {\n if (!current.children[file]) {\n \n current.children[file] = new Node(file,current) // remember the parent, so it\'s easier to remove this node later\n }\n current = current.children[file]\n \n }\n }\n \n // 2. 
Construct subTreeMap\n const subTreeMap = {}\n function bfsTraversal(node, order) {\n \n if (Object.keys(node.children).length === 0) { // return node value at the leaf\n return node.val\n }\n let string = node.val+"," // add current node value\n Object.keys(node.children).sort().forEach((key) => {\n string += bfsTraversal(node.children[key], order.slice())+"," // add all subtrees value\n })\n if (node.val) {\n const string2 = string.substring(0,0) + "*" + string.substring(1+node.val.length) // replace current node with *\n if (!subTreeMap[string2]) subTreeMap[string2] = []\n subTreeMap[string2].push(node) // store the node value\n }\n return string\n }\n bfsTraversal(root, [])\n \n // 3. Find duplicated subtrees\n const repeated = Object.values(subTreeMap).filter(array => array.length > 1)\n if (repeated.length > 0) {\n repeated.forEach(repeat => {\n repeat.forEach(node => {\n delete node.parent.children[node.val] // remove this node from the parent\n }) \n })\n }\n \n // 4. Return pruned tree as array\n const fileArray = [] \n function generateFileArray(node,fileNames) {\n if (node.val) {\n fileNames.push(node.val)\n fileArray.push(fileNames.slice()) // add sub directory\n } \n Object.keys(node.children).forEach(key => {\n generateFileArray(node.children[key], fileNames)\n })\n fileNames.pop() // backtrack to next directory\n }\n generateFileArray(root,[])\n return fileArray\n};\n\n\n\n``` | 1 | 0 | ['Backtracking', 'Tree', 'Breadth-First Search'] | 1 |
delete-duplicate-folders-in-system | Java Trie Node + Serialization With Explanation | java-trie-node-serialization-with-explan-l1rt | I want to start by saying this problem is hard, even for a hard problem. This solution took me a good 20-30 minutes just to conceptualize. You could definitely | seanchrisbell | NORMAL | 2022-07-17T00:39:12.942068+00:00 | 2022-07-18T05:49:47.180179+00:00 | 258 | false | I want to start by saying this problem is hard, even for a hard problem. This solution took me a good 20-30 minutes just to conceptualize. You could definitely solve this problem using a directory structure with just Maps, but using Trie Nodes is a little less annoying to code IMO. If you don\'t know what a Trie Node is, you should really go read up on it and try to solve a simpler problem first. \n\n **The idea:**\n We want to build our directory structure using a generic TrieNode builder algorithm, so every directory node contains a reference to each of its child directories, in the form of a map. We will keep track of a list of indicies representing every `i` where `paths[i]` crosses through this node in our directory structure. This will come in handy later when we want to create our non-duplicate path list. \n \n Now all we need is to be able to identify two identical directory structures. From the problem description, we can tell we don\'t care about the name of the directory, but only whether all of its contents are identical to another directory somewhere else. But since every directory contains subdirectories that have their own directory structures, finding two identical structures is not trivial. The solution? Serialization. 
For any directory `node`, and ant of its children `child`, we can append `child.name:serialize(child)` until our serialization for node looks something like \n ```\n child1.name:serialize(child1),child2.name:serialize(child2),...childn.name:serialize(childn)\n ```\n There is one caveat here, which is that we need the children to be in lexicographic order (or any constant ordering structure) so when we serialize 2 directories with identical contents, we get the same serialization. \n \n Now that we know how to tell when 2 directories are identical, we just need to be able to correlate that to a return value. The problem asks you to return any paths that correspond to directory structures without any identical directories. This is where that list of indicies we kept while building our TrieNode graph comes in handy. If any two directories have identical contents, add all of the indicies from both nodes to our list of duplicate paths. Then just iterate over the list of paths again, adding any path at an index not in our list of duplicates.\n \n The code for this solution ends up looking like this:\n```\nclass Solution {\n \n class TrieNode {\n\t\t// Use TreeMap so keys stay in the same order for any 2 nodes, so when we\n\t\t// serialize 2 nodes with identical structures, we will get the same result\n TreeMap<String, TrieNode> next;\n List<Integer> indicies;\n String contents;\n \n TrieNode() {\n next = new TreeMap();\n indicies = new ArrayList();\n }\n \n TrieNode addChild(String dir) {\n if (next.containsKey(dir))\n return next.get(dir);\n \n next.put(dir, new TrieNode());\n return next.get(dir);\n }\n \n void addIndex(int index) {\n indicies.add(index);\n }\n \n String serialize() {\n if (contents != null)\n return contents;\n \n StringBuilder sb = new StringBuilder();\n next.forEach((k, v) -> {\n sb.append(k);\n sb.append(":");\n sb.append(v.serialize());\n\t\t\t\t// Need to use separate delimiter for same level\n sb.append(",");\n });\n \n return contents = 
sb.toString();\n }\n }\n \n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n TrieNode root = buildTrie(paths);\n\t\t// Store the indicies of the paths that correspond to duplicate directories\n Set<Integer> duplicates = new HashSet();\n findDuplicates(root, new HashMap<String, List<Integer>>(), duplicates);\n List<List<String>> res = new ArrayList();\n for (int i = 0; i < paths.size(); ++i) {\n if (!duplicates.contains(i))\n res.add(paths.get(i));\n }\n \n return res;\n }\n \n private String findDuplicates(TrieNode node, Map<String, List<Integer>> directories, Set<Integer> dups) {\n for (TrieNode child : node.next.values()) {\n String key = findDuplicates(child, directories, dups);\n if (key.isEmpty())\n continue;\n \n if (directories.containsKey(key)) {\n dups.addAll(child.indicies);\n dups.addAll(directories.get(key));\n } else\n directories.put(key, child.indicies);\n }\n \n return node.serialize();\n }\n \n private TrieNode buildTrie(List<List<String>> paths) {\n TrieNode root = new TrieNode();\n for (int i = 0; i < paths.size(); ++i) {\n TrieNode node = root;\n for (String dir : paths.get(i)) {\n node = node.addChild(dir);\n\t\t\t\t// Add the index so we know if this directory ends up being a \n\t\t\t\t// duplicate, we will know which paths were associated with it\n node.addIndex(i);\n }\n }\n \n return root;\n }\n}\n```\nPlease upvote if you liked the solution or found the explanation useful :) | 1 | 0 | [] | 0 |
delete-duplicate-folders-in-system | C++ Trie cleaned-up code, no duplicated data | c-trie-cleaned-up-code-no-duplicated-dat-yp3h | The version down below is actually the same as most others. It\'s just a little bit cleaned up.\nE.g. there is no need to store the name twice in the trie node | JosipDev | NORMAL | 2022-06-20T15:21:16.212070+00:00 | 2022-06-20T15:26:34.517650+00:00 | 404 | false | The version down below is actually the same as most others. It\'s just a little bit cleaned up.\nE.g. there is no need to store the name twice in the trie node as it\'s already in the children map.\nThere\'s no flags marking the nodes to delete. This is to keep the trie node universal and reusable, i.e. the code to mark thos nodes is splitted in a separate unordered_map.\nOtherwise, there are only to functions to operate on the generated trie - deduplicate, which collects all the nodes it should later avoid collecting in getPaths, which basicalle just reconstructs the paths from the trie taking into account the before mentioned statement.\n\n```\nstruct Trie {\n void insert(const vector<string>& path) {\n auto node = this;\n for (auto& p: path) {\n if (!node->children.count(p)) {\n node->children[p] = make_unique<Trie>();\n }\n node = node->children[p].get();\n }\n }\n map<string, unique_ptr<Trie>> children; \n};\n\nclass Solution {\n Trie root;\n \n map<string, Trie*> seen;\n unordered_set<Trie*> toDelete;\n \n string deduplicate(Trie* node) {\n \n string path; \n for (auto& [s, n]: node->children) { \n path += s + deduplicate(n.get());\n } \n if (path.empty()) return "";\n \n if (seen.count(path)) {\n toDelete.insert(seen[path]);\n toDelete.insert(node);\n } else seen[path] = node; \n \n return "(" + path + ")";\n }\n \n void getPaths(Trie* node, vector<vector<string>>& paths, vector<string>& path) {\n for (auto& [s, n]: node->children) {\n if (toDelete.count(n.get())) continue;\n path.push_back(s);\n getPaths(n.get(), paths, path);\n path.pop_back();\n }\n \n if 
(!path.empty()) paths.push_back(path);\n }\npublic:\n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n for (auto& path: paths) root.insert(path);\n paths.clear();\n deduplicate(&root);\n getPaths(&root, paths, vector<string>() = {});\n return paths;\n }\n};\n``` | 1 | 0 | ['Depth-First Search', 'Trie', 'C', 'C++'] | 0 |
delete-duplicate-folders-in-system | Python3 solution - clean code (trie, dfs and Hashmap) | python3-solution-clean-code-trie-dfs-and-qe3i | \nclass TrieNode:\n def __init__(self, char):\n self.children = {}\n self.is_end = False\n self.child_hash = ""\n self.char = cha | myvanillaexistence | NORMAL | 2022-06-16T09:24:49.263510+00:00 | 2022-06-16T09:24:49.263548+00:00 | 321 | false | ```\nclass TrieNode:\n def __init__(self, char):\n self.children = {}\n self.is_end = False\n self.child_hash = ""\n self.char = char\n\nclass Trie:\n def __init__(self):\n self.root = TrieNode("/")\n self.hashmap = collections.defaultdict(int)\n self.duplicates = []\n\n def insert(self, folder):\n current_node = self.root\n for char in folder:\n if char not in current_node.children:\n current_node.children[char] = TrieNode(char)\n current_node = current_node.children[char]\n current_node.is_end = True\n\n def _hash_children(self, root):\n for char in sorted(root.children.keys()):\n self._hash_children(root.children[char])\n root.child_hash += char + \'|\' + root.children[char].child_hash + \'|\' \n self.hashmap[root.child_hash] += 1\n \n def hash_children(self):\n current_node = self.root\n self._hash_children(current_node)\n \n def _get_duplicates(self, root, path):\n if root.children and self.hashmap[root.child_hash] > 1: \n return\n self.duplicates.append(path + [root.char])\n for char in root.children:\n self._get_duplicates(root.children[char], path + [root.char])\n\n def get_duplicates(self):\n current_node = self.root\n for char in current_node.children:\n self._get_duplicates(current_node.children[char], [])\n return self.duplicates\n\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n trie = Trie()\n for path in paths:\n trie.insert(path)\n trie.hash_children()\n return trie.get_duplicates()\n``` | 1 | 1 | ['Depth-First Search', 'Trie', 'Python3'] | 0 |
delete-duplicate-folders-in-system | Javascript/Typescript Trie Simple Solution (100% faster than the rest) | javascripttypescript-trie-simple-solutio-8ghd | Based on solution by @GeorgeChryso, added sorting while creating hash to work with scenario when sub folders are arranged in unsorted manner. (like a folder x c | gourav_gunjan | NORMAL | 2022-04-24T08:10:13.536391+00:00 | 2022-04-24T08:11:00.253669+00:00 | 254 | false | Based on solution by @GeorgeChryso, added sorting while creating hash to work with scenario when sub folders are arranged in unsorted manner. (like a folder x can have c,d subfolders, and when we create hash for marking these items for deletion, if other folder has d,c folder name, it would not work)\n\n```\nclass PathTrieNode {\n val = \'\';\n children = new Map<string, PathTrieNode>();\n hash = \'\';\n // Parent pointer is important when we are trying to delete nodes.\n parent: PathTrieNode | undefined;\n}\n\nclass PathTrie {\n private root = new PathTrieNode();\n private hashMemMap = new Map<string, PathTrieNode[]>();\n \n /**\n * Builds Trie tree for each path passed in Test Case\n */\n build(path: string[]): void {\n let node = this.root;\n for (let pathVal of path) {\n if (!node.children.has(pathVal)) {\n const childNode = new PathTrieNode();\n childNode.parent = node;\n childNode.val = pathVal;\n node.children.set(childNode.val, childNode);\n node = childNode;\n } else {\n node = node.children.get(pathVal);\n }\n }\n }\n\n /**\n * Creates hash for a node based on it\'s sub folders\n * Note that sorting for subfolder is important.\n */\n createHash(node = this.root): string {\n if (node.children.size === 0) {\n // leaf node\n return node.val;\n }\n let hash = [];\n for (let child of node.children.values()) {\n hash.push(this.createHash(child));\n }\n hash = hash.sort();\n node.hash = hash.join(\'~\');\n if (!this.hashMemMap.has(node.hash)) {\n this.hashMemMap.set(node.hash, [])\n }\n this.hashMemMap.get(node.hash).push(node);\n return 
node.val + \'[\' + node.hash + \']\';\n }\n \n /**\n * Deletes similar folders as map would contain more than one entries for those.\n * For deleting a node, we go its parent, and delete parent\'s connection to child.\n */\n removeDuplicates(node = this.root): void {\n if (this.hashMemMap.has(node.hash) && this.hashMemMap.get(node.hash).length > 1) {\n for (let deleteNode of this.hashMemMap.get(node.hash)) {\n deleteNode.parent.children.delete(deleteNode.val);\n }\n }\n for (let child of node.children.values()) {\n this.removeDuplicates(child);\n }\n }\n \n /**\n * Stack is used to track folder structure (dfs kind of thing)\n * Res contains actual items.\n */\n toArray(node = this.root, stack = [], res = []): string[][] {\n if (node.val !== \'\') {\n stack.push(node.val);\n res.push([...stack]);\n }\n for (let child of node.children.values()) {\n this.toArray(child, stack, res);\n }\n stack.pop();\n return res;\n }\n \n}\n\nfunction deleteDuplicateFolder(paths: string[][]): string[][] {\n const pathTrie = new PathTrie();\n for (let path of paths) {\n pathTrie.build(path);\n }\n pathTrie.createHash();\n pathTrie.removeDuplicates();\n return pathTrie.toArray();\n};\n\n``` | 1 | 0 | ['TypeScript', 'JavaScript'] | 0 |
delete-duplicate-folders-in-system | A graspable recursive solution using Trie | a-graspable-recursive-solution-using-tri-zpw4 | \nclass Solution {\n class TrieNode{\n StringBuilder sb;\n Map<String, TrieNode> children;\n \n public TrieNode(){\n s | su7ss | NORMAL | 2022-02-21T17:00:08.828644+00:00 | 2022-02-21T17:05:45.589935+00:00 | 244 | false | ```\nclass Solution {\n class TrieNode{\n StringBuilder sb;\n Map<String, TrieNode> children;\n \n public TrieNode(){\n sb = new StringBuilder();\n children = new HashMap<>();\n }\n }\n \n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n TrieNode root = new TrieNode();\n for(List<String> path: paths){\n TrieNode curr = root;\n \n for(String node: path){\n if(!curr.children.containsKey(node)){\n curr.children.put(node, new TrieNode());\n }\n \n curr = curr.children.get(node);\n }\n }\n \n Map<String, Integer> freq = new HashMap<>();\n encode(root, freq);\n List<List<String>> res = new ArrayList<>();\n List<String> path = new ArrayList<>();\n dfs(root, freq, res, path);\n return res;\n }\n \n\t/* \n\t\tEncodes the folders and stores their occurence count in a frequency map\n\t\te.g.: \n\t\t\t\tpaths: [["a"],["c"],["d"],["a","b"],["c","b"],["d","a"]]\n\t\t\t\tfreq: {"b()":2,"a()":1,"a(b())c(b())d(a())":1}\n\t*/\n\t\n private void encode(TrieNode root, Map<String, Integer> freq){\n if(root.children.isEmpty()){\n return;\n }\n \n List<String> list = new ArrayList<>();\n for(Map.Entry<String, TrieNode> entry: root.children.entrySet()){\n String folder = entry.getKey();\n TrieNode child = entry.getValue();\n encode(child, freq);\n list.add(folder + "(" + child.sb.toString() + \')\');\n }\n \n Collections.sort(list);\n \n for(String str: list){\n root.sb.append(str);\n }\n \n String serialStr = root.sb.toString();\n freq.put(serialStr, freq.getOrDefault(serialStr, 0) + 1);\n }\n \n\t/*\n\t\tA simple DFS to discard the folders whose frequency of occurence is greater than 1\n\t*/\n\t\n private void 
dfs(TrieNode root, Map<String, Integer> freq, List<List<String>> res, List<String> path){\n if(freq.getOrDefault(root.sb.toString(), 0) > 1){\n return;\n }\n \n if(!path.isEmpty()){\n res.add(new ArrayList<>(path));\n }\n \n for(Map.Entry<String, TrieNode> entry: root.children.entrySet()){\n String folder = entry.getKey();\n TrieNode child = entry.getValue();\n path.add(folder);\n dfs(child, freq, res, path);\n path.remove(path.size() - 1);\n }\n }\n}\n``` | 1 | 0 | ['Depth-First Search', 'Trie', 'Recursion'] | 0 |
delete-duplicate-folders-in-system | Trie+ dfs+ serialization | trie-dfs-serialization-by-mohit0749-y1k5 | Approach\n\n Use trieNode to create the trie of folder structure.\n Then do dfs on trie and serialize subtrees as strings, then add nodes which has the same ser | mohit0749 | NORMAL | 2022-02-09T19:19:42.824689+00:00 | 2022-02-09T19:19:42.824718+00:00 | 317 | false | **Approach**\n\n* Use `trieNode` to create the trie of folder structure.\n* Then do dfs on trie and serialize subtrees as strings, then add nodes which has the same serialize string into a dictionary and increment it\'s count,\n* Traverse the trie, if nodes has the same serialize(hash) string and count>1, then it\'s duplicated subtree, we delete those from trie.\n* Traverse the whole trie again to get all the paths.\n\n\n\n```\nfunc deleteDuplicateFolder(paths [][]string) [][]string {\n\ttrie := Trie{&trieNode{make(map[string]*trieNode), ""}, make(map[string]int)}\n\tfor _, path := range paths {\n\t\ttrie.Insert(path)\n\t}\n\ttrie.Serialize()\n\ttrie.DeleteDuplicate()\n\treturn trie.Traverse()\n\n}\n\ntype Trie struct {\n\troot *trieNode\n\tmp map[string]int\n}\ntype trieNode struct {\n\tchild map[string]*trieNode\n\thash string\n}\n\nfunc newTrieNode() *trieNode {\n\treturn &trieNode{make(map[string]*trieNode), ""}\n}\n\nfunc (t *Trie) DeleteDuplicate() {\n\tvar dfs func(*trieNode)\n\tdfs = func(root *trieNode) {\n\t\tif len(root.child) == 0 {\n\t\t\treturn\n\t\t}\n\t\tfor k := range root.child {\n\t\t\tif v, ok := t.mp[root.child[k].hash]; ok && v > 1 {\n\t\t\t\tdelete(root.child, k)\n\t\t\t} else {\n\t\t\t\tdfs(root.child[k])\n\t\t\t}\n\t\t}\n\t}\n\tdfs(t.root)\n}\n\nfunc (t *Trie) Insert(path []string) {\n\troot := t.root\n\tfor _, p := range path {\n\t\tif root.child[p] == nil {\n\t\t\troot.child[p] = newTrieNode()\n\t\t}\n\t\troot = root.child[p]\n\t}\n}\n\nfunc (t Trie) Serialize() {\n\tvar dfs func(*trieNode, string) string\n\tdfs = func(root *trieNode, n string) string {\n\t\tif 
len(root.child) == 0 {\n\t\t\treturn n\n\t\t}\n\t\thash := ""\n\t\tvar children []string\n\t\tfor k := range root.child {\n\t\t\tchildren = append(children, k)\n\t\t}\n\t\tsort.Strings(children)\n\t\tfor _, k := range children {\n hash += fmt.Sprintf("(%s)",dfs(root.child[k], k))\n\t\t}\n\t\troot.hash = hash\n\t\tt.mp[hash]++\n\t\tfmt.Println("root: ", n, "=", root.hash)\n\t\treturn n + "/" + hash\n\t}\n\tt.root.hash = dfs(t.root, "")\n}\n\nfunc (t Trie) Traverse() [][]string {\n\tpaths := make([][]string, 0)\n\tvar dfs func(root *trieNode, currPath []string)\n\tdfs = func(root *trieNode, currPath []string) {\n\t\tif len(root.child) == 0 {\n\t\t\tif len(currPath) > 0 {\n\t\t\t\tpaths = append(paths, append([]string{}, currPath...))\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif len(currPath) > 0 {\n\t\t\tpaths = append(paths, append([]string{}, currPath...))\n\t\t}\n\t\tfor k, _ := range root.child {\n\t\t\tdfs(root.child[k], append(currPath, k))\n\t\t}\n\t}\n\n\tdfs(t.root, []string{})\n\treturn paths\n}\n``` | 1 | 0 | ['Depth-First Search', 'Trie', 'Go'] | 0 |
delete-duplicate-folders-in-system | Difficult Question mostly from volume of code | difficult-question-mostly-from-volume-of-20uh | \nclass Solution {\n HashMap<String, Set<Trie>> dup = new HashMap<>();\n List<List<String>> cleanedPaths = new ArrayList<>();\n\n class Trie {\n\n | mi1 | NORMAL | 2021-09-05T16:40:03.273633+00:00 | 2021-09-05T16:40:03.273687+00:00 | 194 | false | ```\nclass Solution {\n HashMap<String, Set<Trie>> dup = new HashMap<>();\n List<List<String>> cleanedPaths = new ArrayList<>();\n\n class Trie {\n\n public TreeMap<String, Trie> map = new TreeMap<>();\n public boolean leaf = false;\n public String name;\n public Trie parent;\n public boolean delete = false;\n\n @Override\n public String toString() {\n return name;\n }\n\n public Trie(String name, Trie parent) {\n this.name = name;\n this.parent = parent;\n }\n\n void insert(List<String> path, int idx) {\n if (idx >= path.size()) {\n this.leaf = true;\n return;\n }\n if (!map.containsKey(path.get(idx))) {\n map.put(path.get(idx), new Trie(path.get(idx), this));\n }\n map.get(path.get(idx)).insert(path, idx + 1);\n }\n }\n\n String traverse(Trie root) {\n StringBuilder hash = new StringBuilder();\n for (String key : root.map.keySet()) {\n String curr = traverse(root.map.get(key));\n hash.append(curr).append("#");\n }\n if (root.map.keySet().size() != 0) {\n if (!dup.containsKey(hash.toString())) {\n dup.put(hash.toString(), new HashSet<>());\n }\n dup.get(hash.toString()).add(root);\n }\n return "/" + root.name + hash;\n }\n\n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n Trie t = new Trie("", null);\n for (List<String> folder : paths) {\n t.insert(folder, 0);\n }\n traverse(t);\n for (String key : dup.keySet()) {\n if (dup.get(key).size() > 1) {\n Set<Trie> set = dup.get(key);\n for (Trie trie : set) {\n if (trie.parent != null) {\n trie.parent.map.remove(trie.name);\n }\n }\n }\n }\n allPaths(t, new ArrayList<>());\n return cleanedPaths;\n }\n\n private void 
allPaths(Trie node, List<String> path) {\n if (!node.name.equals("")) {\n path.add(node.name);\n }\n if (!path.isEmpty()) {\n cleanedPaths.add(new ArrayList<>(path));\n }\n for (String child : node.map.keySet()) {\n allPaths(node.map.get(child), path);\n }\n if (path.size() > 0) {\n path.remove(path.size() - 1);\n }\n }\n}\n``` | 1 | 0 | [] | 0 |
delete-duplicate-folders-in-system | Java | HashMap | Trie | Backtracking | easy to understand | java-hashmap-trie-backtracking-easy-to-u-vk1j | If you know HashMaps and Trie and Backtracking, it should be easy for you to understand the solution\n\njava\nclass Solution {\n static class Node {\n | ahmedash95 | NORMAL | 2021-08-22T14:16:14.991228+00:00 | 2021-08-22T14:18:22.356221+00:00 | 461 | false | If you know HashMaps and Trie and Backtracking, it should be easy for you to understand the solution\n\n```java\nclass Solution {\n static class Node {\n String name;\n Map<String, Node> childern = new HashMap<>();\n\n private String hashCode = null;\n\n public Node(String _name) {\n name = _name;\n }\n\n public void add(List<String> path) {\n Node cur = this;\n for (String file : path) {\n if (!cur.childern.containsKey(file)) {\n cur.childern.put(file, new Node(file));\n }\n cur = cur.childern.get(file);\n }\n }\n\n public String getHashCode() {\n if(hashCode == null) {\n hashCode = computeHash();\n }\n\n return hashCode;\n }\n\n private String computeHash() {\n StringBuilder sb = new StringBuilder();\n List<Node> nodes = new ArrayList<>();\n \n for(Node n: childern.values()) {\n nodes.add(n);\n }\n\n if(nodes.size() == 0)\n return null;\n\n nodes.sort((a, b) -> a.name.compareTo(b.name));\n\n for (Node n: nodes){\n sb.append(\'(\');\n sb.append(n.name + n.getHashCode());\n sb.append(\')\');\n }\n\n return sb.toString();\n }\n }\n\n private static void getGoodFiles(Node node, Map<String, Integer> occurs, List<String> cur, List<List<String>> ans) {\n if(occurs.containsKey(node.getHashCode()) && occurs.get(node.getHashCode()) > 1) return;\n\n cur.add(node.name);\n ans.add(new ArrayList<>(cur));\n\n for(Node n: node.childern.values())\n getGoodFiles(n, occurs, cur, ans);\n\n cur.remove(cur.size()-1);\n }\n\n private static void findOccurs(Node node, Map<String, Integer> occurs) {\n String key = node.getHashCode();\n if(key != null) {\n occurs.put(key, 
occurs.getOrDefault(node.getHashCode(), 0)+1);\n }\n\n for(Node n: node.childern.values()) {\n findOccurs(n, occurs);\n }\n }\n\n\n static Node root;\n\n public static List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n root = new Node("");\n for (List<String> path : paths)\n root.add(path);\n\n Map<String, Integer> occurs = new HashMap<>();\n findOccurs(root, occurs);\n\n List<List<String>> ans = new ArrayList<>();\n for(Node n: root.childern.values())\n getGoodFiles(n, occurs, new ArrayList<>(), ans);\n\n return ans;\n }\n}\n``` | 1 | 0 | [] | 0 |
delete-duplicate-folders-in-system | Hashing+dfs | hashingdfs-by-feynman_1729_67-0uyt | \nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n prime=1000000000+7\n class node:\n | isparsh_671 | NORMAL | 2021-07-25T10:35:48.292074+00:00 | 2021-07-25T10:35:48.292110+00:00 | 91 | false | ```\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n prime=1000000000+7\n class node:\n def __init__(self,val):\n self.val=val\n self.childs={}\n self.hash=0\n aa=[]\n for l in paths:\n for i in l:\n aa.append(i)\n aa=list(set(aa))\n d={}\n for i in range(len(aa)):\n d[aa[i]]=i+1\n temp=[]\n for p in paths:\n a=[]\n for i in p:\n a.append(d[i])\n temp.append(a)\n paths=temp\n def addChild(root,p,i):\n if i==len(p):\n return\n if p[i] not in root.childs:\n root.childs[p[i]]=node(p[i])\n addChild(root.childs[p[i]],p,i+1)\n tree=node(prime-1)\n for p in paths:\n addChild(tree,p,0)\n \n freq={}\n def dfs(root):\n val=0\n m=1\n for child in sorted(root.childs.keys()):\n if root.childs[child]!=None:\n val=(val+(dfs(root.childs[child])*m%prime))%prime\n m*=2\n val=(val*(len(aa)+1))%prime\n if val not in freq:\n freq[val]=0\n freq[val]+=1\n root.hash=val\n return val+root.val\n res=[]\n ans=[]\n dfs(tree)\n def dfs2(root,curr):\n curr.append(root.val)\n if (freq[root.hash]==1) or len(root.childs)==0:\n res.append(list(curr))\n else:\n curr.pop()\n return\n for child in sorted(root.childs.keys()):\n if root.childs[child]!=None:\n dfs2(root.childs[child],curr)\n curr.pop()\n ll=[]\n dfs2(tree,ll)\n for a in res:\n tmp=[]\n for i in a:\n if i==prime-1:\n continue\n tmp.append(aa[i-1])\n if tmp:\n ans.append(tmp)\n \n return ans\n \n``` | 1 | 1 | [] | 0 |
delete-duplicate-folders-in-system | Java Trie + HashMap + StringBuilder 99 ms faster than 100% | java-trie-hashmap-stringbuilder-99-ms-fa-uzqk | \nclass Node\n{\n String s; // Value of node (folder name)\n String encoded; // Encoded subtree including and below current node\n Map<String, Node> ch | gautamsw51 | NORMAL | 2021-07-25T10:13:15.786340+00:00 | 2021-07-25T10:13:15.786382+00:00 | 144 | false | ```\nclass Node\n{\n String s; // Value of node (folder name)\n String encoded; // Encoded subtree including and below current node\n Map<String, Node> children; // N-ary tree so children taken as hashmap\n Node()\n {\n this.encoded = "";\n this.s = "";\n this.children = new HashMap<>();\n }\n Node(String s)\n {\n this.encoded = "";\n this.s = s;\n this.children = new HashMap<>();\n }\n}\nclass Solution\n{\n Map<String, Integer> freq; // Store frequencies of encoded subtrees\n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths)\n {\n Node root = new Node(); // root node of trie\n for(List<String> path : paths)\n addPath(root, path); // Add all paths to the trie\n freq = new HashMap<>();\n buildHash(root); // Function to find frequency of all subtrees\n List<List<String>> ans = new ArrayList<>(); // Store the required paths\n for(List<String> path : paths)\n if(isValid(root, path)) // Check if the path is not duplicate\n ans.add(path);\n return ans;\n }\n void addPath(Node root, List<String> path)\n {\n // Simple Trie insertion\n for(String s : path)\n {\n if(!root.children.containsKey(s))\n root.children.put(s, new Node(s));\n root = root.children.get(s);\n }\n }\n StringBuilder buildHash(Node root)\n {\n // encode the subtree root as: "root(child1)(child2)(child3)..." 
but recursively\n StringBuilder ret = new StringBuilder(root.s); // To return\n for(String s : root.children.keySet()) // Recursively solve for all children\n ret = ret.append("(").append(buildHash(root.children.get(s))).append(")"); // "root(child1)(child2)(child3)..."\n String x = new String(new StringBuilder("#").append(ret.substring(root.s.length()))); // replace first value by # beacuse a->b and c->b are duplicate folders (see Example 1)\n \n // x has 1st value replaced by # but ret has nothing replaced\n \n if(ret.length()==root.s.length()) /// Fixed a bug here\n x = root.s; /// Leaf node frequency should not be increased\n else /// See Example 5, Withot this fix, path a->z and b->z got removed\n freq.put(x, freq.getOrDefault(x, 0) + 1); // Increment frequency of current subtree\n root.encoded = x; // Store the encoded subtree in root node\n return ret; // return string of the form "root(child1)(child2)(child3)..."\n }\n boolean isValid(Node root, List<String> path)\n {\n for(int i = 0; i < path.size(); i++)\n {\n root = root.children.get(path.get(i));\n if(freq.getOrDefault(root.encoded, 1) != 1) // There are multiple occurences of this subtree so return invalid path\n return false;\n }\n return true;\n }\n}\n``` | 1 | 0 | [] | 0 |
delete-duplicate-folders-in-system | [Python3] Augmented Trie | python3-augmented-trie-by-chuan-chih-2igm | Use trie to store the folders in system and augment each node with t[\'*\'] that points to the node\'s parent and t[\'**\'] that contains the folder\'s own name | chuan-chih | NORMAL | 2021-07-25T05:22:28.929045+00:00 | 2021-07-25T05:22:28.929085+00:00 | 299 | false | Use trie to store the folders in system and augment each node with `t[\'*\']` that points to the node\'s parent and `t[\'**\']` that contains the folder\'s own name.\nTraverse the trie once to find all the leaves. The folders that contain leaves are then candidates for deletion. \nAt each iteration of the while loop we only delete duplicate folders that contain leaves or folders deleted already. If any folders are deleted, their parents are the candidates for the next iteration. \nOne tricky point is that folders contain duplicate folders but with different names are considered different. So instead of just `del parent[f]`, `leaf_ids.add(id(parent[\'*\' + f]))` adds a fake leaf folder to take that into account.\n```\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n leaves = []\n trie_lambda = lambda: collections.defaultdict(trie_lambda)\n trie = trie_lambda()\n trie[\'*\'] = trie[\'**\'] = \'*\'\n \n for p in paths:\n t = trie\n for f in p:\n parent, t = t, t[f]\n t[\'*\'] = parent\n t[\'**\'] = f\n \n def traverse(t):\n if len(t) == 2:\n leaves.append(t)\n else:\n for k, v in t.items():\n if k[0] != \'*\':\n traverse(v)\n traverse(trie)\n \n leaf_ids = set(id(leaf) for leaf in leaves)\n candidates = {id(leaf[\'*\']):leaf[\'*\'] for leaf in leaves}\n \n while candidates:\n new = {}\n dup = collections.defaultdict(list)\n for cand in candidates.values():\n if any(id(v) not in leaf_ids for k, v in cand.items() if k[0] != \'*\'):\n continue\n dup[\',\'.join(sorted(cand.keys()))].append(cand)\n for k, v in dup.items():\n if len(v) > 1:\n for cand in 
v:\n f = cand[\'**\']\n parent = cand[\'*\']\n del parent[f]\n leaf_ids.add(id(parent[\'*\' + f]))\n new[id(parent)] = parent\n candidates = new\n path = []\n ans = []\n def dfs(t):\n for f in t:\n if f[0] != \'*\':\n path.append(f)\n ans.append(list(path))\n dfs(t[f])\n path.pop()\n dfs(trie)\n return ans\n```\nDue to some confusion about the input and leaf detection I didn\'t get to finish in time for the contest :( | 1 | 0 | ['Python3'] | 0 |
delete-duplicate-folders-in-system | Problem statement explain needed | problem-statement-explain-needed-by-clon-kaiw | [["a"],["a","t"],["a","t","y"],["a","z"],["b"],["b","x"],["b","x","y"],["b","z"]]\nWhy run this test return non empty result?\nI assume that t and x are identic | clonemasterUwU | NORMAL | 2021-07-25T05:09:13.638281+00:00 | 2021-07-25T05:09:13.638324+00:00 | 110 | false | ```[["a"],["a","t"],["a","t","y"],["a","z"],["b"],["b","x"],["b","x","y"],["b","z"]]```\nWhy run this test return non empty result?\nI assume that ```t``` and ```x``` are identical, so ```a``` and ```b``` should be identical (cause has same set of indetical subfolder as well) | 1 | 0 | [] | 0 |
delete-duplicate-folders-in-system | Trie perform DFS 3 times | trie-perform-dfs-3-times-by-harry331-f698 | IntuitionUse trie to create a folder structoreApproachAfter the trie is created do a DFS to store the subtree structure as a string key at each trie node , and | harry331 | NORMAL | 2025-04-10T14:59:31.219123+00:00 | 2025-04-10T14:59:31.219123+00:00 | 1 | false | # Intuition
Use trie to create a folder structore
# Approach
After the trie is created do a DFS to store the subtree structure as a string key at each trie node , and use map to count the occourance of the string , whenever the count on this map is greater than 1 mark the node as deleted .
AT the end do a DFS avoiding all the deleted nodes
# Complexity
- Time complexity:
<!-- Add your time complexity here, e.g. $$O(n)$$ -->
- Space complexity:
<!-- Add your space complexity here, e.g. $$O(n)$$ -->
# Code
```cpp []
struct Trie {
unordered_map<string, Trie*> children;
string subFolderStr;
bool isDeleted = false;
};
class Solution {
public:
Trie* root;
unordered_map<string, int> subPathCount;
void AddToTrie(vector<string>& path) {
Trie* node = root;
for (const string& folder : path) {
if (!node->children.count(folder)) {
node->children[folder] = new Trie();
}
node = node->children[folder];
}
}
string dfsSerialize(Trie* node) {
if (node->children.empty()) {
node->subFolderStr = "";
return "";
}
vector<pair<string, string>> childReps;
for (auto& [name, child] : node->children) {
string rep = dfsSerialize(child);
childReps.emplace_back(name, rep);
}
sort(childReps.begin(), childReps.end());
string key;
for (auto& [name, rep] : childReps) {
key += "(" + name + rep + ")";
}
node->subFolderStr = key;
subPathCount[key]++;
return key;
}
void dfsMarkDuplicates(Trie* node) {
if (node->subFolderStr != "" && subPathCount[node->subFolderStr] > 1) {
node->isDeleted = true;
return;
}
for (auto& [name, child] : node->children) {
dfsMarkDuplicates(child);
}
}
void dfsCollect(Trie* node, vector<string>& path, vector<vector<string>>& result) {
for (auto& [name, child] : node->children) {
if (child->isDeleted) continue;
path.push_back(name);
result.push_back(path);
dfsCollect(child, path, result);
path.pop_back();
}
}
vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {
root = new Trie();
// 1. Build Trie
for (auto& path : paths) {
AddToTrie(path);
}
dfsSerialize(root);
dfsMarkDuplicates(root);
vector<vector<string>> result;
vector<string> tempPath;
dfsCollect(root, tempPath, result);
return result;
}
};
``` | 0 | 0 | ['C++'] | 0 |
delete-duplicate-folders-in-system | Find the solution in podcast link below | find-the-solution-in-podcast-link-below-xfwk8 | Listed and read the complete Answer to this quesion
https://open.substack.com/pub/hustlercoder/p/deleting-duplicate-folders-a-pythonic-201?r=1f5fq7&utm_campaign | abmishra1234 | NORMAL | 2025-03-14T20:54:03.749611+00:00 | 2025-03-14T20:54:03.749611+00:00 | 5 | false | Listed and read the complete Answer to this quesion
https://open.substack.com/pub/hustlercoder/p/deleting-duplicate-folders-a-pythonic-201?r=1f5fq7&utm_campaign=post&utm_medium=web&showWelcomeOnShare=false
# Code
```python3 []
from typing import List, Dict
class Solution:
def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:
# Step 1: Build the folder tree using a nested dictionary.
root = {}
for path in paths:
curr = root
for folder in path:
if folder not in curr:
curr[folder] = {}
curr = curr[folder]
# Counter for subtree serializations and a map to store serialization for each node (by id)
counter: Dict[str, int] = {}
serial_map: Dict[int, str] = {}
# DFS to serialize each subtree.
def serialize(node: Dict[str, dict]) -> str:
# Base: empty node returns empty string.
serial = ""
for folder in sorted(node.keys()):
# Build serialization string for each child.
child_serial = folder + "(" + serialize(node[folder]) + ")"
serial += child_serial
# Save the serialization for the current node.
serial_map[id(node)] = serial
counter[serial] = counter.get(serial, 0) + 1
return serial
serialize(root)
# Step 2: DFS to remove duplicate subtrees.
def remove_duplicates(node: Dict[str, dict]) -> None:
# Iterate over a list of keys to avoid runtime error while modifying the dictionary.
for folder in list(node.keys()):
child = node[folder]
# If the child's subtree is non-empty and its serialization appears more than once,
# it is a duplicate and should be removed.
if serial_map[id(child)] != "" and counter[serial_map[id(child)]] > 1:
del node[folder]
else:
remove_duplicates(child)
remove_duplicates(root)
# Step 3: Collect the remaining folder paths.
result = []
def collect_paths(node: Dict[str, dict], path: List[str]) -> None:
for folder in sorted(node.keys()):
new_path = path + [folder]
result.append(new_path)
collect_paths(node[folder], new_path)
collect_paths(root, [])
return result
``` | 0 | 0 | ['Python3'] | 0 |
delete-duplicate-folders-in-system | 🐍 Python | Beats 98.46% | Easy-to-Read Solution 🐍 | python-beats-9846-easy-to-read-solution-gvhdo | IntuitionBuild a folder tree (trie) from the paths.
Serialize each subtree into a unique string.
If two subtrees have the same serialization, they’re duplicates | SoloCoding | NORMAL | 2025-02-11T17:14:43.727923+00:00 | 2025-02-11T17:14:43.727923+00:00 | 14 | false | # Intuition
Build a folder tree (trie) from the paths.
Serialize each subtree into a unique string.
If two subtrees have the same serialization, they’re duplicates.
Mark duplicate folders (and all their subfolders) for deletion.
Collect paths from the remaining tree.
# Approach
Build the Tree: Convert the list of paths into a nested dictionary structure.
Serialize Subtrees: Use DFS to create a string for each subtree (sorting children ensures consistency).
Mark Duplicates: For any serialization that appears more than once, mark those nodes for deletion.
Collect Remaining Paths: Traverse the tree to gather paths that are not marked for deletion.
# Complexity
Time: O(P × L × log(L)), where P is the number of paths and L is the average path length (due to sorting at each node).
Space: O(P × L) for the tree and recursion stack.
# Code
```python3 []
from collections import defaultdict
from typing import List
class Solution:
def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:
# Build the folder tree as a nested dictionary.
tree = {}
for path in paths:
node = tree
for folder in path:
node = node.setdefault(folder, {})
# Dictionary to map serialized subtree representations to the list of nodes with that structure.
duplicates = defaultdict(list)
# Serialize the folder tree using DFS.
def serialize(node):
# Base case: if the node is empty (no subfolders), return a fixed representation.
if not node:
return "()"
# Process children in sorted order to ensure that identical structures produce the same serialization.
children_serialization = "".join(child + serialize(child_node) for child, child_node in sorted(node.items()))
# Wrap the serialization with parentheses.
serial = "(" + children_serialization + ")"
# Map this serialization to the current node.
duplicates[serial].append(node)
return serial
# Serialize the whole tree to populate the 'duplicates' dictionary.
serialize(tree)
# Mark duplicate nodes (and their entire subtrees) for deletion.
# If a serialized structure appears more than once, all corresponding nodes are marked.
for nodes in duplicates.values():
if len(nodes) > 1:
for node in nodes:
node.clear() # Remove all subfolders.
node["#"] = True # Mark the node as deleted.
# Collect the remaining folder paths (those not marked for deletion).
ans = []
def collect_paths(node, path):
# Iterate over children without creating an extra list copy.
for child_name, child_node in node.items():
# Skip nodes that have been marked for deletion.
if "#" in child_node:
continue
new_path = path + [child_name]
ans.append(new_path)
collect_paths(child_node, new_path)
collect_paths(tree, [])
return ans
``` | 0 | 0 | ['Python3'] | 0 |
delete-duplicate-folders-in-system | 1948. Delete Duplicate Folders in System | 1948-delete-duplicate-folders-in-system-w6qrd | IntuitionApproachComplexity
Time complexity:
Space complexity:
Code | G8xd0QPqTy | NORMAL | 2025-01-16T13:06:27.316659+00:00 | 2025-01-16T13:06:27.316659+00:00 | 13 | false | # Intuition
<!-- Describe your first thoughts on how to solve this problem. -->
# Approach
<!-- Describe your approach to solving the problem. -->
# Complexity
- Time complexity:
<!-- Add your time complexity here, e.g. $$O(n)$$ -->
- Space complexity:
<!-- Add your space complexity here, e.g. $$O(n)$$ -->
# Code
```python3 []
from collections import defaultdict
from typing import List
class Directory:
def __init__(self):
self.subdirectories = defaultdict(Directory)
self.marked_for_deletion = False
class Solution:
def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:
def get_signature(directory):
signature = "(" + "".join(subdir + get_signature(directory.subdirectories[subdir]) for subdir in directory.subdirectories) + ")"
if signature != "()":
signature_map[signature].append(directory)
return signature
def collect_unique_paths(directory, current_path):
for subdir in directory.subdirectories:
if not directory.subdirectories[subdir].marked_for_deletion:
collect_unique_paths(directory.subdirectories[subdir], current_path + [subdir])
if current_path:
result_paths.append(current_path[:])
signature_map, root_directory, result_paths = defaultdict(list), Directory(), []
for path in sorted(paths):
current_directory = root_directory
for part in path:
current_directory = current_directory.subdirectories[part]
get_signature(root_directory)
for directories in signature_map.values():
if len(directories) > 1:
for directory in directories:
directory.marked_for_deletion = True
collect_unique_paths(root_directory, [])
return result_paths
``` | 0 | 0 | ['Python3'] | 0 |
delete-duplicate-folders-in-system | Beats 75% in Java | beats-75-in-java-by-matylewicz-xxuh | Intuition\nDirectories that are duplicate are the ones that have exacly the same structure of subdirectories. So the dir that should be marked for deletion is d | matylewicz | NORMAL | 2024-11-20T02:24:11.950365+00:00 | 2024-11-20T02:24:11.950400+00:00 | 7 | false | # Intuition\nDirectories that are duplicate are the ones that have exacly the same structure of subdirectories. So the dir that should be marked for deletion is defined by a hash of all it\'s subdirectories.\n\n# Approach\n1. Build a trie\n2. Each node should have a hash that comprises of all the children, for example A(X()+B()) where A is parent dir and has two empty subdirs X and B\n3. Collect all hashes into a Map<ChildrenHash, List<Dir>>\n4. Find ChildrenHash that have more than 1 Dir\n5. Prune all duplicated Dirs found in step 4\n6. Serialize the Trie \n\n# Complexity\n- Time complexity:\n<!-- Add your time complexity here, e.g. $$O(n)$$ -->\n\n- Space complexity:\n<!-- Add your space complexity here, e.g. 
$$O(n)$$ -->\n\n# Code\n```java []\nclass Solution {\n class Dir {\n Map<String, Dir> children = new HashMap<>();\n String name;\n Dir parent;\n String childrenHash = null;\n\n @Override\n public String toString() {\n return name;\n }\n\n Dir(Dir parent, String name) {\n this.parent = parent;\n this.name = name;\n }\n\n String hash() {\n if (childrenHash != null) {\n return childrenHash;\n }\n\n if (children.isEmpty()) {\n return "";\n }\n\n List<Dir> childList = new ArrayList<>(children.values());\n Collections.sort(childList, (a, b) -> a.name.compareTo(b.name));\n\n String[] parts = new String[childList.size()];\n int i = 0;\n for (Dir chDir : childList) {\n parts[i++] = chDir.name + "(" + chDir.hash() + ")";\n }\n\n childrenHash = String.join("+", parts);\n return childrenHash;\n }\n\n void add(List<String> path) {\n Dir cur = this; // A\n\n for (String part : path) { // A B C\n if (!cur.children.containsKey(part)) {\n cur.children.put(part, new Dir(cur, part)); \n }\n\n cur = cur.children.get(part);\n }\n }\n }\n\n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n Dir root = new Dir(null, "/");\n\n for (List<String> path : paths) {\n root.add(path);\n }\n\n // group duplicate leaves\n Map<String, List<Dir>> hashes = new HashMap<>();\n collectHashes(root, hashes);\n\n for (List<Dir> hashDirs : hashes.values()) {\n if (hashDirs.size() > 1) {\n // prune\n for (Dir dir : hashDirs) { // for each dir in hash list\n dir.parent.children.remove(dir.name);\n }\n }\n }\n\n List<List<String>> res = new ArrayList<List<String>>();\n serialize(root, res, new ArrayList<String>());\n return res;\n }\n\n void serialize(Dir dir, List<List<String>> res, List<String> path) {\n List<String> entry = new ArrayList<>(path); // shallow copy\n\n if (dir.parent != null) {\n entry.add(dir.name);\n res.add(entry);\n }\n\n for (Dir child : dir.children.values()) {\n serialize(child, res, entry);\n }\n }\n\n void collectHashes(Dir dir, Map<String, List<Dir>> 
leaves) {\n if (dir.hash().isEmpty()) { // skip nodes with no children\n return;\n }\n \n leaves.computeIfAbsent(dir.hash(), k -> new ArrayList<>());\n leaves.get(dir.hash()).add(dir);\n\n for (Dir child : dir.children.values()) {\n collectHashes(child, leaves);\n }\n }\n}\n``` | 0 | 0 | ['Java'] | 0 |
delete-duplicate-folders-in-system | Faster than 90% || Serialize Tree || Trie || Clean Java Code | faster-than-90-serialize-tree-trie-clean-p74r | Code\njava []\nclass TrieNode {\n Map<String, TrieNode> children;\n String hashValue = "";\n boolean isEnd = false;\n\n public TrieNode() {\n | youssef1998 | NORMAL | 2024-11-02T20:30:09.908387+00:00 | 2024-11-02T20:30:09.908412+00:00 | 18 | false | # Code\n```java []\nclass TrieNode {\n Map<String, TrieNode> children;\n String hashValue = "";\n boolean isEnd = false;\n\n public TrieNode() {\n this.children = new HashMap<>();\n }\n}\n\nclass Trie {\n final TrieNode root = new TrieNode();\n Map<String, Integer> hashFrequency = new HashMap<>();\n List<List<String>> paths = new ArrayList<>();\n\n public void insert(List<String> path) {\n TrieNode curr = root;\n for (String folder : path) {\n curr = curr.children.computeIfAbsent(folder, _ -> new TrieNode());\n }\n curr.isEnd = true;\n }\n\n public String computeHashes(TrieNode node) {\n if (node.children.isEmpty()) return "";\n\n StringBuilder hashValue = new StringBuilder();\n List<String> childHashes = new ArrayList<>();\n\n for (String childKey : node.children.keySet()) {\n childHashes.add(childKey + "," + computeHashes(node.children.get(childKey)));\n }\n\n Collections.sort(childHashes);\n hashValue.append("(").append(String.join("", childHashes)).append(")");\n node.hashValue = hashValue.toString();\n hashFrequency.put(node.hashValue, hashFrequency.getOrDefault(node.hashValue, 0) + 1);\n\n return node.hashValue;\n }\n\n public void generateUniquePaths(List<String> path) {\n TrieNode curr = root;\n for (String folder : path) {\n TrieNode child = curr.children.get(folder);\n if (!child.hashValue.isEmpty() && hashFrequency.get(child.hashValue) > 1) {\n return;\n }\n curr = child;\n }\n paths.add(path);\n }\n}\n\nclass Solution {\n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n Trie trie = new Trie();\n for (List<String> path : paths) {\n 
trie.insert(path);\n }\n trie.computeHashes(trie.root);\n for (List<String> path : paths) {\n trie.generateUniquePaths(path);\n }\n return trie.paths;\n }\n}\n``` | 0 | 0 | ['Array', 'Hash Table', 'String', 'Trie', 'Hash Function', 'Java'] | 0 |
delete-duplicate-folders-in-system | 1948. Delete Duplicate Folders in System.cpp | 1948-delete-duplicate-folders-in-systemc-pjdo | Code\n\nstruct TrieNode {\n unordered_map<string, shared_ptr<TrieNode>> children;\n bool deleted = false;\n};\nclass Solution {\n public:\n vector<vector<str | 202021ganesh | NORMAL | 2024-10-24T09:21:33.929200+00:00 | 2024-10-24T09:21:33.929228+00:00 | 0 | false | **Code**\n```\nstruct TrieNode {\n unordered_map<string, shared_ptr<TrieNode>> children;\n bool deleted = false;\n};\nclass Solution {\n public:\n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n vector<vector<string>> ans;\n vector<string> path;\n unordered_map<string, vector<shared_ptr<TrieNode>>> subtreeToNodes;\n ranges::sort(paths);\n for (const vector<string>& path : paths) {\n shared_ptr<TrieNode> node = root;\n for (const string& s : path) {\n if (!node->children.count(s))\n node->children[s] = make_shared<TrieNode>();\n node = node->children[s];\n }\n }\n buildSubtreeToRoots(root, subtreeToNodes);\n\n for (const auto& [_, nodes] : subtreeToNodes)\n if (nodes.size() > 1)\n for (shared_ptr<TrieNode> node : nodes)\n node->deleted = true;\n constructPath(root, path, ans);\n return ans;\n }\n private:\n shared_ptr<TrieNode> root = make_shared<TrieNode>();\n\n string buildSubtreeToRoots(\n shared_ptr<TrieNode> node,\n unordered_map<string, vector<shared_ptr<TrieNode>>>& subtreeToNodes) {\n string subtree = "(";\n for (const auto& [s, child] : node->children)\n subtree += s + buildSubtreeToRoots(child, subtreeToNodes);\n subtree += ")";\n if (subtree != "()")\n subtreeToNodes[subtree].push_back(node);\n return subtree;\n }\n void constructPath(shared_ptr<TrieNode> node, vector<string>& path,\n vector<vector<string>>& ans) {\n for (const auto& [s, child] : node->children)\n if (!child->deleted) {\n path.push_back(s);\n constructPath(child, path, ans);\n path.pop_back();\n }\n if (!path.empty())\n ans.push_back(path);\n }\n};\n\n``` | 0 | 0 | 
['C'] | 0 |
delete-duplicate-folders-in-system | 💥💥Beats 100% on runtime and memory [EXPLAINED] | beats-100-on-runtime-and-memory-explaine-9xe7 | \n\n\n# Intuition\nFind duplicate folder structures in a file system, where two folders are considered identical if they have the same set of subfolders and arr | r9n | NORMAL | 2024-10-20T10:29:11.100531+00:00 | 2024-10-20T10:29:11.100551+00:00 | 1 | false | \n\n\n# Intuition\nFind duplicate folder structures in a file system, where two folders are considered identical if they have the same set of subfolders and arrangement, regardless of their position in the hierarchy.\n\n# Approach\nWe construct a tree representation of the folders, serialize each subtree while masking the current node\'s value to identify duplicates, and then remove any folders that belong to duplicate structures before returning the remaining unique folders.\n\n# Complexity\n- Time complexity:\n O(N log N) due to sorting folder names for serialization, where N is the total number of folders.\n\n- Space complexity:\nO(N) for storing the folder tree and duplicate mapping.\n\n# Code\n```typescript []\nclass Node {\n val: string | null;\n parent: Node | null;\n children: { [key: string]: Node };\n\n constructor(val: string | null, parent: Node | null) {\n this.val = val;\n this.parent = parent;\n this.children = {};\n }\n}\n\n/**\n * Deletes duplicate folders in the file system.\n * \n * @param paths - The input array representing paths to folders.\n * @return The resulting paths after deleting duplicate folders.\n */\nfunction deleteDuplicateFolder(paths: string[][]): string[][] {\n // Step 1: Construct the folder tree from the paths\n const root = new Node(null, null);\n for (const path of paths) {\n let current = root;\n for (const folder of path) {\n // Create child node if it doesn\'t exist\n if (!current.children[folder]) {\n current.children[folder] = new Node(folder, current);\n }\n current = current.children[folder];\n }\n }\n\n // Step 2: Identify 
duplicate folder structures\n const subTreeMap: { [key: string]: Node[] } = {};\n\n function serializeSubTree(node: Node): string {\n if (Object.keys(node.children).length === 0) {\n return node.val as string; // Leaf node\n }\n\n let serialized = `${node.val},`;\n // Sort children to ensure consistent traversal order\n Object.keys(node.children).sort().forEach((key) => {\n serialized += serializeSubTree(node.children[key]) + ",";\n });\n\n // Create a unique signature for the subtree, masking the current node\'s value\n const maskedSignature = serialized.replace(`${node.val},`, \'*,\');\n if (!subTreeMap[maskedSignature]) {\n subTreeMap[maskedSignature] = [];\n }\n subTreeMap[maskedSignature].push(node); // Store the node reference\n return serialized;\n }\n\n serializeSubTree(root);\n\n // Step 3: Remove duplicate folders\n for (const nodes of Object.values(subTreeMap)) {\n if (nodes.length > 1) {\n nodes.forEach(node => {\n if (node.parent) {\n delete node.parent.children[node.val as string]; // Remove from parent\n }\n });\n }\n }\n\n // Step 4: Generate the final list of remaining folder paths\n const result: string[][] = [];\n\n function generatePaths(node: Node, path: string[]) {\n if (node.val) {\n path.push(node.val);\n result.push([...path]); // Store the current path\n }\n Object.keys(node.children).forEach(key => {\n generatePaths(node.children[key], path); // Recur for children\n });\n path.pop(); // Backtrack to explore other paths\n }\n\n generatePaths(root, []);\n return result;\n}\n\n\n``` | 0 | 0 | ['TypeScript'] | 0 |
delete-duplicate-folders-in-system | Delete Duplicate Folders in System | delete-duplicate-folders-in-system-by-an-cjnc | \n# Approach\n Describe your approach to solving the problem. \nBuild the Trie Structure: Represent the file system as a trie, where each node corresponds to a | Ansh1707 | NORMAL | 2024-10-15T11:57:31.687978+00:00 | 2024-10-15T11:57:31.688010+00:00 | 7 | false | \n# Approach\n<!-- Describe your approach to solving the problem. -->\nBuild the Trie Structure: Represent the file system as a trie, where each node corresponds to a folder. This will allow us to traverse the file structure efficiently.\n\nHashing Subfolder Structures: Traverse the trie and compute a hash for each subfolder\'s structure. The hash will represent the serialized structure of the folder and all its subfolders. We can then detect duplicates by checking if the same hash occurs multiple times.\n\nMark and Delete Duplicates: Identify the folders to delete by marking all nodes with duplicate hashes. Finally, perform a second traversal to collect the remaining non-duplicate folder paths.\n\nTrieNode Class: Represents each node in the trie with a dictionary of children, a hash value for its subtree, and a folder name.\n\nBuilding the Trie: The deleteDuplicateFolder function first constructs a trie representing the folder structure using the given paths.\n\nHashing the Subtrees: The hash_subtree function recursively computes a unique hash for each subtree, which includes the folder name and its children\'s hashes. It stores the hash in the node and updates the subtree_count dictionary.\n\nCollecting Remaining Paths: The collect_paths function performs a DFS to collect non-duplicate folder paths. If a subtree has a duplicate hash, it is skipped.\n\n# Complexity\n- Time complexity:\n<!-- Add your time complexity here, e.g. $$O(n)$$ -->\nThe solution runs in O(NlogN), where N is the total number of folders and subfolders. 
Sorting the children during hashing ensures efficiency.\n\n\n# Code\n```python []\nfrom collections import defaultdict\n\nclass TrieNode:\n def __init__(self):\n self.children = defaultdict(TrieNode)\n self.hash = None # Hash of the subtree rooted at this node\n self.path = "" # Used to store the folder name\n\nclass Solution(object):\n def deleteDuplicateFolder(self, paths):\n """\n :type paths: List[List[str]]\n :rtype: List[List[str]]\n """\n root = TrieNode()\n\n # Step 1: Build the trie from the given paths\n for path in paths:\n current = root\n for folder in path:\n current = current.children[folder]\n current.path = folder\n \n subtree_count = defaultdict(int) # To count occurrences of each subtree hash\n \n # Step 2: Traverse the trie and hash each subtree\n def hash_subtree(node):\n if not node.children:\n return "" # Empty hash for leaf nodes\n \n # Serialize the subtree\n children_hashes = []\n for child_name, child_node in sorted(node.children.items()):\n child_hash = hash_subtree(child_node)\n # Use string concatenation instead of f-string\n children_hashes.append(child_name + "(" + child_hash + ")")\n \n # Create a unique hash for the current subtree\n node.hash = "".join(children_hashes)\n subtree_count[node.hash] += 1\n return node.hash\n \n hash_subtree(root) # Start hashing from the root\n \n # Step 3: Collect non-duplicate folder paths\n result = []\n\n def collect_paths(node, path):\n if subtree_count[node.hash] > 1:\n return # Skip this node and its subfolders as they are marked for deletion\n \n # If not a root node, add the path\n if node.path:\n result.append(list(path)) # Create a copy of the path\n \n # Recursively process children\n for child_name, child_node in node.children.items():\n path.append(child_name)\n collect_paths(child_node, path)\n path.pop()\n \n collect_paths(root, [])\n return result\n\n``` | 0 | 0 | ['Python'] | 0 |
delete-duplicate-folders-in-system | beats 97.77% | beats-9777-by-ranilmukesh-lwqa | \n\n# Code\njava []\nclass Solution {\n class Node {\n Map<String, Node> subNodes = new TreeMap<>();\n\n String content = "";\n\n boolea | ranilmukesh | NORMAL | 2024-10-05T19:08:17.993420+00:00 | 2024-10-05T19:08:17.993457+00:00 | 5 | false | \n\n# Code\n```java []\nclass Solution {\n class Node {\n Map<String, Node> subNodes = new TreeMap<>();\n\n String content = "";\n\n boolean remove = false;\n\n void markRemove() {\n if (remove) {\n return;\n }\n remove = true;\n if (subNodes != null) {\n for (Node value : subNodes.values()) {\n value.markRemove();\n }\n }\n }\n }\n\n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n paths.sort(Comparator.comparingInt(List::size));\n List<Node> nodes = new ArrayList<>(paths.size());\n Node rootNode = new Node();\n for (List<String> pathList : paths) {\n Node current = rootNode;\n int last = pathList.size() - 1;\n for (int i = 0; i < last; i++) {\n String s = pathList.get(i);\n current = current.subNodes.get(s);\n }\n String name = pathList.get(last);\n Node node = new Node();\n current.subNodes.put(name, node);\n nodes.add(node);\n }\n StringBuilder content = new StringBuilder();\n Map<String, Node> nodeByContent = new HashMap<>();\n for (int i = nodes.size() - 1; i >= 0; i--) {\n Node node = nodes.get(i);\n if (node.subNodes.isEmpty()) {\n continue;\n }\n for (Map.Entry<String, Node> entry : node.subNodes.entrySet()) {\n content.append(entry.getKey()).append(\'{\').append(entry.getValue().content).append(\'}\');\n }\n node.content = content.toString();\n content.delete(0, content.length());\n Node similar = nodeByContent.putIfAbsent(node.content, node);\n if (similar != null) {\n node.markRemove();\n similar.markRemove();\n }\n }\n List<List<String>> ans = new ArrayList<>();\n for (int i = 0; i < paths.size(); i++) {\n if (!nodes.get(i).remove) {\n ans.add(paths.get(i));\n }\n }\n return ans;\n }\n}\n``` | 0 | 0 | 
['Java'] | 0 |
delete-duplicate-folders-in-system | [Python3] Beats 100% ✅ Working - 20.09.2024 | python3-beats-100-working-20092024-by-pi-0tlh | Code\npython3 []\nclass TrieNode:\n def __init__(self):\n self.child = defaultdict(TrieNode)\n self.delete = False\n def add_word(self, word | Piotr_Maminski | NORMAL | 2024-09-20T01:48:13.759664+00:00 | 2024-09-25T20:18:35.811934+00:00 | 27 | false | # Code\n```python3 []\nclass TrieNode:\n def __init__(self):\n self.child = defaultdict(TrieNode)\n self.delete = False\n def add_word(self, word):\n curr = self\n for c in word:\n curr = curr.child[c]\n\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n def serialize(root, seen):\n if not root.child: return ""\n keys = []\n for c, child in root.child.items():\n keys.append(c + "#" + serialize(child, seen))\n key = "#" + "".join(sorted(keys)) + "#"\n seen[key].append(root)\n print(key)\n return key\n \n def dfs_get_valid_path(root, path, out):\n for c, child in root.child.items():\n if not child.delete:\n new_path = path + [c]\n out.append(new_path)\n dfs_get_valid_path(child, new_path, out)\n \n root, seen = TrieNode(), defaultdict(list)\n for path in paths:\n root.add_word(path)\n serialize(root, seen)\n print(seen)\n\n for nodes in seen.values():\n if len(nodes) >= 2:\n for node in nodes:\n node.delete = True\n \n ans = []\n dfs_get_valid_path(root, [], ans)\n return ans\n```\n\n\n\n\n | 0 | 0 | ['Python', 'Python3'] | 0 |
delete-duplicate-folders-in-system | C# Tree Serialization and Hashing | c-tree-serialization-and-hashing-by-getr-qwf5 | Intuition\n Describe your first thoughts on how to solve this problem. \n- Tree Representation: When given a list of folder paths, we can represent the file str | GetRid | NORMAL | 2024-09-11T15:21:21.494306+00:00 | 2024-09-11T15:21:21.494343+00:00 | 6 | false | # Intuition\n<!-- Describe your first thoughts on how to solve this problem. -->\n- Tree Representation: When given a list of folder paths, we can represent the file structure as a tree, where each folder is a node, and subfolders are children of that node.\n\n- Duplicate Detection: To detect duplicates, we need a way to uniquely identify the structure of a subtree. A simple idea is to convert the subtree into a string (or other comparable representation) that captures both the folder names and the hierarchical structure. If two folders produce the same string, they must have identical subtrees.\n\n- Serialization: Since the problem involves hierarchical data (subfolders inside folders), it naturally suggests a recursive approach to traverse the structure from the bottom up (post-order traversal), serialize subtrees, and use a dictionary to count occurrences of each subtree.\n\n- Duplicate Pruning: Once we identify subtrees that appear more than once, we can safely prune them, ensuring that any folder containing a duplicate subtree is marked for deletion.\n___\n\n# Approach\n<!-- Describe your approach to solving the problem. 
-->\n- Build the tree as before, where each node represents a folder.\n- Use a recursive serialization function to convert each subtree to a unique string that can be used for comparison.\n- Track subtree occurrences using a map to count how often each serialized subtree occurs.\n- Mark duplicates based on subtree serialization and recursively traverse the tree to identify and delete duplicates.\n- Return valid paths, ensuring only duplicate subtrees are removed.\n___\n\n# Complexity\n- Time complexity:\n<!-- Add your time complexity here, e.g. $$O(n)$$ -->O(L), where \'L\' is the total length of all paths.\n___\n\n- Space complexity:\n<!-- Add your space complexity here, e.g. $$O(n)$$ -->O(L).\n___\n\n# Code\n```csharp []\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\n\npublic class Solution {\n // Node class to represent the tree structure\n public class Node {\n public string name;\n public Dictionary<string, Node> children;\n public Node(string name) {\n this.name = name;\n this.children = new Dictionary<string, Node>();\n }\n }\n \n public IList<IList<string>> DeleteDuplicateFolder(IList<IList<string>> paths) {\n // Root of the tree\n Node root = new Node("/");\n\n // Build the tree from the given paths\n foreach (var path in paths) {\n AddPath(root, path);\n }\n\n // Dictionary to store serialized subtree structures and their frequency\n Dictionary<string, int> subtreeCount = new Dictionary<string, int>();\n Dictionary<Node, string> nodeToSerialization = new Dictionary<Node, string>();\n \n // Post-order traversal to serialize subtrees and count them\n SerializeSubtree(root, subtreeCount, nodeToSerialization);\n\n // List to collect remaining paths after removing duplicates\n IList<IList<string>> remainingPaths = new List<IList<string>>();\n \n // Traverse the tree again to collect valid paths\n CollectPaths(root, new List<string>(), remainingPaths, subtreeCount, nodeToSerialization);\n \n return remainingPaths;\n }\n \n // Helper 
method to add a path to the tree\n private void AddPath(Node root, IList<string> path) {\n Node current = root;\n foreach (var dir in path) {\n if (!current.children.ContainsKey(dir)) {\n current.children[dir] = new Node(dir);\n }\n current = current.children[dir];\n }\n }\n \n // Helper method to serialize a subtree and count the frequency of each serialization\n private string SerializeSubtree(Node node, Dictionary<string, int> subtreeCount, Dictionary<Node, string> nodeToSerialization) {\n if (node.children.Count == 0) return ""; // Leaf node\n \n // Serialize the subtree rooted at this node\n List<string> serializedChildren = new List<string>();\n foreach (var child in node.children.Values) {\n serializedChildren.Add(child.name + "(" + SerializeSubtree(child, subtreeCount, nodeToSerialization) + ")");\n }\n \n serializedChildren.Sort(); // Sort to ensure identical structures have the same serialization\n string serialized = string.Join(",", serializedChildren);\n \n // Count the serialized structure\n if (subtreeCount.ContainsKey(serialized)) {\n subtreeCount[serialized]++;\n } else {\n subtreeCount[serialized] = 1;\n }\n \n nodeToSerialization[node] = serialized;\n return serialized;\n }\n \n // Helper method to collect valid paths (those not marked for deletion)\n private void CollectPaths(Node node, List<string> currentPath, IList<IList<string>> remainingPaths, Dictionary<string, int> subtreeCount, Dictionary<Node, string> nodeToSerialization) {\n if (node == null) return;\n\n // Serialize the subtree to check if it has duplicates\n if (nodeToSerialization.ContainsKey(node)) {\n string serialized = nodeToSerialization[node];\n\n // If this subtree occurs more than once, skip this subtree\n if (subtreeCount.ContainsKey(serialized) && subtreeCount[serialized] > 1) {\n return;\n }\n }\n\n // Add the current path to remaining paths if not the root node\n if (!node.name.Equals("/")) {\n remainingPaths.Add(new List<string>(currentPath));\n }\n\n // Recursively 
collect paths from child nodes\n foreach (var child in node.children) {\n currentPath.Add(child.Key);\n CollectPaths(child.Value, currentPath, remainingPaths, subtreeCount, nodeToSerialization);\n currentPath.RemoveAt(currentPath.Count - 1);\n }\n }\n}\n``` | 0 | 0 | ['Array', 'Hash Table', 'String', 'Trie', 'Hash Function', 'C#'] | 0 |
delete-duplicate-folders-in-system | Python 3: TC O(N log(N)) SC O(N): SHA256 Substructure Hashes | python-3-tc-on-logn-sc-on-sha256-substru-gjtc | Intuition\n\nThis is a pretty nasty problem IMO.\n\nThe key I think is to know either\n substructure hashes exist\n or that you can probably just serialize the | biggestchungus | NORMAL | 2024-08-02T01:58:19.808336+00:00 | 2024-08-02T01:58:19.808366+00:00 | 2 | false | # Intuition\n\nThis is a pretty nasty problem IMO.\n\nThe key I think is to know either\n* substructure hashes exist\n* or that you can probably just serialize the substructures exactly as I saw another submission do and hash those without TLE\n\n## Brute Force, Always Correct\n\nMost solutions to this problem do the following:\n* have a way to serialize (some call it "hashing," but they\'re actually serializing the tree) to a string\n * e.g. [DBabichev](https://leetcode.com/problems/delete-duplicate-folders-in-system/solutions/1361419/python-serialize-subtrees-complexity-analysis-explained/) uses something like `a()b(c(),d())...z()` to serialize a directory with children `a/`, `b/c/`, `b/d/`, etc.\n* traverse the tree, building up the serialized string from the bottom up and adding it to a hashmap\n\nThen any node whose serialized substructure shows up 2+ times in the hashmap is NOT unique and should thus be deleted.\n\n## Approximate, Faster\n\nBuliding hashes from the bottom up becomes quadratic in tree depth because you keep adding a prefix to a long suffix at each level of the tree.\n\nIt\'s fast enough I guess given the bounds on the problem, but not as fast as it could be.\n\nA faster solution is to **hash substructures intead of serializing them**.\n\nWe\'re basically doing a Rabin-Karp approach to finding duplicates in the tree. 
Each subtree has a hash, and with high probability, if a hash appears 2+ times then we have a duplicate substructure.\n\n### First Attempt: foo * 27 + offset + 1\n\nMy first try was to do my own hashing.\n\nIt failed because of conflicts.\n\nThe tricky thing is that we need a LOT of entropy, each configuration of the tree needs to produce a hash that\'s very different. Otherwise you end up with cases where two different trees have the same hash and causes an error.\n\nMy attempts at a basic *27 + offset + 1 rolling-hash-like thing just weren\'t cutting it. Not enough bits with all the short directory names.\n\n### Second Attempt: Cryptographic Hash\n\nFortunately there\'s a way of getting order-dependent, arity-dependent hashes with cryptographic strength, i.e. it\'s very hard to get duplicate hashes even if you try: cryptographic hashes.\n\nThe SHA family of hashes, really called "digests" because they "digest" a ton of data and produce one binary summary, have the cryptographic strength property.\n\nSo my new algorithm to generate the hash for each node is to\n* sort the children by directory name, so we add things to the hash in a canonical order. Otherwise if we process identical substructures but in a different traversal order we\'d get a different hash (false negative)\n* then\n * add the child name to the digest\n * add the child\'s digest to the current digest\n* then return the digest as we ascend the tree so the parent can make its own digest\n\nThe result is a cryptographic strength hash for the structure with a very VERY low chance of hash collisions.\n\nSeriously. You\'d have to try REALLY REALLY hard to find identical hashes.\n\nYour best best would be a "birthday attack," where you exploit the [Birthday Paradox](https://en.wikipedia.org/wiki/Birthday_problem).\n\nSHA256 has a LOT of bits and therefore even very large inputs will very rarely have a collision.\n\n# Complexity\n- Time complexity: `O(N**2)`. 
The worst case is where the tree is very shallow so we have to sort `O(N)` children. The SHA256 digest does `O(1)` work per child for each node (albeit a hefty `O(1)`)\n - meanwhile, for `O(N)` levels deep, we would append a length-1 path to the output, and length-2, and length-3, etc. `O(N)` paths of length `O(N)` -> `O(N**2)` \n\n- Space complexity: `O(N)`, we use at most `O(N)` stack memory for a very deep tree, and each path has a hash\n\n# Comments\n\nSubstructure hashes show up when trying to identify isomorphic graphs.\n\nFinding all of them exactly is NP complete, but with hashes you can get a faster algorithm with high probability of no collisions.\n\nYou can also always verify the collisions too to ensure correctness.\n\n# Code\n```\nfrom hashlib import sha256\n\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n # 2D array paths, paths[i] is for example ["a", "b", "c"] representing /a/b/c/\n #\n # two folders are identical if they have the same nonempty set of subfolders with the same structure\n\n # they need not be at the same level, ugh\n\n # then delete all the duplicates\n \n # following paths builds up a directory trie\n #\n # nodes in that trie that are identical to another node in the trie should both be deleted\n\n # we delete in a single round\n\n # up to\n # 2e4 paths\n # 5e2 entries per path\n # 10 chars per path entry\n # 2e5 chars total across all paths\n\n # all chars are lowercase\n \n # no two paths lead to the same place\n\n # building the directory trie is easy\n\n # identifying identical subtrees is hard, I think NP-hard if you insist on a deterministic 100% algo\n #\n # but we can use a Merkle tree approach, structure hash, etc. 
to help us\n # suppose the current node in the trie has a subfolder structure hash of x\n # if there\'s another subtrie with the same hash, mark both for deletion\n\n # subtrie hash: need to combine in an order dependent way, or in a canonical order\n\n # suppose we hash names, somehow\n # then the resulting hash can be an xor of names, or we can sort names and do a more RK-like rolling hash\n\n # when we combine hashes we\'ll take priorHash*26+25 at most, so maxHash = maxInt-25 ) // 26\n BIG = (0x7FFFFFFFFFFFFFFF - 26)//27\n\n # if we have a child via name foo with structure hash x, then\n # mix x with the name hash I guess via the same procedure\n #\n # and then do xor across children?\n\n class DirNode:\n def __init__(self):\n self.children = {} # dir name -> child node\n self.hash = 0 # to be filled in later\n\n def add(self, name: str) -> \'DirNode\':\n if name not in self.children:\n self.children[name] = DirNode()\n\n return self.children[name]\n\n root = DirNode()\n\n for path in paths:\n curr = root\n for d in path:\n curr = curr.add(d)\n\n oa = ord(\'a\')\n hash_freqs = defaultdict(int)\n def setHashes(node):\n # hash of node is\n # > take each child trie, get its structure hash, mix with the name\n # > then xor over all children\n\n digester = sha256() # FIX: bring out the big guns\n for dirname, child in sorted(node.children.items()):\n child_hash = setHashes(child)\n\n digester.update(dirname.encode())\n digester.update(child_hash)\n\n\n digest = digester.digest()\n hash_freqs[digest] += 1\n node.hash = digest\n return digest\n\n setHashes(root)\n\n ans = []\n curr_path = []\n def findPaths(node) -> None:\n\n # FIX: only delete if it has the same nonempty set of children and substructure, so don\'t delete leaves b/c set is empty\n if hash_freqs[node.hash] == 1 or not node.children:\n if curr_path:\n ans.append(curr_path[:]) # unique structure hash: this dir should NOT be deleted, append root-to-here path to answer\n\n for dirname, child in 
node.children.items():\n curr_path.append(dirname)\n findPaths(child)\n curr_path.pop()\n\n # else: this is deleted, so this substructure matches another substructure, so\n # the children are all duplicates too\n\n findPaths(root)\n\n return ans\n``` | 0 | 0 | ['Python3'] | 0 |
delete-duplicate-folders-in-system | 👍Runtime 383 ms Beats 100.00% | runtime-383-ms-beats-10000-by-pvt2024-pk55 | Code\n\ntype Node struct {\n\tsubNodes map[string]*Node\n\tcontent string\n\tremove bool\n}\n\nfunc newNode() *Node {\n\treturn &Node{\n\t\tsubNodes: make(ma | pvt2024 | NORMAL | 2024-06-17T00:41:16.095750+00:00 | 2024-06-17T00:41:16.095780+00:00 | 5 | false | # Code\n```\ntype Node struct {\n\tsubNodes map[string]*Node\n\tcontent string\n\tremove bool\n}\n\nfunc newNode() *Node {\n\treturn &Node{\n\t\tsubNodes: make(map[string]*Node),\n\t}\n}\n\nfunc (node *Node) markRemove() {\n\tif node.remove {\n\t\treturn\n\t}\n\tnode.remove = true\n\tfor _, value := range node.subNodes {\n\t\tvalue.markRemove()\n\t}\n}\n\nfunc deleteDuplicateFolder(paths [][]string) [][]string {\n\t// Sort paths by their length\n\tsort.Slice(paths, func(i, j int) bool {\n\t\treturn len(paths[i]) < len(paths[j])\n\t})\n\n\tnodes := make([]*Node, len(paths))\n\trootNode := newNode()\n\n\tfor i, pathList := range paths {\n\t\tcurrent := rootNode\n\t\tlast := len(pathList) - 1\n\t\tfor j := 0; j < last; j++ {\n\t\t\ts := pathList[j]\n\t\t\tif _, exists := current.subNodes[s]; !exists {\n\t\t\t\tcurrent.subNodes[s] = newNode()\n\t\t\t}\n\t\t\tcurrent = current.subNodes[s]\n\t\t}\n\t\tname := pathList[last]\n\t\tnode := newNode()\n\t\tcurrent.subNodes[name] = node\n\t\tnodes[i] = node\n\t}\n\n\tcontent := strings.Builder{}\n\tnodeByContent := make(map[string]*Node)\n\n\tfor i := len(nodes) - 1; i >= 0; i-- {\n\t\tnode := nodes[i]\n\t\tif len(node.subNodes) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfor key, value := range node.subNodes {\n\t\t\tcontent.WriteString(key)\n\t\t\tcontent.WriteString("{")\n\t\t\tcontent.WriteString(value.content)\n\t\t\tcontent.WriteString("}")\n\t\t}\n\t\tnode.content = content.String()\n\t\tcontent.Reset()\n\t\tif similar, exists := nodeByContent[node.content]; exists {\n\t\t\tnode.markRemove()\n\t\t\tsimilar.markRemove()\n\t\t} else 
{\n\t\t\tnodeByContent[node.content] = node\n\t\t}\n\t}\n\n\tvar ans [][]string\n\tfor i, path := range paths {\n\t\tif !nodes[i].remove {\n\t\t\tans = append(ans, path)\n\t\t}\n\t}\n\n\treturn ans\n}\n``` | 0 | 0 | ['Go'] | 0 |
delete-duplicate-folders-in-system | [Python3] Trie, DFS & Hashing | python3-trie-dfs-hashing-by-timetoai-nvmy | \nclass Node:\n def __init__(self, ind=None):\n self.ind = ind\n self.d = defaultdict(Node)\n\n\nclass Solution:\n def deleteDuplicateFolder | timetoai | NORMAL | 2024-06-10T10:15:16.203227+00:00 | 2024-06-10T10:15:16.203256+00:00 | 36 | false | ```\nclass Node:\n def __init__(self, ind=None):\n self.ind = ind\n self.d = defaultdict(Node)\n\n\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n root = Node()\n for ind, path in enumerate(paths):\n cur = root\n for node in path:\n cur = cur.d[node]\n cur.ind = ind\n \n hashes = defaultdict(list)\n rev = {}\n # encode: 50 bits node, up to 500 * 50 = 25 000 bit path\n def dfs(node, val, ind):\n if not node:\n return 0\n childs = []\n for child in node.d:\n childs.append(dfs(node.d[child], child, node.d[child].ind))\n cur = hash(tuple(sorted(childs)))\n if ind >= 0:\n hashes[cur].append(ind)\n rev[ind] = cur\n return hash((cur, val)) \n\n dfs(root, \'a\', - 1)\n mark = [False] * len(paths)\n\n def dfs1(node, marked=False):\n if node.ind is not None and node.d and len(hashes[rev[node.ind]]) > 1:\n marked = True\n if node.ind is not None:\n mark[node.ind] = marked\n for child in node.d:\n dfs1(node.d[child], marked)\n\n dfs1(root)\n\n return [paths[i] for i in range(len(paths)) if not mark[i]]\n\n``` | 0 | 0 | ['Python3'] | 0 |
delete-duplicate-folders-in-system | Beats 100% by 100ms, actuall hashing, clean code solution | beats-100-by-100ms-actuall-hashing-clean-htf9 | Intuition\nThe trick is to simply hash the directory structure, but without taking into account the current node name.\n\n# Approach\nSimple hash combine, with | teodor_spaeren | NORMAL | 2024-06-10T07:17:19.828411+00:00 | 2024-06-10T07:17:19.828444+00:00 | 52 | false | # Intuition\nThe trick is to simply hash the directory structure, but without taking into account the current node name.\n\n# Approach\nSimple hash combine, with a tree structure. I don\'t actually spend time cleaning up the tree, just marking that they are removed.\n\nThere could of course be false positives here, but the chances are very very small, so I\'m fine with it for leetcode. You could create a second function which actually compares the children, if you wanted.\n\n# Complexity\n- Time complexity: Don\'t Know\n<!-- Add your time complexity here, e.g. $$O(n)$$ -->\n\n- Space complexity: $$O(n)$$\n\n\n# Code\n```\ntemplate <class T> inline void hash_combine(std::size_t& seed, const T& v) {\n std::hash<T> hasher;\n seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);\n}\n\n\ninline const auto optimize = []() {\n std::ios::sync_with_stdio(false);\n std::cin.tie(nullptr);\n std::cout.tie(nullptr);\n return 0;\n}();\n\n\n\nclass Solution {\n struct Node {\n std::string_view name;\n std::map<std::string_view, std::unique_ptr<Node>> children;\n bool marked{false};\n\n std::size_t\n hash(std::unordered_map<std::size_t, std::vector<Node*>>& dir) {\n if (children.size() == 0)\n return 0;\n\n // This does NOT include the hash of the name, since we are\n // interested in child hashes.\n std::size_t ourHash = 0;\n for (const auto& child : children) {\n hash_combine(ourHash, child.first);\n auto theirHash = child.second->hash(dir);\n hash_combine(ourHash, theirHash);\n }\n\n dir[ourHash].emplace_back(this);\n return ourHash;\n }\n\n void 
pushOut(std::vector<std::vector<std::string>>& dirs,\n std::vector<std::string>& cur) {\n if (marked)\n return;\n\n if (name != "/")\n dirs.push_back(cur);\n\n for (const auto& [cName, child] : children) {\n cur.emplace_back(cName);\n child->pushOut(dirs, cur);\n cur.pop_back();\n }\n }\n };\n\npublic:\n static std::vector<std::vector<std::string>>\n deleteDuplicateFolder(const std::vector<std::vector<std::string>>& paths) {\n Node root{"/"sv};\n\n for (const auto& path : paths) {\n Node* cur = &root;\n for (const auto& dir : path) {\n if (auto it = cur->children.find(dir);\n it != cur->children.end()) {\n cur = it->second.get();\n continue;\n }\n\n auto newNode = std::make_unique<Node>(dir);\n auto hey = newNode.get();\n cur->children.emplace(dir, std::move(newNode));\n cur = hey;\n }\n }\n\n // now we just need to\n std::unordered_map<std::size_t, std::vector<Node*>> groups;\n root.hash(groups);\n\n for (const auto& [hash, group] : groups) {\n if (group.size() < 2)\n continue;\n\n for (const auto& node : group)\n node->marked = true;\n }\n\n std::vector<std::vector<std::string>> out;\n std::vector<std::string> tmp;\n root.pushOut(out, tmp);\n return out;\n }\n};\n``` | 0 | 0 | ['Hash Table', 'Trie', 'Hash Function', 'C++'] | 0 |
delete-duplicate-folders-in-system | trie + subtree hashing | trie-subtree-hashing-by-parkcloud-pyb0 | Intuition\n Describe your first thoughts on how to solve this problem. \n\n# Approach\n Describe your approach to solving the problem. \n\n# Complexity\n- Time | parkCloud | NORMAL | 2024-04-29T10:40:38.841355+00:00 | 2024-04-29T10:40:38.841387+00:00 | 13 | false | # Intuition\n<!-- Describe your first thoughts on how to solve this problem. -->\n\n# Approach\n<!-- Describe your approach to solving the problem. -->\n\n# Complexity\n- Time complexity:\n<!-- Add your time complexity here, e.g. $$O(n)$$ -->\n\n- Space complexity:\n<!-- Add your space complexity here, e.g. $$O(n)$$ -->\n\n# Code\n```\n/**\n * @param {string[][]} paths\n * @return {string[][]}\n */\nclass TrieNode {\n constructor() {\n this.children = new Map()\n this.del = false\n this.val = \'\'\n }\n}\nclass Trie {\n constructor() {\n this.root = new TrieNode()\n }\n insert(path) {\n let node = this.root\n for (const folder of path) {\n if (!node.children.has(folder)) {\n node.children.set(folder, new TrieNode())\n node.val = folder\n }\n node = node.children.get(folder)\n }\n }\n}\nvar deleteDuplicateFolder = function (paths) {\n paths.sort()\n let trie = new Trie()\n for (const path of paths) {\n trie.insert(path)\n }\n let root = trie.root\n let map = new Map()\n function dfs(node) {\n if (node.children.size === 0) return node.val\n let output = \'\'\n for (const [k, child] of node.children.entries()) {\n output += `${k}${dfs(child)}`\n }\n if (output) {\n if (!map.has(output)) {\n map.set(output, [])\n }\n map.get(output).push(node)\n }\n return node.val + output\n\n }\n dfs(root)\n for (const [k, arr] of map.entries()) {\n if (arr.length > 1) {\n for (const node of arr) {\n node.del = true\n }\n }\n }\n let output = []\n function dfs2(node, path) {\n for (const [k, child] of node.children.entries()) {\n if (!child.del) {\n\n dfs2(child, [...path, k])\n }\n }\n if (path.length) {\n 
output.push([...path])\n }\n\n }\n dfs2(root, [])\n return output\n};\n``` | 0 | 0 | ['JavaScript'] | 0 |
delete-duplicate-folders-in-system | Kotlin Lovers | Trie | kotlin-lovers-trie-by-treat-3jwn | Intuition\nStore All paths in a map. Check for repeats and mark as true. Then return the non repeat paths. \n\n\n# Approach\n Describe your approach to solving | treat | NORMAL | 2024-01-22T15:52:12.327584+00:00 | 2024-01-22T15:52:12.327626+00:00 | 1 | false | # Intuition\nStore All paths in a map. Check for repeats and mark as true. Then return the non repeat paths. \n\n\n# Approach\n<!-- Describe your approach to solving the problem. -->\n\n# Complexity\n- Time complexity:\n<!-- Add your time complexity here, e.g. $$O(n)$$ -->\n\n- Space complexity:\n<!-- Add your space complexity here, e.g. $$O(n)$$ -->\n\n# Code\n```\nclass Solution {\n fun deleteDuplicateFolder(paths: List<List<String>>): List<List<String>> {\n val trie = Trie()\n for(p in paths){\n trie.add(p)\n }\n val map = mutableMapOf<String, Boolean>()\n trie.getSubStructure(map)\n return trie.getNonDuplicates(map)\n }\n}\n\n\nclass Trie(){\n private val head = Node()\n fun add(list: List<String>,i: Int = 0, node: Node= head){\n if(i == list.size){\n return\n }\n if(node.map[list[i]] == null){\n node.map[list[i]] = Node()\n }\n add(list, i + 1, node.map[list[i]]!!)\n }\n\n fun getNonDuplicates(map: MutableMap<String, Boolean>, node: Node = head): List<List<String>>{\n val op = mutableListOf<List<String>>()\n for(k in node.map.keys){\n if(map[node.map[k]!!.sub] != null && map[node.map[k]!!.sub]!!){continue}\n op.add(listOf(k))\n val tempList = getNonDuplicates(map, node.map[k]!!)\n for(t in tempList){\n val te = mutableListOf<String>(k)\n te.addAll(t)\n op.add(te.toList())\n }\n }\n return op.toList()\n }\n\n fun getSubStructure(map: MutableMap<String, Boolean>, node: Node = head){\n val sb = StringBuilder()\n var space = ""\n for(k in node.map.keys){\n sb.append(space)\n space = " "\n getSubStructure(map, node.map[k]!!)\n sb.append(k)\n sb.append("(")\n sb.append(node.map[k]!!.sub)\n sb.append(")")\n }\n 
node.sub = sb.toString()\n if(node.sub == "") return \n if(map[node.sub]== null){\n map[node.sub] = false\n }else {\n map[node.sub] = true\n }\n return\n }\n}\nclass Node(){\n var sub = ""\n val map = TreeMap<String, Node>()\n}\n``` | 0 | 0 | ['Kotlin'] | 0 |
delete-duplicate-folders-in-system | N-ary tree filesystem with hashing | n-ary-tree-filesystem-with-hashing-by-le-sxg9 | Code\n\ntype treeNode struct {\n selfName string\n childrenHash string\n shouldDelete bool\n children map[string]*treeNode\n}\n\ | lewisHamilton | NORMAL | 2024-01-01T05:54:18.193360+00:00 | 2024-01-01T05:54:18.193390+00:00 | 3 | false | # Code\n```\ntype treeNode struct {\n selfName string\n childrenHash string\n shouldDelete bool\n children map[string]*treeNode\n}\n\ntype fileSystem struct {\n root *treeNode\n hashD map[string]int\n}\n\nfunc newFileSystem() *fileSystem {\n return &fileSystem {\n root: &treeNode {\n selfName : "/",\n childrenHash : "",\n shouldDelete : false,\n children : make(map[string]*treeNode),\n },\n hashD: make(map[string]int),\n }\n}\n\nfunc (fs *fileSystem) mkdir(path []string) {\n curr := fs.root\n for _, pathElement := range path {\n curr = fs.getOrCreateChild(curr, pathElement)\n }\n}\n\nfunc (fs *fileSystem) getOrCreateChild(curr *treeNode, childName string) *treeNode {\n v, ok := curr.children[childName]\n if ok {\n return v\n }\n // Create\n newNode := &treeNode {\n selfName: childName,\n childrenHash: "",\n shouldDelete: false,\n children: make(map[string]*treeNode),\n }\n curr.children[childName] = newNode\n return newNode\n}\n\nfunc (fs *fileSystem) getChild(curr *treeNode, childName string) *treeNode {\n v, ok := curr.children[childName]\n if ok {\n return v\n }\n return nil\n}\n\nfunc (fs *fileSystem) getChildHash(root *treeNode) string {\n if root == nil {\n return ""\n }\n // Encoding style\n // [kid 1{kidEncoding}, kid 2{kidEncoding}, .... 
, kid n{kidEncoding}]\n // kid i\'s should be sorted in str asc.\n kidNodeMap := make(map[string]*treeNode)\n names := make([]string, 0)\n for _, nodePtr := range root.children {\n kidNodeMap[nodePtr.selfName] = nodePtr\n names = append(names, nodePtr.selfName)\n }\n sort.Strings(names)\n enc := bytes.Buffer{}\n for i, name := range names {\n enc.WriteString(name)\n enc.WriteString("{")\n enc.WriteString(fs.getChildHash(kidNodeMap[name]))\n enc.WriteString("}")\n if i < len(names)-1 {\n enc.WriteString(",")\n }\n }\n enc.WriteString("]")\n encs := enc.String()\n root.childrenHash = encs\n fs.hashD[encs]++\n return encs\n}\n\nfunc (fs *fileSystem) markForDeletion(root *treeNode) {\n if root == nil {\n return\n }\n cnt, ok := fs.hashD[root.childrenHash]\n shouldDel := false\n if ok && cnt >= 2 && len(root.children) > 0 {\n shouldDel = true\n }\n root.shouldDelete = shouldDel\n for _, node := range root.children {\n fs.markForDeletion(node)\n }\n} \n\nfunc (fs *fileSystem) isDeletedPath(path []string) bool {\n curr := fs.root\n for _, pathElement := range path {\n curr = fs.getChild(curr, pathElement)\n if curr == nil || curr.shouldDelete {\n return true\n }\n }\n return false\n}\n\nfunc deleteDuplicateFolder(paths [][]string) [][]string {\n fs := newFileSystem()\n for _, path := range paths {\n fs.mkdir(path)\n }\n fs.getChildHash(fs.root)\n fs.markForDeletion(fs.root)\n result := make([][]string, 0)\n for _, path := range paths {\n if !fs.isDeletedPath(path) {\n result = append(result, path)\n }\n }\n return result\n}\n``` | 0 | 0 | ['Go'] | 0 |
delete-duplicate-folders-in-system | Delete Duplicate Folders in System Solution | delete-duplicate-folders-in-system-solut-s024 | Intuition\n Describe your first thoughts on how to solve this problem. \n\n# Approach\n Describe your approach to solving the problem. \n\n# Complexity\n- Time | DRACULA_1708 | NORMAL | 2023-12-08T05:50:41.120486+00:00 | 2023-12-08T05:50:41.120518+00:00 | 33 | false | # Intuition\n<!-- Describe your first thoughts on how to solve this problem. -->\n\n# Approach\n<!-- Describe your approach to solving the problem. -->\n\n# Complexity\n- Time complexity:\n<!-- Add your time complexity here, e.g. $$O(n)$$ -->\n\n- Space complexity:\n<!-- Add your space complexity here, e.g. $$O(n)$$ -->\n\n# Code\n```\nstruct TrieNode {\n unordered_map<string, shared_ptr<TrieNode>> children;\n bool deleted = false;\n};\n\nclass Solution {\n public:\n vector<vector<string>> deleteDuplicateFolder(vector<vector<string>>& paths) {\n vector<vector<string>> ans;\n vector<string> path;\n unordered_map<string, vector<shared_ptr<TrieNode>>> subtreeToNodes;\n\n ranges::sort(paths);\n\n for (const vector<string>& path : paths) {\n shared_ptr<TrieNode> node = root;\n for (const string& s : path) {\n if (!node->children.count(s))\n node->children[s] = make_shared<TrieNode>();\n node = node->children[s];\n }\n }\n\n buildSubtreeToRoots(root, subtreeToNodes);\n\n for (const auto& [_, nodes] : subtreeToNodes)\n if (nodes.size() > 1)\n for (shared_ptr<TrieNode> node : nodes)\n node->deleted = true;\n\n constructPath(root, path, ans);\n return ans;\n }\n\n private:\n shared_ptr<TrieNode> root = make_shared<TrieNode>();\n\n string buildSubtreeToRoots(\n shared_ptr<TrieNode> node,\n unordered_map<string, vector<shared_ptr<TrieNode>>>& subtreeToNodes) {\n string subtree = "(";\n for (const auto& [s, child] : node->children)\n subtree += s + buildSubtreeToRoots(child, subtreeToNodes);\n subtree += ")";\n if (subtree != "()")\n subtreeToNodes[subtree].push_back(node);\n return 
subtree;\n }\n\n void constructPath(shared_ptr<TrieNode> node, vector<string>& path,\n vector<vector<string>>& ans) {\n for (const auto& [s, child] : node->children)\n if (!child->deleted) {\n path.push_back(s);\n constructPath(child, path, ans);\n path.pop_back();\n }\n if (!path.empty())\n ans.push_back(path);\n }\n};\n``` | 0 | 0 | ['C++'] | 0 |
delete-duplicate-folders-in-system | Javascript - Trie Solution (*can be further optimised) | javascript-trie-solution-can-be-further-1p7io | Code\n\n/**\n * @param {string[][]} paths\n * @return {string[][]}\n */\n //create tree\n //mark duplicates\n //return non duplicates\nclass Node {\n constru | sanketnitk | NORMAL | 2023-12-05T15:45:45.711345+00:00 | 2023-12-05T15:46:02.965766+00:00 | 8 | false | # Code\n```\n/**\n * @param {string[][]} paths\n * @return {string[][]}\n */\n //create tree\n //mark duplicates\n //return non duplicates\nclass Node {\n constructor(value) {\n this.val = value || "";\n this.children = {};\n this.dirStr = "";\n }\n}\n\nclass Trie {\n constructor() {\n this.root = new Node();\n }\n add(path) {\n let node = this.root;\n for (let p of path) {\n if (!node.children[p]) {\n node.children[p] = new Node(p);\n } \n node = node.children[p]; \n }\n }\n dfs = function(node, duplicatesStr) {\n let dirStr = [];\n if (node) {\n for (let child of Object.keys(node.children)) {\n let subTree = this.dfs(node.children[child], duplicatesStr);\n if (duplicatesStr[subTree] == undefined) {\n duplicatesStr[subTree] = false;\n } else {\n duplicatesStr[subTree] = true;\n }\n \n dirStr.push([...subTree].sort().join("") + node.children[child].val);\n }\n \n }\n dirStr.sort();\n node.dirStr = dirStr.toString(); \n return node.dirStr;\n }\n\n removeDuplicateDFS = function(node, duplicatesStr) {\n if (node) {\n for (let child of Object.keys(node.children)) {\n this.removeDuplicateDFS(node.children[child], duplicatesStr);\n if (duplicatesStr[node.children[child].dirStr] == true) {\n delete node.children[child];\n }\n } \n }\n }\n\n print(node, ans, stack) {\n if (node.val != "") {\n stack.push(node.val);\n ans.push([...stack]);\n }\n for (let child of Object.keys(node.children)) {\n this.print(node.children[child], ans, stack);\n }\n stack.pop();\n }\n}\n\nvar deleteDuplicateFolder = function(paths) {\n let trie = new Trie();\n //create tree\n for (let path of 
paths) {\n trie.add(path);\n }\n //get directoryStructure and mark duplicates\n let duplicatesStr = {}\n trie.dfs(trie.root, duplicatesStr);\n //remove duplicates\n delete duplicatesStr[\'\'];\n //console.log(duplicatesStr);\n trie.removeDuplicateDFS(trie.root, duplicatesStr);\n //console.log(JSON.stringify(trie))\n //parse trie\n let ans = [];\n trie.print(trie.root, ans, []);\n return ans;\n};\n``` | 0 | 0 | ['Depth-First Search', 'Trie', 'JavaScript'] | 0 |
delete-duplicate-folders-in-system | python dfs + trie + hasmap | python-dfs-trie-hasmap-by-harrychen1995-tjs1 | Intuition\n Describe your first thoughts on how to solve this problem. \n\n# Approach\n Describe your approach to solving the problem. \n\n# Complexity\n- Time | harrychen1995 | NORMAL | 2023-11-08T16:32:10.148084+00:00 | 2023-11-08T16:32:10.148117+00:00 | 46 | false | # Intuition\n<!-- Describe your first thoughts on how to solve this problem. -->\n\n# Approach\n<!-- Describe your approach to solving the problem. -->\n\n# Complexity\n- Time complexity:\n<!-- Add your time complexity here, e.g. $$O(n)$$ -->\n\n- Space complexity:\n<!-- Add your space complexity here, e.g. $$O(n)$$ -->\n\n# Code\n```\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n \n\n trie = {}\n\n def insert(s):\n current = trie\n for i in s:\n current = current.setdefault(i, {"#":False})\n \n \n for p in paths:\n insert(p)\n table = {}\n def dfs(root):\n if len(root) == 1 and "#" in root:\n return [""]\n ans = []\n for k in root:\n if k == "#":\n continue\n path = dfs(root[k])\n for p in path:\n ans.append(k+p)\n\n ans.sort()\n s="-".join(ans)\n if s not in table:\n table[s]=[]\n table[s].append(root)\n return ans\n\n\n path = dfs(trie)\n path.sort()\n table.pop("-".join(path))\n for s in table:\n if len(table[s]) > 1:\n for node in table[s]:\n node["#"] = True\n \n\n ans = []\n def construct(root, p):\n if "#" in root and root["#"] == True:\n return\n if len(root) == 1 and "#" in root:\n ans.append(p)\n return\n if p:\n ans.append(p)\n for k in root:\n if k == "#":\n continue\n construct(root[k], p+[k])\n \n \n\n construct(trie, [])\n return ans\n \n\n\n``` | 0 | 0 | ['Hash Table', 'Depth-First Search', 'Trie', 'Python3'] | 0 |
delete-duplicate-folders-in-system | Python solution: JSON-style hashing of all subtrees | python-solution-json-style-hashing-of-al-bgia | Intuition\r\n Describe your first thoughts on how to solve this problem. \r\nInitially I tried to build a Trie with the words reversed, but very soon I found it | huikinglam02 | NORMAL | 2023-10-19T05:19:37.606202+00:00 | 2023-10-19T05:19:37.606220+00:00 | 11 | false | # Intuition\r\n<!-- Describe your first thoughts on how to solve this problem. -->\r\nInitially I tried to build a Trie with the words reversed, but very soon I found it gives wrong answers. And there are interesting testcases which essentially requires serialization of subtrees\r\n# Approach\r\n<!-- Describe your approach to solving the problem. -->\r\nI chose the JSON style serialization because it\'s very commonly used. One must remember to use sorted children keys for hashing though. To handle leaves properly, I used a second dfs while keeping track of which node I already trimmed. \r\n# Code\r\n```\r\nfrom typing import List\r\n\r\n\r\nclass Solution:\r\n \'\'\'\r\n This question is about serializing a node\'s children, and removing nodes with identical children\r\n Looking at the example, we notice the answer always is a subset of paths. 
So we need to decide which to delete.\r\n To serialize a subtree, we use the JSON convention: for example, subtree of the root in examples can be represented by\r\n 1: [{a:b},{c:b},{d:a}]\r\n 2: [{a:{b:{x:y}}},{c:b},{w:y}]\r\n 3: [{a:b},{c:d}]\r\n We can sort paths by length first\r\n Then for each increment length, recognize the parent of each node, build the graph\r\n Then dfs from the top: for eacb children, serialize the subtree.\r\n Save the serialization as key in dict and delete node with keys that appear more than once.\r\n \'\'\'\r\n \r\n def dfs(self, node):\r\n n = len(self.graph[node])\r\n nodesSorted = sorted(self.graph[node], key = lambda x: self.paths[x][-1])\r\n result = ""\r\n if n > 0: result += ":"\r\n if n > 1: result += "["\r\n for i in range(n):\r\n result += "{"\r\n result += self.dfs(nodesSorted[i])\r\n result += "}"\r\n if i < n - 1: result += ","\r\n if n > 1: result += "]"\r\n\r\n if result: \r\n if result not in self.subtreeIndexMap:\r\n self.subtreeIndexMap[result] = set() \r\n self.subtreeIndexMap[result].add(node)\r\n return self.paths[node][-1] + result\r\n\r\n def dfsThrow(self, node):\r\n self.KeysToThrow.add(node)\r\n for child in self.graph[node]:\r\n if child not in self.KeysToThrow:\r\n self.dfsThrow(child)\r\n\r\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\r\n data = [[len(path), i] for i, path in enumerate(paths)]\r\n data.sort()\r\n self.paths = paths\r\n n = len(data)\r\n self.graph = [[] for i in range(n)]\r\n j = 0\r\n cur = 0\r\n prev = set()\r\n serializedPathToIndex = {}\r\n self.subtreeIndexMap = {}\r\n dfsStartNodes = set()\r\n while j < n:\r\n cur += 1\r\n next = set()\r\n while j < n and data[j][0] == cur:\r\n pathSerialized = ",".join(paths[data[j][1]])\r\n parentSerialized = ",".join(paths[data[j][1]][:-1])\r\n if parentSerialized in prev:\r\n self.graph[serializedPathToIndex[parentSerialized]].append(data[j][1])\r\n serializedPathToIndex[pathSerialized] = data[j][1]\r\n 
next.add(pathSerialized)\r\n j += 1\r\n if cur == 1:\r\n dfsStartNodes = set([serializedPathToIndex[i] for i in next])\r\n prev = next\r\n \r\n for node in dfsStartNodes:\r\n self.dfs(node)\r\n\r\n self.KeysToThrow = set()\r\n for indices in self.subtreeIndexMap.values():\r\n if len(indices) > 1:\r\n for i in indices:\r\n if i not in self.KeysToThrow:\r\n self.dfsThrow(i)\r\n result = []\r\n for i in range(n):\r\n if i not in self.KeysToThrow:\r\n result.append(paths[i])\r\n return result\r\n``` | 0 | 0 | ['Python3'] | 0 |
delete-duplicate-folders-in-system | Python Clean sultion( DFS, hashmaps, serialization) with explaination | python-clean-sultion-dfs-hashmaps-serial-2911 | Intuition\n Describe your first thoughts on how to solve this problem. \nThis code is devised to address the problem of identifying and removing duplicate folde | mabdolahi | NORMAL | 2023-09-17T11:35:54.750370+00:00 | 2023-09-17T11:37:04.627480+00:00 | 38 | false | # Intuition\n<!-- Describe your first thoughts on how to solve this problem. -->\nThis code is devised to address the problem of identifying and removing duplicate folder structures within a given set of folder paths. A folder structure is considered duplicate if there exists another folder structure with the exact same structure and folder names.\n\nThe initial intuition for solving this problem is to use a trie (prefix tree) data structure, which allows us to efficiently represent the folder paths in a hierarchical structure. By traversing this trie, we can serialize the folder structures into strings and identify duplicate structures by looking for identical serialized strings.\n\n# Approach\n<!-- Describe your approach to solving the problem. -->\nThe approach followed in the code can be broken down into several steps:\n\n**1. Building the Trie:**\n\n- A trie data structure is built to represent the folder paths. Each node in the trie contains a dictionary representing its children, a boolean variable indicating if it\'s the end of a path, and another boolean variable to mark if it needs to be deleted (if it\'s part of a duplicated folder structure).\n- The insert function is used to insert a path into the trie. It starts from the root and iterates through the folders in the path, adding new nodes as necessary.\n\n**2. Serialization:**\n\n- The serialize function is used to create a string representation of each folder structure in the trie. 
This function is a recursive function that starts from the root and generates a string representation for each node by concatenating the folder names and the serialized strings of the child nodes (in lexicographical order).\n- All serialized strings are stored in a dictionary, where the key is the serialized string and the value is a list of nodes that have that serialized structure.\n- The serialization process converts the folder structures in the trie into string representations, which can be used to easily identify duplicates. Here\'s a detailed explanation of this process:\n\n - It starts with an opening bracket ( to denote the start of a new folder structure.\n - It then iterates over the child nodes in lexicographical order (hence the sorted function usage). Sorting ensures that two identical folder structures will have the exact same serialized string, facilitating the identification of duplicates.\n - For each child node, it appends the folder name and the serialized string of the child node to the current serialized string.\n - It ends with a closing bracket ) to denote the end of the current folder structure.\n - If the serialized string is not () (which represents an empty folder), it adds the node to the list of nodes with that serialized structure in the dictionary.\n - Finally, it returns the serialized string for the current node.\n\nSerialization is an essential step in this solution because it allows us to convert the hierarchical folder structures into string representations, which can be easily compared to identify duplicates. By representing each folder structure as a unique string, we can efficiently find duplicate structures by looking for identical serialized strings in the dictionary.\n\nSorting the keys (folder names) during the serialization process ensures that two identical folder structures will have the exact same serialized string. 
Without sorting, the order of child nodes might vary between two identical structures, resulting in different serialized strings and failing to identify them as duplicates. Sorting guarantees a consistent order of child nodes in the serialized string, facilitating the identification of duplicates.\n\n**3. Marking Duplicates:**\n\n- After serialization, nodes that have more than one occurrence in the dictionary (indicating duplicated structures) are marked for deletion by setting their deleted attribute to True.\n\n**4. Extracting Non-Duplicate Paths:**\n\n- Finally, the dfs (Depth-First Search) function traverses the trie to extract the non-duplicate paths. It starts from the root and recursively visits all nodes that are not marked for deletion, collecting the folder names to construct the paths. Here\'s a breakdown of its operations:\n\n - It first checks whether the current node is marked for deletion (deleted = True). If so, it terminates the traversal for the current path since it\'s part of a duplicated folder structure.\n - If the node is not marked for deletion and the current path is not empty, it adds the current path to the result list.\n - It then recursively visits the child nodes that are not marked for deletion, extending the current path with the folder name of each child node.\n\n**5. Result Compilation:**\n\n- The paths collected by the dfs function are then returned as the result.\n\n# Complexity\n**Time complexity:**\n<!-- Add your time complexity here, e.g. $$O(n)$$ -->\nThe time and space complexity of the given code can be analyzed based on the operations being performed at each step. Here\'s the breakdown:\n\n**1.Inserting Paths into the Trie (Step 1):**\n\nIterating over each path and inserting it into the trie takes O(nm) time, where n is the number of paths and m is the maximum length of a path.\n\n**2.Serialization (Step 2):**\n\nThe serialization process involves a depth-first traversal of the trie, visiting each node once. 
Additionally, at each node, we sort the keys of the children dictionary, which takes \nO(klogk) time, where k is the number of children. Therefore, the time complexity for the serialization process can be approximated to \nO(nmlogm), where \nn is the number of nodes in the trie and \nm is the maximum branching factor (maximum number of children of a node).\n\n**3.Marking Duplicates (Step 3):**\n\nThis step involves iterating over the serialized strings and, for each string with more than one node, marking those nodes as deleted. This takes O(n) time, where n is the number of nodes in the trie.\n\n**4.Extracting Non-Duplicate Paths (Step 5):**\nThis step involves a depth-first traversal of the trie to extract non-duplicate paths, visiting each node once. This takes O(n) time, where n is the number of nodes in the trie.\n\nCombining these, the total time complexity is approximately O(nmlogm).\n\n**Space complexity:**\n<!-- Add your space complexity here, e.g. $$O(n)$$ -->\n\n**1. Trie Structure:**\n\nThe trie structure itself takes O(nm) space to store all the nodes, where n is the number of paths and m is the maximum length of a path.\n\n**2. Serialized Dictionary:**\n\nThe serialized dictionary stores serialized strings for each node in the trie. The total length of all serialized strings can be \nO(nm), where n is the number of nodes and m is the maximum branching factor.\n\n**3. 
Result List:**\n\nThe result list stores the non-duplicate paths, which can take up to O(nm) space in the worst case.\n\nCombining these, the total space complexity is O(nm).\n\nReference Video: https://www.youtube.com/watch?v=5UGb889i21Y\n\n# Code\n```\nclass TrieNode:\n def __init__(self):\n self.children = {} # key is or folder name, sub directories or the TrieNode is the value\n self.is_end = False # check if it is the child node of the tree\n self.deleted = False # True ifthe subtree should be deleted\n\nclass Solution:\n def deleteDuplicateFolder(self, paths: List[List[str]]) -> List[List[str]]:\n \n # Setting Up a Basic trie Structure\n def insert(path):\n node = root\n for folder in path:\n if folder not in node.children:\n node.children[folder] = TrieNode()\n node = node.children[folder]\n node.is_end = True\n \n # Serialize or hash the subfolders to find the duplicate ones\n def serialize(node):\n ans = \'(\'\n for folder, child in sorted(node.children.items()):\n ans += folder + serialize(child)\n ans += \')\'\n if ans != "()": \n serialized[ans].append(node)\n return ans\n\n # Travers the trie and append non-duplicated folders to the result\n def dfs(node, path):\n if node.deleted:\n return\n if path:\n result.append(path)\n for folder, child in node.children.items():\n if not child.deleted:\n dfs(child, path + [folder])\n\n # Step 1: Insert paths into the trie and build the Trie\n root = TrieNode()\n\n for path in paths:\n insert(path)\n \n #step 2: initiliaze the serialization and serilize the nodes\n serialized = defaultdict(list)\n serialize(root)\n # Step 3: Find the duplciated and mark them\n for nodes in serialized.values():\n if len(nodes) > 1:\n for node in nodes:\n node.deleted = True\n\n # Step 5: travers the trie and print the non duplicated sub trees\n result = []\n dfs(root, [])\n\n return result\n\n``` | 0 | 0 | ['Python3'] | 0 |
delete-duplicate-folders-in-system | O(N) time solution (for dedup), not O(N^2) | on-time-solution-for-dedup-not-on2-by-us-ch25 | Intuition\n\nAssumptions:\nK = sum(paths[i].length)\nN = Number of nodes in folder structure\nW = Length of folder name\nP = paths.length\nD = max(paths[i].leng | user5415h | NORMAL | 2023-05-21T11:13:48.975734+00:00 | 2023-05-21T13:16:23.511767+00:00 | 97 | false | # Intuition\n\nAssumptions:\nK = sum(paths[i].length)\nN = Number of nodes in folder structure\nW = Length of folder name\nP = paths.length\nD = max(paths[i].length)\n\nWe run dedup operation to find common subtrees in the folder structure. Dedup will rewire the references to child nodes to point to the same common subtrees. This allows us to improve the time complexity of dedup to O(N W) (as opposed to O(N^2 W)) where N is the number of nodes in the original folder structure (pre-dedup) and W is length of folder name. \n\n# Approach\n1) Build the folder tree from the given paths. Takes O(K W) time.\n2) Run dedup. If a subtree is common across different parts of the larger tree make sure that same Node object (representing that subtree) is referenced across the different parts of the larger tree. Takes O(N W) time.\n3) Group nodes which have common substructure. Takes O(N W) time.\n4) If a group has 2 or more nodes, mark each of those nodes for removal. Takes O (N) time.\n5) Build final output, ignoring nodes marked for removal. 
O(N W + D^2 P)\n\n# Complexity\n- Time complexity:\nO (K W + D^2 P)\n\n- Space complexity:\nO (N W)\n\n# Code\n```\nclass Solution {\n \n class Node {\n Integer hash;\n boolean marked;\n String name;\n HashMap<String,Node> children;\n Node(String name){\n this.name = name;\n children = new HashMap<>();\n }\n public int hashCode(){\n if(hash==null) hash = children.hashCode();\n return hash;\n }\n public boolean equals(Object o){\n if (this == o) return true;\n if (o == null) return false;\n if (this.getClass() != o.getClass()) return false;\n Node other = (Node) o;\n if(this.children.size()!=other.children.size()) return false;\n for(String ch:this.children.keySet()){ \n //Only check shallow equality of Node objects (assumption is that they have already been deduped). \n //Checking deep equality would have caused dedup to take O(N^2 W) time.\n if(this.children.get(ch)!=other.children.get(ch)) return false;\n }\n return true;\n }\n }\n \n HashMap<String,HashMap<Node,Node>> dedupMap;\n \n Node dedup(Node node){\n for(String ch:node.children.keySet()){\n Node chNode = dedup(node.children.get(ch));\n node.children.put(ch,chNode);\n }\n HashMap<Node,Node> g = dedupMap.get(node.name);\n if(g==null){\n g = new HashMap<>();\n g.put(node,node);\n dedupMap.put(node.name,g);\n return node;\n } else {\n Node dup = g.get(node);\n if(dup==null){\n g.put(node,node);\n return node;\n } else {\n return dup;\n }\n }\n }\n \n HashMap<Node,List<Node>> groups;\n \n void createGroups(Node node){\n if(!groups.containsKey(node)){\n groups.put(node, new ArrayList<>());\n }\n groups.get(node).add(node);\n for(String ch:node.children.keySet()){\n createGroups(node.children.get(ch));\n }\n }\n \n List<List<String>> ans;\n \n void buildAns(Node node, List<String> path){\n if(node.marked && node.children.size()>0) return;\n path.add(node.name);\n ans.add(new ArrayList<>(path));\n for(String ch:node.children.keySet()){\n buildAns(node.children.get(ch),path);\n }\n path.remove(path.size()-1);\n 
}\n \n public List<List<String>> deleteDuplicateFolder(List<List<String>> paths) {\n Node root = new Node("/");\n for(List<String> path:paths){\n Node cur = root;\n for(String fold:path){\n if(!cur.children.containsKey(fold)){\n cur.children.put(fold,new Node(fold));\n }\n cur=cur.children.get(fold);\n }\n }\n dedupMap = new HashMap<>();\n root = dedup(root);\n groups = new HashMap<>();\n createGroups(root);\n for(Map.Entry<Node,List<Node>> entry:groups.entrySet()){\n if(entry.getValue().size()>1){\n for(Node node:entry.getValue()) node.marked=true;\n }\n }\n ans = new ArrayList<>();\n for(String ch:root.children.keySet()){\n buildAns(root.children.get(ch), new ArrayList<>());\n }\n return ans;\n }\n}\n``` | 0 | 0 | ['Java'] | 0 |
delete-duplicate-folders-in-system | [Kotlin] Simple kotlin solution with explanation | kotlin-simple-kotlin-solution-with-expla-63yt | Approach\n1. First, build a tree from the lists. \n2. Then, do a dfs pass through to build the serialized content of each dir (there\'s some trickiness here as | motsrox | NORMAL | 2023-03-25T21:45:59.072211+00:00 | 2023-03-25T21:45:59.072299+00:00 | 38 | false | # Approach\n1. First, build a tree from the lists. \n2. Then, do a dfs pass through to build the serialized content of each dir (there\'s some trickiness here as if you\'re not careful a serialization is not neccessarily unique.). While building this serialization, use a hashmap to keep track of how often we\'ve seen each content.\n3. Go through the tree a second time with dfs. Ignore all nodes where the content serialization occurs more than once in the map we\'ve built in the previous step.\n\n# Complexity\n- Time complexity:\n$$O(n) = n *k$$ (k = length of path)\nWe basically do one pass through the list ($$O(n*k)$$) to build the tree, then dfs twice to filter and build the result ($$O(n)$$).\n<!-- Add your space complexity here, e.g. 
$$O(n)$$ -->\n\n# Code\n```\nclass Solution {\n fun deleteDuplicateFolder(paths: List<List<String>>): List<List<String>> {\n val root = Node("")\n buildTree(root, paths)\n\n val contentOccurences = mutableMapOf<String, Int>()\n buildContent(root, contentOccurences)\n System.out.println(contentOccurences.entries.joinToString())\n\n val output = mutableListOf<List<String>>()\n buildOutput(root, emptyList(), contentOccurences, output)\n return output\n }\n\n private fun buildTree(root: Node, paths: List<List<String>>) {\n paths.forEach { path ->\n var curr = root\n path.forEach { filename ->\n curr = curr.children.find { it.name == filename } ?: \n Node(filename).also{ curr.children.add(it) }\n }\n }\n }\n\n private fun buildContent(root: Node, occurenceMap: MutableMap<String, Int>): String {\n val sep = ";"\n val dirSep = "/"\n if (root.children.isEmpty()) {\n return root.name + dirSep\n }\n val content = root.children\n .sortedBy { it.name } \n .map { buildContent(it, occurenceMap) }\n .joinToString(sep)\n occurenceMap[content] = (occurenceMap[content] ?: 0) + 1\n root.content = content\n return "${root.name}$dirSep${root.children.size}$content"\n }\n\n private fun buildOutput(root: Node,\n currList: List<String>,\n occurenceMap: Map<String, Int>,\n output: MutableList<List<String>>) {\n val isRootOfDir = root.name.isEmpty()\n val appendedList = if (isRootOfDir) currList else currList.plus(root.name)\n if (!isRootOfDir) {\n output.add(appendedList)\n }\n\n root.children\n .filter { it.content.isEmpty() || occurenceMap[it.content] == 1}\n .forEach {\n buildOutput(it, appendedList, occurenceMap, output)\n }\n }\n}\n\ndata class Node(val name: String, var content: String = "", val children: MutableList<Node> = mutableListOf())\n``` | 0 | 0 | ['Kotlin'] | 0 |
delete-duplicate-folders-in-system | Just a runnable solution | just-a-runnable-solution-by-ssrlive-xal9 | Code\n\nuse std::cell::RefCell;\nuse std::collections::BTreeMap;\nuse std::rc::Rc;\n\n#[derive(Debug, Clone, PartialEq, Eq)]\nstruct Node {\n name: String,\n | ssrlive | NORMAL | 2023-02-28T15:10:36.747624+00:00 | 2023-02-28T15:10:36.747668+00:00 | 20 | false | # Code\n```\nuse std::cell::RefCell;\nuse std::collections::BTreeMap;\nuse std::rc::Rc;\n\n#[derive(Debug, Clone, PartialEq, Eq)]\nstruct Node {\n name: String,\n next: BTreeMap<String, Option<Rc<RefCell<Node>>>>,\n del: bool,\n}\n\nimpl Node {\n fn new(name: String) -> Self {\n Self {\n name,\n next: BTreeMap::new(),\n del: false,\n }\n }\n}\n\nimpl Solution {\n pub fn delete_duplicate_folder(paths: Vec<Vec<String>>) -> Vec<Vec<String>> {\n let mut ans = vec![];\n let root = Some(Rc::new(RefCell::new(Node::new("".to_string()))));\n for path in paths {\n Self::add_path(root.clone(), path);\n }\n let mut seen = BTreeMap::new();\n Self::dedupe(root.clone(), &mut seen);\n\n let mut path = vec![];\n for (_, next) in root.as_ref().unwrap().borrow().next.iter() {\n Self::get_path(next.clone(), &mut ans, &mut path);\n }\n\n ans\n }\n\n fn add_path(mut node: Option<Rc<RefCell<Node>>>, path: Vec<String>) {\n for s in path {\n if node.as_ref().unwrap().borrow().next.get(&s).is_none() {\n node.as_mut()\n .unwrap()\n .borrow_mut()\n .next\n .insert(s.clone(), Some(Rc::new(RefCell::new(Node::new(s.clone())))));\n }\n let tmp = node.as_ref().unwrap().borrow().next.get(&s).unwrap().clone();\n node = tmp;\n }\n }\n\n fn dedupe(mut node: Option<Rc<RefCell<Node>>>, seen: &mut BTreeMap<String, Option<Rc<RefCell<Node>>>>) -> String {\n let mut subfolder = String::new();\n for (_, next) in node.as_ref().unwrap().borrow().next.iter() {\n subfolder += &Self::dedupe(next.clone(), seen);\n }\n if !subfolder.is_empty() {\n if seen.contains_key(&subfolder) {\n seen.get_mut(&subfolder).unwrap().as_mut().unwrap().borrow_mut().del = 
true;\n node.as_mut().unwrap().borrow_mut().del = true;\n } else {\n seen.insert(subfolder.clone(), node.clone());\n }\n }\n format!("({}{})", node.as_ref().unwrap().borrow().name, subfolder)\n }\n\n fn get_path(node: Option<Rc<RefCell<Node>>>, ans: &mut Vec<Vec<String>>, path: &mut Vec<String>) {\n if node.as_ref().unwrap().borrow().del {\n return;\n }\n path.push(node.as_ref().unwrap().borrow().name.clone());\n ans.push(path.clone());\n for (_, next) in node.as_ref().unwrap().borrow().next.iter() {\n Self::get_path(next.clone(), ans, path);\n }\n path.pop();\n }\n}\n\n``` | 0 | 0 | ['Rust'] | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.