Dataset schema:
- content: string (85 to 101k characters)
- title: string (0 to 150 characters)
- question: string (15 to 48k characters)
- answers: list
- answers_scores: list
- non_answers: list
- non_answers_scores: list
- tags: list
- name: string (35 to 137 characters)
Q: Why doesn't the function sort the list correctly?

I am trying to build a function that detects anagrams in a list of words and returns a list of all the anagram groups, ordered by their location in the original list. For example:

input: ['deltas', 'retainers', 'desalt', 'pants', 'slated', 'generating', 'ternaries', 'smelters', 'termless', 'salted', 'staled', 'greatening', 'lasted', 'resmelts']
output: [['deltas', 'desalt', 'slated', 'salted', 'staled', 'lasted'], ['retainers', 'ternaries'], ['pants'], ['generating', 'greatening'], ['smelters', 'termless', 'resmelts']]

My code is this:

def sort_anagrams(list_of_strings):
    #list_of_strings = tuple(list_of_strings)
    #print(list_of_strings)
    sorted_list_of_anagrams = []
    for word_1 in list_of_strings:
        local_list = []
        if word_1 in local_list:
            for word_2 in list_of_strings:
                if is_anagrams(word_1, word_2) == True:
                    local_list.append(word_2)
        else:
            local_list.append(word_1)
            for word_2 in list_of_strings:
                if is_anagrams(word_1, word_2) == True:
                    local_list.append(word_2)
        local_list = sorted(local_list)
        if sorted(local_list) in sorted(sorted_list_of_anagrams):
            pass
        else:
            sorted_list_of_anagrams.append(local_list)
    print(sorted_list_of_anagrams)
    #return sorted_list_of_anagrams

def is_anagrams(str_1, str_2):
    return str_1 != str_2 and sorted(str_1) == sorted(str_2)

def create_anagram_list(anagram_list):
    anagram_list = list(anagram_list)
    print(anagram_list)

first_list = ["deltas", "retainers", "desalt", "pants", "slated", "generating", "ternaries", "smelters", "termless", "salted", "staled", "greatening", "lasted", "resmelts"]
sort_anagrams(first_list)

It gives me back the anagrams, but not in the right order. For example: ['resmelts', 'smelters', 'termless'] instead of ['smelters', 'termless', 'resmelts'].

A: This was the problematic code:

    local_list = sorted(local_list)
    if sorted(local_list) in sorted(sorted_list_of_anagrams):

Because sorted_list_of_anagrams is a list of lists, sorting it doesn't sort every inner list individually, only the outer list. This should work:

    if sorted(local_list) in [sorted(lst) for lst in sorted_list_of_anagrams]:

Also make sure to delete the line above it, local_list = sorted(local_list).
Why doesn't the function sort the list correctly?
I am trying to build a function that detects anagrams in a list of words and returns a list of all the anagram groups, ordered by their location in the original list. For example:

input: ['deltas', 'retainers', 'desalt', 'pants', 'slated', 'generating', 'ternaries', 'smelters', 'termless', 'salted', 'staled', 'greatening', 'lasted', 'resmelts']
output: [['deltas', 'desalt', 'slated', 'salted', 'staled', 'lasted'], ['retainers', 'ternaries'], ['pants'], ['generating', 'greatening'], ['smelters', 'termless', 'resmelts']]

My code is this:

def sort_anagrams(list_of_strings):
    #list_of_strings = tuple(list_of_strings)
    #print(list_of_strings)
    sorted_list_of_anagrams = []
    for word_1 in list_of_strings:
        local_list = []
        if word_1 in local_list:
            for word_2 in list_of_strings:
                if is_anagrams(word_1, word_2) == True:
                    local_list.append(word_2)
        else:
            local_list.append(word_1)
            for word_2 in list_of_strings:
                if is_anagrams(word_1, word_2) == True:
                    local_list.append(word_2)
        local_list = sorted(local_list)
        if sorted(local_list) in sorted(sorted_list_of_anagrams):
            pass
        else:
            sorted_list_of_anagrams.append(local_list)
    print(sorted_list_of_anagrams)
    #return sorted_list_of_anagrams

def is_anagrams(str_1, str_2):
    return str_1 != str_2 and sorted(str_1) == sorted(str_2)

def create_anagram_list(anagram_list):
    anagram_list = list(anagram_list)
    print(anagram_list)

first_list = ["deltas", "retainers", "desalt", "pants", "slated", "generating", "ternaries", "smelters", "termless", "salted", "staled", "greatening", "lasted", "resmelts"]
sort_anagrams(first_list)

It gives me back the anagrams, but not in the right order. For example: ['resmelts', 'smelters', 'termless'] instead of ['smelters', 'termless', 'resmelts'].
[ "This was the problematic code:\n local_list = sorted(local_list)\n if sorted(local_list) in sorted(sorted_list_of_anagrams):\n\nBeing that sorted_list_of_anagrams is a list of lists when you sort it it doesn't sort every inner list individually only the outer list. This should work:\nif sorted(local_list) in [sorted(lst) for lst in sorted_list_of_anagrams]:\n\nAlso make sure to delete the line above it local_list = sorted(local_list).\n" ]
[ 1 ]
[]
[]
[ "python", "python_2.7" ]
stackoverflow_0074534014_python_python_2.7.txt
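A more direct way to group anagrams while keeping first-appearance order is to key a dictionary on each word's sorted letters; a minimal sketch of that idea, not part of the original answer (it assumes Python 3.7+, where dicts preserve insertion order):

import pprint

def group_anagrams(list_of_strings):
    # Anagrams share the same sorted-letter key; dict insertion order
    # preserves the position of each group's first occurrence.
    groups = {}
    for word in list_of_strings:
        groups.setdefault("".join(sorted(word)), []).append(word)
    return list(groups.values())

words = ["deltas", "retainers", "desalt", "pants", "slated",
         "generating", "ternaries", "smelters", "termless",
         "salted", "staled", "greatening", "lasted", "resmelts"]
pprint.pprint(group_anagrams(words))
# [['deltas', 'desalt', 'slated', 'salted', 'staled', 'lasted'],
#  ['retainers', 'ternaries'], ['pants'], ['generating', 'greatening'],
#  ['smelters', 'termless', 'resmelts']]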
Q: What is the reason for this error I'm getting when using tkinter for a math app

I'm making a program that will do most of my homework. I'm trying to add some UI and it gives errors in my code. Please tell me what's wrong, and make it easy enough for a 13-year-old to understand, because I'm new to Python. This gives an error only when I use a canvas. If I use the window directly it doesn't, but I want to use a canvas because I can position widgets more accurately.

from tkinter import *

root = Tk()

canvas1 = Canvas(root, width=400, height=300)
canvas1.pack()

entry1 = Entry(root)
canvas1.create_window(200, 140, window=entry1)

entry2 = Entry(root)
canvas1.create_window(200, 180, window=entry2)

entry3 = Entry(root)
canvas1.create_window(200, 220, window=entry3)

def getvalue():
    p = entry1.get()
    r = entry2.get()
    t = entry3.get()
    labelans = Label(root, text=float(p*r*t)/100)
    canvas1.create_window(200, 230, window=labelans)

label1 = Label(root, text="Time")
canvas1.create_window(437, 220, window=label1)

label2 = Label(root, text="Rate")
canvas1.create_window(437, 180, window=label2)

label3 = Label(root, text="Principal")
canvas1.create_window(465, 140, window=label3)

button1 = Button(text='Solve!', bg="red", command=getvalue)
canvas1.create_window(200, 300, window=button1)

mainloop()

And it gives this error (the same traceback repeats on every click):

Exception in Tkinter callback
Traceback (most recent call last):
  File "/data/user/0/ru.iiec.pydroid3/files/arm-linux-androideabi/lib/python3.9/tkinter/__init__.py", line 1892, in __call__
    return self.func(*args)
  File "/data/user/0/ru.iiec.pydroid3/files/temp_iiec_codefile.py", line 17, in getvalue
    labelans = Label(root, text = float(p*r*t)/100)
TypeError: can't multiply sequence by non-int of type 'str'

A: On lines 19-21 I added float; on line 22 I removed float. Also, for the Label widgets I changed the x, y locations, and in line 23 I placed the Label below the Entry. Here is the code:

from tkinter import *

root = Tk()
root.title('Math')

canvas1 = Canvas(root, width=400, height=320)
canvas1.pack()

entry1 = Entry(root)
canvas1.create_window(200, 140, window=entry1)

entry2 = Entry(root)
canvas1.create_window(200, 180, window=entry2)

entry3 = Entry(root)
canvas1.create_window(200, 220, window=entry3)

def getvalue():
    p = float(entry1.get())
    r = float(entry2.get())
    t = float(entry3.get())
    labelans = Label(root, text=(p*r*t)/100)
    canvas1.create_window(200, 250, window=labelans)

label1 = Label(root, text="Time")
canvas1.create_window(120, 140, window=label1)

label2 = Label(root, text="Rate")
canvas1.create_window(120, 180, window=label2)

label3 = Label(root, text="Principal")
canvas1.create_window(110, 220, window=label3)

button1 = Button(text='Solve!', bg="red", command=getvalue)
canvas1.create_window(200, 300, window=button1)

mainloop()

Result:
What is the reason for this error I'm getting when using tkinter for a math app
I'm making a program that will do most of my homework. I'm trying to add some UI and it gives errors in my code. Please tell me what's wrong, and make it easy enough for a 13-year-old to understand, because I'm new to Python. This gives an error only when I use a canvas. If I use the window directly it doesn't, but I want to use a canvas because I can position widgets more accurately.

from tkinter import *

root = Tk()

canvas1 = Canvas(root, width=400, height=300)
canvas1.pack()

entry1 = Entry(root)
canvas1.create_window(200, 140, window=entry1)

entry2 = Entry(root)
canvas1.create_window(200, 180, window=entry2)

entry3 = Entry(root)
canvas1.create_window(200, 220, window=entry3)

def getvalue():
    p = entry1.get()
    r = entry2.get()
    t = entry3.get()
    labelans = Label(root, text=float(p*r*t)/100)
    canvas1.create_window(200, 230, window=labelans)

label1 = Label(root, text="Time")
canvas1.create_window(437, 220, window=label1)

label2 = Label(root, text="Rate")
canvas1.create_window(437, 180, window=label2)

label3 = Label(root, text="Principal")
canvas1.create_window(465, 140, window=label3)

button1 = Button(text='Solve!', bg="red", command=getvalue)
canvas1.create_window(200, 300, window=button1)

mainloop()

And it gives this error (the same traceback repeats on every click):

Exception in Tkinter callback
Traceback (most recent call last):
  File "/data/user/0/ru.iiec.pydroid3/files/arm-linux-androideabi/lib/python3.9/tkinter/__init__.py", line 1892, in __call__
    return self.func(*args)
  File "/data/user/0/ru.iiec.pydroid3/files/temp_iiec_codefile.py", line 17, in getvalue
    labelans = Label(root, text = float(p*r*t)/100)
TypeError: can't multiply sequence by non-int of type 'str'
[ "On line 19-21, I added float. Line 22, I removed float. Also for LABEL widget I I changed x, y location. in line 23, I also place LABEL below ENTRY\nHere is code:\nfrom tkinter import *\n\nroot=Tk()\nroot.title('Math')\n\ncanvas1 = Canvas(root, width = 400, height = 320)\ncanvas1.pack()\n\nentry1 = Entry (root) \ncanvas1.create_window(200, 140, window=entry1)\n\nentry2 = Entry (root) \ncanvas1.create_window(200, 180, window=entry2)\n\nentry3 = Entry (root) \ncanvas1.create_window(200, 220, window=entry3)\n\ndef getvalue():\n p = float(entry1.get())\n r = float(entry2.get())\n t = float(entry3.get() )\n labelans = Label(root, text=(p*r*t)/100)\n canvas1.create_window(200, 250, window=labelans)\n\nlabel1 = Label(root, text=\"Time\") \ncanvas1.create_window(120, 140, window=label1)\n\nlabel2 = Label(root, text=\"Rate\")\ncanvas1.create_window(120,180, window=label2)\n\nlabel3 = Label(root, text=\"Principal\")\ncanvas1.create_window(110, 220, window=label3)\n\nbutton1 = Button(text='Solve!', bg=\"red\", command=getvalue)\ncanvas1.create_window(200, 300, window=button1)\n\nmainloop()\n\nResult:\n\n" ]
[ 1 ]
[]
[]
[ "python", "python_3.x", "tkinter" ]
stackoverflow_0072149050_python_python_3.x_tkinter.txt
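The root cause above is independent of the canvas: Entry.get() returns strings, and float(p*r*t) tries to multiply strings before converting anything. A minimal sketch of the distinction (the sample values are made up for illustration):

p, r, t = "1000", "5", "2"          # what Entry.get() returns: strings
try:
    print(float(p * r * t) / 100)   # str * str raises before float() runs
except TypeError as e:
    print(e)                        # can't multiply sequence by non-int of type 'str'

p, r, t = float(p), float(r), float(t)  # convert each value first, then multiply
print(p * r * t / 100)                  # 100.0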
Q: Python bind Dataclass and TypedDict (Inherit Dataclass from TypedDict)

I want to somehow bind my TypedDict (which I'm using for database result type hints) and my dataclass. I know that it may be hard to implement and that TypedDict is just a dict at run time, but still: logically, from the point of view of design and architecture, it sounds sensible, more consistent, and neat. The implementation should not affect the design.

from typing import TypedDict
from dataclasses import dataclass

class UserDB(TypedDict):
    id: int

# Causing error ValueError: no signature found for builtin type <class 'dict'>
@dataclass
class UserDC(UserDB):
    # "id: int" should be inherited from `TypedDict`
    name: str

# Or at least I want:
@dataclass
class AnotherUserDC:
    db_data: UserDB

Expected: the field UserDC.id (at least the type hint) should be inherited from the TypedDict.
Actual: ValueError: no signature found for builtin type <class 'dict'>

A: A typing.TypedDict is something fundamentally different from a dataclass - to start, at runtime it does absolutely nothing, and behaves just as a plain dictionary (but provides the metainformation used to create it). It will accept unknown fields and invalid types, it works only with the item-getting [ ] syntax, and not with the dotted attribute syntax, etc.

So the easiest thing to do is to inject the meta-information of your TypedDicts into the dataclass metainformation before running the dataclass decorator. If you don't need the inheritance relationship - i.e., your code will not at any moment ask if a UserDC object is an instance of UserDB - that will work. To make it, a short intermediary decorator will do the job:

def inject_fields(parent):
    def doit(cls):
        cls.__annotations__ = parent.__annotations__ | cls.__annotations__
        return cls
    return doit

# and then:

class UserDB(TypedDict):
    id: int

@dataclass
@inject_fields(UserDB)
class UserDC:
    # "id: int" is injected from `TypedDict`
    name: str

Now, if you do need the OOP inheritance relationship - i.e. isinstance(UserDC(...), UserDB) is expected to return True - I first beg you to review your modelling: it makes little to no sense, as stated at the start of the answer; in Python one dictionary is a thing, a dataclass is another.

If you were dealing with any type other than TypedDict and still got to this point, for whatever reason in your specific setup (say, UserDB comes from a project created by another team), Python would offer the "virtual subclassing" workaround, where classes can answer as subclasses of others without any "physical" inheritance relationship. But typing.TypedDict in particular goes out of its way to ensure even this path is blocked. It is really meant to be a static-typechecking-only thing, not to be used at runtime as anything other than a plain dictionary.

Check the related snippet in typing.py, the source code of the typing module:

    def __subclasscheck__(cls, other):
        # Typed dicts are only for static structural subtyping.
        raise TypeError('TypedDict does not support instance and class checks')
Python bind Dataclass and TypedDict (Inherit Dataclass from TypedDict)
I want to somehow bind my TypedDict (which I'm using for database result type hints) and my dataclass. I know that it may be hard to implement and that TypedDict is just a dict at run time, but still: logically, from the point of view of design and architecture, it sounds sensible, more consistent, and neat. The implementation should not affect the design.

from typing import TypedDict
from dataclasses import dataclass

class UserDB(TypedDict):
    id: int

# Causing error ValueError: no signature found for builtin type <class 'dict'>
@dataclass
class UserDC(UserDB):
    # "id: int" should be inherited from `TypedDict`
    name: str

# Or at least I want:
@dataclass
class AnotherUserDC:
    db_data: UserDB

Expected: the field UserDC.id (at least the type hint) should be inherited from the TypedDict.
Actual: ValueError: no signature found for builtin type <class 'dict'>
[ "A typing.TypedDict is something fundamentally different from a dataclass - to start, at runtime, it does absolutely nothing, and behaves just as a plain dictionary (but provide the metainformation used to create it).\nIt will accept unknown fields and not-valid types, it works only with the item getting [ ] syntax, and not with the dotted attribute syntax, etc...\nSo, the easier thing to do is to inject the meta-information of your typedicts into the dataclass metainformation, Before running the dataclass decorator.\nIf you don't need the inheritance relationship - i.e., your code will not at any moment ask if an UserDC object is an instance of UserDB, that will work.\nTo make it, a short intermediary decorator will do the job:\ndef inject_fields(parent):\n def doit(cls):\n cls.__annotations__ = parent.__annotations__ | cls.__annotations__\n return cls\n return doit\n\n\n# and then:\n\n\nclass UserDB(TypedDict):\n id: int\n\n@dataclass\n@injectfields(UserDB)\nclass UserDC:\n # \"id: int\" is injected from `TypedDict`\n name: str\n\nNow, if you will need the OOP inheritance relationship - i.e. isinstance(UserDC(...), UserDB) is expected to return True, I first beg you to review your modelling: it makes little to no sense, as I stated in the start of the answer: in Python one dictionary is a thing, a dataclass is another.\nIf you were dealing with any other types than TypedDict, and still you get to this point, due to whatever reason in your specific setup (say, UserDB comes from a project created by another team), Python would offers the \"virtual subclassing\" workaround: when classes can answer of subclasses of others without any \"physical\" inheritance relationship.\nBut typing.TypedDict in particular goes out of this way to ensure even this way is blocked. It is really meant to be a static-typechecking only thing, and not to be used at runtime for nothing else than as a plain dicitionary.\nCheck the related snippet intyping.py, the sourcecode of the typing module:\n\n def __subclasscheck__(cls, other):\n # Typed dicts are only for static structural subtyping.\n raise TypeError('TypedDict does not support instance and class checks')\n\n\n" ]
[ 1 ]
[]
[]
[ "python", "python_dataclasses", "typeddict" ]
stackoverflow_0074507348_python_python_dataclasses_typeddict.txt
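A quick check of the decorator approach from the answer above; a sketch assuming Python 3.9+ for the | merge operator on annotation dicts:

from dataclasses import dataclass, fields
from typing import TypedDict

class UserDB(TypedDict):
    id: int

def inject_fields(parent):
    def doit(cls):
        # Parent annotations first, so injected fields precede the class's own.
        cls.__annotations__ = parent.__annotations__ | cls.__annotations__
        return cls
    return doit

@dataclass
@inject_fields(UserDB)
class UserDC:
    name: str

print([f.name for f in fields(UserDC)])  # ['id', 'name']
print(UserDC(id=1, name="ada"))          # UserDC(id=1, name='ada')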
Q: Failed to install package python-ldap

I am installing packages for Odoo 15 on Windows 11. When I install the python-ldap package 3.4.0 I get an error, and upgrading pip to the latest version did not help. Can anyone help me install it? I have tried very hard but I can't install the ldap package on Windows 11.

A: As per the python-ldap documentation, unofficial packages for Windows are available on Christoph Gohlke's page. You can download the .whl package file for your Python version and then install it as below:

pip install path_to_downloaded_whl/file_name.whl
Failed to install package python-ldap
I am installing packages for Odoo 15 on Windows 11. When I install the python-ldap package 3.4.0 I get an error, and upgrading pip to the latest version did not help. Can anyone help me install it? I have tried very hard but I can't install the ldap package on Windows 11.
[ "As per python-ldap documentation, there is Unofficial package for Windows are available on Christoph Gohlke’s page.\nYou can download .whl package file for your python version and then install it as below:\n pip install path_to_downloaded_whl/file_name.whl\n" ]
[ 0 ]
[]
[]
[ "odoo_15", "python" ]
stackoverflow_0074472077_odoo_15_python.txt
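Once a matching wheel is installed, a quick way to confirm it imports is sketched below; note that python-ldap's import name is ldap, and ldap.initialize() is lazy, so it succeeds without a reachable server (the URI is a placeholder):

import ldap

print(ldap.__version__)                     # e.g. 3.4.0
conn = ldap.initialize("ldap://localhost")  # placeholder URI; no connection is made yet
print(type(conn))                           # <class 'ldap.ldapobject.SimpleLDAPObject'>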
Q: How to sync slash commands globally in discord.py

I want to sync all slash commands with all guilds in discord.py. My code:

import discord
from discord import app_commands
from discord.ext import commands

intents = discord.Intents.default()
client = discord.Client(intents=intents)
tree = app_commands.CommandTree(client)

@client.event
async def on_ready():
    print(f'We have logged in as {client.user}')
    try:
        await tree.sync(guild=discord.Object(id=11234411))
        print(f'Synced')
    except Exception as e:
        print(e)

@tree.command(name="ping", description="Simple ping pong command", guild=discord.Object(id=1032007648566059142))
async def ping(interaction):
    await interaction.response.send_message(f"Pong", ephemeral=True)

I tried to just delete the guild=discord.Object(id=11234411), but it's not working.

A: You shouldn't sync your commands in on_ready because it's unnecessary and can get you ratelimited. You should create an owner-only command to sync the command tree instead.

@tree.command(name='sync', description='Owner only')
async def sync(interaction: discord.Interaction):
    if interaction.user.id == YOUR_ID:
        await tree.sync()
        print('Command tree synced.')
    else:
        await interaction.response.send_message('You must be the owner to use this command!')

You don't need to include guild anywhere because you're syncing global commands. For extra safety/security you can create a message command instead.
How to sync slash commands globally in discord.py
I want to sync all slash commands with all guilds in discord.py. My code:

import discord
from discord import app_commands
from discord.ext import commands

intents = discord.Intents.default()
client = discord.Client(intents=intents)
tree = app_commands.CommandTree(client)

@client.event
async def on_ready():
    print(f'We have logged in as {client.user}')
    try:
        await tree.sync(guild=discord.Object(id=11234411))
        print(f'Synced')
    except Exception as e:
        print(e)

@tree.command(name="ping", description="Simple ping pong command", guild=discord.Object(id=1032007648566059142))
async def ping(interaction):
    await interaction.response.send_message(f"Pong", ephemeral=True)

I tried to just delete the guild=discord.Object(id=11234411), but it's not working.
[ "You shouldn't sync your commands in on_ready because it's unnecessary and can get you ratelimited. You should create a owner-only command to sync the command tree instead.\n@tree.command(name='sync', description='Owner only')\nasync def sync(interaction: discord.Interaction):\n if interaction.user.id == YOUR_ID:\n await tree.sync()\n print('Command tree synced.')\n else:\n await interaction.response.send_message('You must be the owner to use this command!')\n\nYou don't need to include guild anywhere because you're syncing global commands. Your extra saftey/security you can create a message command instead.\n" ]
[ 0 ]
[]
[]
[ "discord.py", "python" ]
stackoverflow_0074413367_discord.py_python.txt
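For completeness, a minimal sketch of syncing once at startup instead of via a command, assuming discord.py 2.x; the token is a placeholder:

import discord
from discord import app_commands

class MyClient(discord.Client):
    def __init__(self):
        super().__init__(intents=discord.Intents.default())
        self.tree = app_commands.CommandTree(self)

    async def setup_hook(self):
        # Runs once before connecting to the gateway. No guild argument,
        # so commands are registered globally (propagation to all guilds
        # can take up to an hour).
        await self.tree.sync()

client = MyClient()

@client.tree.command(name="ping", description="Simple ping pong command")
async def ping(interaction: discord.Interaction):
    await interaction.response.send_message("Pong", ephemeral=True)

client.run("YOUR_BOT_TOKEN")  # placeholder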
Q: Run a Python script that interacts with Word (pywin32) in batch mode (Task Scheduler/Windows Service)

I have written a Python script that takes the RTF files my system creates and converts them into DOCX format. I accomplished this with the pywin32 library, which lets me open Word and save as DOCX.

import win32com.client

def ConvertRtfToDocx(path, file):
    word = win32com.client.Dispatch("Word.Application")
    wdFormatDocumentDefault = 16
    wdHeaderFooterPrimary = 1
    doc = word.Documents.Open(path + file)
    for pic in doc.InlineShapes:
        try:
            pic.LinkFormat.SavePictureWithDocument = True
        except:
            pass
    for hPic in doc.sections(1).headers(wdHeaderFooterPrimary).Range.InlineShapes:
        try:
            hPic.LinkFormat.SavePictureWithDocument = True
        except:
            pass
    doc.SaveAs(str(path + file.split(".")[0] + ".docx"), FileFormat=wdFormatDocumentDefault)
    doc.Close()
    word.Quit()

This has to run on demand: it scans the directory and converts files as soon as it finds them. Long story short, I was able to run it successfully from a simple cmd prompt. However, when I ran it as a service (NSSM) or "run as a batch job", it fails. I'm assuming it's because Python has no display in which to open Word... My operating system is Windows Server. I would appreciate any help with this task. One more note: it would be great if the solution used as little third-party software as possible.

A: It turns out that the problem wasn't the virtual display. The problem is that Microsoft does not allow the use of Office applications in batch mode by default. That is why, in 2008, Windows was changed so that only regularly logged-on users can make use of Office and Office objects.

But there are a few ways you can overcome this obstacle. The easiest way is to create these paths:

Windows Server x64
C:\Windows\SysWOW64\config\systemprofile\Desktop
Windows Server x86
C:\Windows\System32\config\systemprofile\Desktop

This way you are creating the "environment" for the system user (Admin), which is the executor of batch jobs. You can then run the script as a batch job via Task Scheduler, Windows services, etc.

If it doesn't work for you this way, I recommend you read these links:

Microsoft's explanation about interaction with MS objects in a batch: https://www.betaarchive.com/wiki/index.php/Microsoft_KB_Archive/257757
Forum about this issue: https://social.msdn.microsoft.com/Forums/en-US/b81a3c4e-62db-488b-af06-44421818ef91/excel-2007-automation-on-top-of-a-windows-server-2008-x64?forum=innovateonoffice
You can also use my repository on GitHub: https://github.com/TechNapoleon/RTF_TO_DOCX_CONVERTOR
Run a Python script that interacts with Word (pywin32) in batch mode (Task Scheduler/Windows Service)
I have written a Python script that takes the RTF files my system creates and converts them into DOCX format. I accomplished this with the pywin32 library, which lets me open Word and save as DOCX.

import win32com.client

def ConvertRtfToDocx(path, file):
    word = win32com.client.Dispatch("Word.Application")
    wdFormatDocumentDefault = 16
    wdHeaderFooterPrimary = 1
    doc = word.Documents.Open(path + file)
    for pic in doc.InlineShapes:
        try:
            pic.LinkFormat.SavePictureWithDocument = True
        except:
            pass
    for hPic in doc.sections(1).headers(wdHeaderFooterPrimary).Range.InlineShapes:
        try:
            hPic.LinkFormat.SavePictureWithDocument = True
        except:
            pass
    doc.SaveAs(str(path + file.split(".")[0] + ".docx"), FileFormat=wdFormatDocumentDefault)
    doc.Close()
    word.Quit()

This has to run on demand: it scans the directory and converts files as soon as it finds them. Long story short, I was able to run it successfully from a simple cmd prompt. However, when I ran it as a service (NSSM) or "run as a batch job", it fails. I'm assuming it's because Python has no display in which to open Word... My operating system is Windows Server. I would appreciate any help with this task. One more note: it would be great if the solution used as little third-party software as possible.
[ "Its turnout that the problem wasn't about the virtual display. The problem is that Microsoft does not allow use of Office applications in batch mode, by default. That why in 2008 they change the windows in the way that just logon regular users can make a use of Office and Office objects.\nBut there are a few ways you can overcome this obstacle.\nThe easiest way is to create these paths –\nWindows Server x64\nC:\\Windows\\SysWOW64\\config\\systemprofile\\Desktop\nWindows Server x86\nC:\\Windows\\System32\\config\\systemprofile\\Desktop\n\nThis way you are creating the “environment” for the system user (Admin) which is the executor of batch jobs. And you can run it as a batch job via Task Schedular, windows services etc.\nBut if it doesn't work for you this way, I recommend you read those links –\nMicrosoft explanation about interaction with MS objects in a batch- https://www.betaarchive.com/wiki/index.php/Microsoft_KB_Archive/257757\nForum about this issue –\nhttps://social.msdn.microsoft.com/Forums/en-US/b81a3c4e-62db-488b-af06-44421818ef91/excel-2007-automation-on-top-of-a-windows-server-2008-x64?forum=innovateonoffice\nYou can also use my repository on Github - https://github.com/TechNapoleon/RTF_TO_DOCX_CONVERTOR\n" ]
[ 0 ]
[]
[]
[ "batch_processing", "ms_word", "python", "pywin32", "windows_server_2012_r2" ]
stackoverflow_0074224332_batch_processing_ms_word_python_pywin32_windows_server_2012_r2.txt
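A small helper for the paths named in the answer above; a sketch that must run as Administrator, where os.makedirs(..., exist_ok=True) simply skips paths that already exist:

import os

# Desktop folders Word's COM automation expects for the SYSTEM user.
desktop_dirs = [
    r"C:\Windows\SysWOW64\config\systemprofile\Desktop",  # x64 hosts
    r"C:\Windows\System32\config\systemprofile\Desktop",  # x86 hosts
]

for d in desktop_dirs:
    os.makedirs(d, exist_ok=True)
    print("ensured:", d)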
Q: EKS/AKS cluster name convention

I am writing a script that receives a Kubernetes context name as an input and outputs the different elements of the cluster:

class GKE:
    def __init__(self, context):
        s = context.split("_")
        self.provider: str = s[0]
        self.project: str = s[1]
        self.data_center: GKE.DataCenter = GKE.DataCenter(data_center=s[2])
        self.cluster_name: str = s[3]

    def __str__(self):
        return f'provider: {self.provider}, project: {self.project}, {self.data_center}, cluster name: {self.cluster_name}'

    class DataCenter:
        def __init__(self, data_center: str):
            s = data_center.split("-")
            self.country: str = s[0]
            self.region: str = s[1]
            self.zone: str = s[2]

        def __str__(self):
            return f'country: {self.country}, region: {self.region}, zone: {self.zone}'

class EKS:
    # TODO: What are the fields? What is the convention?
    pass

class AKS:
    # TODO: What are the fields? What is the convention?
    pass

if __name__ == '__main__':
    print(GKE(context="gke_XXX-YYY-ZZZ_us-central1-c_name"))

Output:

provider: gke, project: XXX-YYY-ZZZ, country: us, region: central1, zone: c, cluster name: name

This will support only the three main providers (GKE, EKS, AKS). My question is: what are the different elements of EKS and AKS context names?

A: You need to differentiate between the actual name of the cluster and the naming scheme of a resource.

When I run kubectl config get-contexts on AKS, EKS, and GKE clusters, I get the following results:

NAME                                             AUTHINFO
gke_project-1234_us-central1-c_myGKECluster      gke_project-1234_us-central1-c_myGKECluster
myAKSCluster                                     clusterUser_myResourceGroup_myAKSCluster
arn:aws:eks:eu-west-1:1234:cluster/myEKSCluster  arn:aws:eks:eu-west-1:1234:cluster/myEKSCluster

In all three clouds, the actual name of the cluster in this example is my***Cluster.

The naming scheme in ~/.kube/config is used to distinguish one cluster (context-wise) from another. For example, when you want to change the context with kubectl, you have to differentiate between a cluster named myCluster in region-code1 and another cluster also named myCluster but in region-code2, and so on; that is what the naming scheme is for.

GKE:
As you wrote, the naming scheme in GKE consists of 4 parts: provider_project-id_zone_cluster-name
For example gke_project-123_us-central1-c_myGKECluster

provider: gke
project-id: project-123
zone: us-central1-c
cluster-name: myGKECluster

AKS:
In AKS the naming scheme is simply the name of the cluster. But the AUTHINFO (which is actually the configuration of the user in the kubeconfig file) consists of three parts: Resource-type_Resource-group_Resource-name
For example clusterUser_myResourceGroup_myAKSCluster

The Resource-type is clusterUser
The Resource-group is myResourceGroup
The Resource-name is myAKSCluster

EKS:
AWS requires an ARN when a resource needs to be specified unambiguously across all of AWS.
The ARN format is arn:partition:service:region:account-id:resource-type/resource-id
For example arn:aws:eks:eu-west-1:1234:cluster/myEKSCluster

partition: the partition in which the resource is located (such as aws Regions).
service: the service namespace that identifies the AWS product (such as eks).
region: the Region code (such as eu-west-1).
account-id: the ID of the AWS account that owns the resource (such as 1234).
resource-type: the resource type (such as cluster).
resource-id: the resource identifier. This is the name of the resource, the ID of the resource, or a resource path (such as myEKSCluster).

Additional resources:
https://stackoverflow.com/a/63824179/20571972
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-cluster.html#aws-resource-eks-cluster-return-values
EKS/AKS cluster name convention
I am writing a script that receives a Kubernetes context name as an input and outputs the different elements of the cluster:

class GKE:
    def __init__(self, context):
        s = context.split("_")
        self.provider: str = s[0]
        self.project: str = s[1]
        self.data_center: GKE.DataCenter = GKE.DataCenter(data_center=s[2])
        self.cluster_name: str = s[3]

    def __str__(self):
        return f'provider: {self.provider}, project: {self.project}, {self.data_center}, cluster name: {self.cluster_name}'

    class DataCenter:
        def __init__(self, data_center: str):
            s = data_center.split("-")
            self.country: str = s[0]
            self.region: str = s[1]
            self.zone: str = s[2]

        def __str__(self):
            return f'country: {self.country}, region: {self.region}, zone: {self.zone}'

class EKS:
    # TODO: What are the fields? What is the convention?
    pass

class AKS:
    # TODO: What are the fields? What is the convention?
    pass

if __name__ == '__main__':
    print(GKE(context="gke_XXX-YYY-ZZZ_us-central1-c_name"))

Output:

provider: gke, project: XXX-YYY-ZZZ, country: us, region: central1, zone: c, cluster name: name

This will support only the three main providers (GKE, EKS, AKS). My question is: what are the different elements of EKS and AKS context names?
[ "You need to differentiate between the correct name of the cluster and the naming schema of a resource.\nWhen I run kubectl config get-contexts on the clusters Aks, Eks, and Gke I get the following results:\nNAME AUTHINFO\ngke_project-1234_us-central1-c_myGKECluster gke_project-1234_us-central1-c_myGKECluster \nmyAKSCluster clusterUser_myResourceGroup_myAKSCluster\narn:aws:eks:eu-west-1:1234:cluster/myEKSCluster arn:aws:eks:eu-west-1:1234:cluster/myEKSCluster\n\nIn all three clouds, the correct name of the cluster in this example is my***Cluster.\nThe naming scheme in ~/.kube/config is used to distinguish one cluster (contexts wise) from another.\nFor example when you want to change the context with kubectl, then you have to differentiate between cluster whose name is myCluster and is in region-code1 Compared to another cluster whose name is also myCluster but he is in region-code2, and so on, so you will use the naming scheme.\nGKE:\nAs you wrote, the naming scheme in gke consists of 4 parts: provider_project-id_zone_cluster-name\nFor example gke_project-123_us-central1-c_myGKECluster\n\nprovider: gke\nproject-id: project-123\nzone: us-central1-c\ncluster-name: myGKECluster\n\nAKS:\nIn aks the naming schema is the name of the cluster.\nBut the AUTHINFO, (which is actually the configuration of the user in the kubeconfig file), consists of three parts: Resource-type_Resource-group_Resource-name\nFor example clusterUser_myResourceGroup_myAKSCluster\n\nThe Resource-type is clusterUser\nThe Resource-group is myResourceGroup\nThe Resource-name is myAKSCluster\n\nEKS:\n\nAWS requires an ARN when needed to specify a resource unambiguously across all of AWS.\n\nThe ARN format is arn:partition:service:region:account-id:resource-type/resource-id\nFor example arn:aws:eks:eu-west-1:1234:cluster/myEKSCluster\n\npartition: the partition in which the resource is located (such as aws Regions).\nservice: The service namespace that identifies the AWS product (such as eks).\nregion: The Region code (such as eu-west-1).\naccount-id: The ID of the AWS account that owns the resource(such as 1234).\nresource-type: The resource type (such as cluster).\nresource-id The resource identifier. This is the name of the resource, the ID of the resource, or a resource path (such as myEKSCluster).\n\n\nAdditional resources:\nhttps://stackoverflow.com/a/63824179/20571972\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-eks-cluster.html#aws-resource-eks-cluster-return-values\n" ]
[ 1 ]
[]
[]
[ "amazon_eks", "azure_aks", "kubernetes", "naming_conventions", "python" ]
stackoverflow_0074516648_amazon_eks_azure_aks_kubernetes_naming_conventions_python.txt
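Filling in the question's EKS stub with the ARN convention described above; a sketch that assumes every EKS context follows arn:partition:service:region:account-id:resource-type/resource-id (the AKS stub needs no parsing, since its context name is just the cluster name):

class EKS:
    def __init__(self, context):
        # e.g. "arn:aws:eks:eu-west-1:1234:cluster/myEKSCluster"
        _, self.partition, self.service, self.region, self.account_id, rest = context.split(":", 5)
        self.resource_type, self.cluster_name = rest.split("/", 1)

    def __str__(self):
        return (f'partition: {self.partition}, service: {self.service}, '
                f'region: {self.region}, account id: {self.account_id}, '
                f'cluster name: {self.cluster_name}')

print(EKS(context="arn:aws:eks:eu-west-1:1234:cluster/myEKSCluster"))
# partition: aws, service: eks, region: eu-west-1, account id: 1234, cluster name: myEKSCluster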
Q: Idle3 editor fails to open in Fedora 36

I am currently unable to open the Idle3 editor. I am running Linux Fedora 36; when the idle3 command is issued I get this:

Traceback (most recent call last):
  File "/usr/bin/idle3", line 3, in <module>
    from idlelib.pyshell import main
  File "/usr/lib64/python3.10/idlelib/pyshell.py", line 53, in <module>
    from idlelib import debugger
  File "/usr/lib64/python3.10/idlelib/debugger.py", line 7, in <module>
    from idlelib import macosx
  File "/usr/lib64/python3.10/idlelib/macosx.py", line 7, in <module>
    from test.support import requires, ResourceDenied
ModuleNotFoundError: No module named 'test'

I don't know how to solve it.

A: Fortunately, the trouble with Idle3 was solved in a recent update of Fedora 36; I think the missing file was added.
Idle3 editor fails to open in Fedora 36
I am currently unable to open the Idle3 editor. I am running Linux Fedora 36; when the idle3 command is issued I get this:

Traceback (most recent call last):
  File "/usr/bin/idle3", line 3, in <module>
    from idlelib.pyshell import main
  File "/usr/lib64/python3.10/idlelib/pyshell.py", line 53, in <module>
    from idlelib import debugger
  File "/usr/lib64/python3.10/idlelib/debugger.py", line 7, in <module>
    from idlelib import macosx
  File "/usr/lib64/python3.10/idlelib/macosx.py", line 7, in <module>
    from test.support import requires, ResourceDenied
ModuleNotFoundError: No module named 'test'

I don't know how to solve it.
[ "Fortunatelly the trouble with Idle3 was solved in the recent update of Fedora 36, I think the missing file was inserted.\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074526000_python.txt
Q: How to fit an image into a larger image? Python, Pillow

I have an image that I want to fit into another, bigger image, so that the smaller image is as big as possible. Is there a way to do that? It's like resizing without deforming the aspect ratio. I have tried this code:

image = Image.open(input_path)
x, y = image.size
offset_x = math.floor((512 - x) / 2)
offset_y = math.ceil((512 - y) / 2)
output_image = Image.new(image.mode, (512, 512), (255, 255, 255, 0))
output_image.paste(image, (offset_x, offset_y))
output_image.save(output_path)

but it doesn't really work. (Note that 512 and 512 are the result sizes.)

A: Ok, so the answer is the ImageOps.pad() function. I didn't know it existed, so yeah.
How to fit an image into a larger image? Python, Pillow
I have an image that I want to fit into another, bigger image, so that the smaller image is as big as possible. Is there a way to do that? It's like resizing without deforming the aspect ratio. I have tried this code:

image = Image.open(input_path)
x, y = image.size
offset_x = math.floor((512 - x) / 2)
offset_y = math.ceil((512 - y) / 2)
output_image = Image.new(image.mode, (512, 512), (255, 255, 255, 0))
output_image.paste(image, (offset_x, offset_y))
output_image.save(output_path)

but it doesn't really work. (Note that 512 and 512 are the result sizes.)
[ "Ok, so the answer is the ImageOps.pad() function. I didn't know it existed, so yeah.\n" ]
[ 0 ]
[]
[]
[ "image", "python", "python_imaging_library" ]
stackoverflow_0074533695_image_python_python_imaging_library.txt
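A minimal sketch of the ImageOps.pad() fix for the 512x512 case; the file paths are placeholders, and the RGBA fill color assumes the source image has an alpha channel:

from PIL import Image, ImageOps

image = Image.open("input.png")   # placeholder path
# Fits the image inside 512x512 while keeping its aspect ratio,
# centers it, and fills the leftover border.
padded = ImageOps.pad(image, (512, 512), color=(255, 255, 255, 0))
padded.save("output.png")         # placeholder path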
Q: RuntimeError: DataLoader worker (pid(s) 15876, 2756) exited unexpectedly

I am compiling some existing examples from the PyTorch tutorial website. I am working on the CPU only, with no GPU. When running the program, the error below is shown. Is it because I'm working on the CPU, or is it a setup issue?

raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str)) from e
RuntimeError: DataLoader worker (pid(s) 15876, 2756) exited unexpectedly

How can I solve it?

import torch
import torch.functional as F
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torchvision import datasets

device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5,), (0.5,))]
)

# Store separate training and validation splits in data
training_set = datasets.FashionMNIST(
    root='data',
    train=True,
    download=True,
    transform=transform
)

validation_set = datasets.FashionMNIST(
    root='data',
    train=False,
    download=True,
    transform=transform
)

training_loader = DataLoader(training_set, batch_size=4, shuffle=True, num_workers=2)
validation_loader = DataLoader(validation_set, batch_size=4, shuffle=False, num_workers=2)

classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal',
           'Shirt', 'Sneaker', 'Bag', 'Ankle Boot')

def matplotlib_imshow(img, one_channel=False):
    if one_channel:
        img = img.mean(dim=0)
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    if one_channel:
        plt.imshow(npimg, cmap="Greys")
    else:
        plt.imshow(np.transpose(npimg, (1, 2, 0)))

dataiter = iter(training_loader)
images, labels = next(dataiter)
img_grid = torchvision.utils.make_grid(images)
matplotlib_imshow(img_grid, one_channel=True)

A: Set num_workers=0. On Windows, due to multiprocessing restrictions, setting num_workers to > 0 leads to errors. This is expected. There is an issue on GitHub too.

A: You need to first figure out why the DataLoader worker crashed. A common reason is out of memory. You can check this by running dmesg -T after your script crashes and seeing if the system killed any Python process.

A: I had this problem (RuntimeError: DataLoader worker (pid(s) 78192) exited unexpectedly) when I ran the PyTorch test code, and the cmd window on Win10 told me that the current disk space was not enough; you can increase the virtual disk space of the drive the test code is on. I hope it's useful for you.
RuntimeError: DataLoader worker (pid(s) 15876, 2756) exited unexpectedly
I am compiling some existing examples from the PyTorch tutorial website. I am working on the CPU only, with no GPU. When running the program, the error below is shown. Is it because I'm working on the CPU, or is it a setup issue?

raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str)) from e
RuntimeError: DataLoader worker (pid(s) 15876, 2756) exited unexpectedly

How can I solve it?

import torch
import torch.functional as F
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np

from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torchvision import datasets

device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5,), (0.5,))]
)

# Store separate training and validation splits in data
training_set = datasets.FashionMNIST(
    root='data',
    train=True,
    download=True,
    transform=transform
)

validation_set = datasets.FashionMNIST(
    root='data',
    train=False,
    download=True,
    transform=transform
)

training_loader = DataLoader(training_set, batch_size=4, shuffle=True, num_workers=2)
validation_loader = DataLoader(validation_set, batch_size=4, shuffle=False, num_workers=2)

classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal',
           'Shirt', 'Sneaker', 'Bag', 'Ankle Boot')

def matplotlib_imshow(img, one_channel=False):
    if one_channel:
        img = img.mean(dim=0)
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    if one_channel:
        plt.imshow(npimg, cmap="Greys")
    else:
        plt.imshow(np.transpose(npimg, (1, 2, 0)))

dataiter = iter(training_loader)
images, labels = next(dataiter)
img_grid = torchvision.utils.make_grid(images)
matplotlib_imshow(img_grid, one_channel=True)
[ "set num_workers=0\nOn Windows, due to multiprocessing restrictions, setting num_workers to > 0 leads to errors. This is expected.\nThere is an issue on Github too:\n", "You need to first figure out why the dataLoader worker crashed. A common reason is out of memory. You can check this by running dmesg -T after your script crashes and see if the system killed any python process.\n", "I have this problem(RuntimeError: DataLoader worker (pid(s) 78192) exited unexpectedly) when I compiled the test code of pytorch, and the cmd window of win10 tell me that your current disk space is not enough, you can increase the virtual disk space that test code is in. I hope it's useful for you.\n" ]
[ 2, 0, 0 ]
[]
[]
[ "python", "pytorch", "pytorch_dataloader" ]
stackoverflow_0071713719_python_pytorch_pytorch_dataloader.txt
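On Windows, the usual alternative to num_workers=0 is to guard the entry point, since each DataLoader worker is a freshly spawned process that re-imports the module; a minimal sketch:

import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

def main():
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
    training_set = datasets.FashionMNIST(
        root='data', train=True, download=True, transform=transform)
    # Worker processes are only safe on Windows when loader creation and
    # iteration happen under the __main__ guard.
    loader = DataLoader(training_set, batch_size=4, shuffle=True, num_workers=2)
    images, labels = next(iter(loader))
    print(images.shape)  # torch.Size([4, 1, 28, 28])

if __name__ == '__main__':
    main()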
Q: How do I split a list into equally-sized chunks?

How do I split a list of arbitrary length into equal sized chunks?

See How to iterate over a list in chunks if the data result will be used directly for a loop, and does not need to be stored.

For the same question with a string input, see Split string every nth character?. The same techniques generally apply, though there are some variations.

A: Here's a generator that yields evenly-sized chunks:

def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i:i + n]

import pprint
pprint.pprint(list(chunks(range(10, 75), 10)))
[[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
 [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
 [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
 [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
 [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
 [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
 [70, 71, 72, 73, 74]]

For Python 2, using xrange instead of range:

def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in xrange(0, len(lst), n):
        yield lst[i:i + n]

Below is a list comprehension one-liner. The method above is preferable, though, since using named functions makes code easier to understand. For Python 3:

[lst[i:i + n] for i in range(0, len(lst), n)]

For Python 2:

[lst[i:i + n] for i in xrange(0, len(lst), n)]

A: Something super simple:

def chunks(xs, n):
    n = max(1, n)
    return (xs[i:i+n] for i in range(0, len(xs), n))

For Python 2, use xrange() instead of range().

A: I know this is kind of old but nobody yet mentioned numpy.array_split:

import numpy as np
lst = range(50)
np.array_split(lst, 5)

Result:

[array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
 array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
 array([20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),
 array([30, 31, 32, 33, 34, 35, 36, 37, 38, 39]),
 array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49])]

A: Directly from the (old) Python documentation (recipes for itertools):

from itertools import izip, chain, repeat

def grouper(n, iterable, padvalue=None):
    "grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
    return izip(*[chain(iterable, repeat(padvalue, n-1))]*n)

The current version, as suggested by J.F.Sebastian:

#from itertools import izip_longest as zip_longest # for Python 2.x
from itertools import zip_longest # for Python 3.x
#from six.moves import zip_longest # for both (uses the six compat library)

def grouper(n, iterable, padvalue=None):
    "grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
    return zip_longest(*[iter(iterable)]*n, fillvalue=padvalue)

I guess Guido's time machine works—worked—will work—will have worked—was working again.

These solutions work because [iter(iterable)]*n (or the equivalent in the earlier version) creates one iterator, repeated n times in the list. izip_longest then effectively performs a round-robin of "each" iterator; because this is the same iterator, it is advanced by each such call, resulting in each such zip-roundrobin generating one tuple of n items.

A: I'm surprised nobody has thought of using iter's two-argument form:

from itertools import islice

def chunk(it, size):
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())

Demo:

>>> list(chunk(range(14), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13)]

This works with any iterable and produces output lazily. It returns tuples rather than iterators, but I think it has a certain elegance nonetheless.
It also doesn't pad; if you want padding, a simple variation on the above will suffice:

from itertools import islice, chain, repeat

def chunk_pad(it, size, padval=None):
    it = chain(iter(it), repeat(padval))
    return iter(lambda: tuple(islice(it, size)), (padval,) * size)

Demo:

>>> list(chunk_pad(range(14), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13, None)]
>>> list(chunk_pad(range(14), 3, 'a'))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13, 'a')]

Like the izip_longest-based solutions, the above always pads. As far as I know, there's no one- or two-line itertools recipe for a function that optionally pads. By combining the above two approaches, this one comes pretty close:

_no_padding = object()

def chunk(it, size, padval=_no_padding):
    if padval == _no_padding:
        it = iter(it)
        sentinel = ()
    else:
        it = chain(iter(it), repeat(padval))
        sentinel = (padval,) * size
    return iter(lambda: tuple(islice(it, size)), sentinel)

Demo:

>>> list(chunk(range(14), 3))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13)]
>>> list(chunk(range(14), 3, None))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13, None)]
>>> list(chunk(range(14), 3, 'a'))
[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13, 'a')]

I believe this is the shortest chunker proposed that offers optional padding.

As Tomasz Gandor observed, the two padding chunkers will stop unexpectedly if they encounter a long sequence of pad values. Here's a final variation that works around that problem in a reasonable way:

_no_padding = object()

def chunk(it, size, padval=_no_padding):
    it = iter(it)
    chunker = iter(lambda: tuple(islice(it, size)), ())
    if padval == _no_padding:
        yield from chunker
    else:
        for ch in chunker:
            yield ch if len(ch) == size else ch + (padval,) * (size - len(ch))

Demo:

>>> list(chunk([1, 2, (), (), 5], 2))
[(1, 2), ((), ()), (5,)]
>>> list(chunk([1, 2, None, None, 5], 2, None))
[(1, 2), (None, None), (5, None)]

A: Here is a generator that works on arbitrary iterables:

def split_seq(iterable, size):
    it = iter(iterable)
    item = list(itertools.islice(it, size))
    while item:
        yield item
        item = list(itertools.islice(it, size))

Example:

>>> import pprint
>>> pprint.pprint(list(split_seq(xrange(75), 10)))
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
 [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
 [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
 [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
 [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
 [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
 [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
 [70, 71, 72, 73, 74]]

A: Simple yet elegant:

L = range(1, 1000)
print [L[x:x+10] for x in xrange(0, len(L), 10)]

or if you prefer:

def chunks(L, n):
    return [L[x:x+n] for x in xrange(0, len(L), n)]

chunks(L, 10)

A: Don't reinvent the wheel.

UPDATE: The upcoming Python 3.12 introduces itertools.batched, which solves this problem at last. See below.
Given

import itertools as it
import collections as ct

import more_itertools as mit

iterable = range(11)
n = 3

Code

itertools.batched++

list(it.batched(iterable, n))
# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]

more_itertools+

list(mit.chunked(iterable, n))
# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]

list(mit.sliced(iterable, n))
# [range(0, 3), range(3, 6), range(6, 9), range(9, 11)]

list(mit.grouper(n, iterable))
# [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, None)]

list(mit.windowed(iterable, len(iterable)//n, step=n))
# [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, None)]

list(mit.chunked_even(iterable, n))
# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]

(or DIY, if you want)

The Standard Library

list(it.zip_longest(*[iter(iterable)] * n))
# [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, None)]

d = {}
for i, x in enumerate(iterable):
    d.setdefault(i//n, []).append(x)

list(d.values())
# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]

dd = ct.defaultdict(list)
for i, x in enumerate(iterable):
    dd[i//n].append(x)

list(dd.values())
# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]

References

more_itertools.chunked (related post)
more_itertools.sliced
more_itertools.grouper (related post)
more_itertools.windowed (see also stagger, zip_offset)
more_itertools.chunked_even
zip_longest (related post, related post)
setdefault (ordered results require Python 3.6+)
collections.defaultdict (ordered results require Python 3.6+)

+ A third-party library that implements itertools recipes and more. > pip install more_itertools

++ Included in the Python Standard Library 3.12+. batched is similar to more_itertools.chunked.

A:

def chunk(input, size):
    return map(None, *([iter(input)] * size))

A: How do you split a list into evenly sized chunks?

"Evenly sized chunks", to me, implies that they are all the same length, or barring that option, at minimal variance in length. E.g. 5 baskets for 21 items could have the following results:

>>> import statistics
>>> statistics.variance([5,5,5,5,1])
3.2
>>> statistics.variance([5,4,4,4,4])
0.19999999999999998

A practical reason to prefer the latter result: if you were using these functions to distribute work, you've built in the prospect of one likely finishing well before the others, so it would sit around doing nothing while the others continued working hard.

Critique of other answers here

When I originally wrote this answer, none of the other answers produced evenly sized chunks - they all leave a runt chunk at the end, so they're not well balanced, and have a higher than necessary variance of lengths.

For example, the current top answer ends with:

[60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
[70, 71, 72, 73, 74]]

Others, like list(grouper(3, range(7))), and chunk(range(7), 3) both return: [(0, 1, 2), (3, 4, 5), (6, None, None)]. The None's are just padding, and rather inelegant in my opinion. They are NOT evenly chunking the iterables.

Why can't we divide these better?

Cycle Solution

A high-level balanced solution using itertools.cycle, which is the way I might do it today.
Here's the setup:

from itertools import cycle
items = range(10, 75)
number_of_baskets = 10

Now we need our lists into which to populate the elements:

baskets = [[] for _ in range(number_of_baskets)]

Finally, we zip the elements we're going to allocate together with a cycle of the baskets until we run out of elements, which, semantically, is exactly what we want:

for element, basket in zip(items, cycle(baskets)):
    basket.append(element)

Here's the result:

>>> from pprint import pprint
>>> pprint(baskets)
[[10, 20, 30, 40, 50, 60, 70],
 [11, 21, 31, 41, 51, 61, 71],
 [12, 22, 32, 42, 52, 62, 72],
 [13, 23, 33, 43, 53, 63, 73],
 [14, 24, 34, 44, 54, 64, 74],
 [15, 25, 35, 45, 55, 65],
 [16, 26, 36, 46, 56, 66],
 [17, 27, 37, 47, 57, 67],
 [18, 28, 38, 48, 58, 68],
 [19, 29, 39, 49, 59, 69]]

To productionize this solution, we write a function, and provide the type annotations:

from itertools import cycle
from typing import List, Any

def cycle_baskets(items: List[Any], maxbaskets: int) -> List[List[Any]]:
    baskets = [[] for _ in range(min(maxbaskets, len(items)))]
    for item, basket in zip(items, cycle(baskets)):
        basket.append(item)
    return baskets

In the above, we take our list of items, and the max number of baskets. We create a list of empty lists, in which to append each element, in a round-robin style.

Slices

Another elegant solution is to use slices - specifically the less-commonly used step argument to slices. i.e.:

start = 0
stop = None
step = number_of_baskets

first_basket = items[start:stop:step]

This is especially elegant in that slices don't care how long the data are - the result, our first basket, is only as long as it needs to be. We'll only need to increment the starting point for each basket.

In fact this could be a one-liner, but we'll go multiline for readability and to avoid an overlong line of code:

from typing import List, Any

def slice_baskets(items: List[Any], maxbaskets: int) -> List[List[Any]]:
    n_baskets = min(maxbaskets, len(items))
    return [items[i::n_baskets] for i in range(n_baskets)]

And islice from the itertools module will provide a lazily iterating approach, like that which was originally asked for in the question.

I don't expect most use-cases to benefit very much, as the original data is already fully materialized in a list, but for large datasets, it could save nearly half the memory usage.
from itertools import islice
from typing import List, Any, Generator

def yield_islice_baskets(items: List[Any], maxbaskets: int) -> Generator[List[Any], None, None]:
    n_baskets = min(maxbaskets, len(items))
    for i in range(n_baskets):
        yield islice(items, i, None, n_baskets)

View results with:
from pprint import pprint

items = list(range(10, 75))
pprint(cycle_baskets(items, 10))
pprint(slice_baskets(items, 10))
pprint([list(s) for s in yield_islice_baskets(items, 10)])

Updated prior solutions
Here's another balanced solution, adapted from a function I've used in production in the past, that uses the modulo operator:
def baskets_from(items, maxbaskets=25):
    baskets = [[] for _ in range(maxbaskets)]
    for i, item in enumerate(items):
        baskets[i % maxbaskets].append(item)
    return filter(None, baskets)

And I created a generator that does the same if you put it into a list:
def iter_baskets_from(items, maxbaskets=3):
    '''generates evenly balanced baskets from indexable iterable'''
    item_count = len(items)
    baskets = min(item_count, maxbaskets)
    for x_i in range(baskets):
        yield [items[y_i] for y_i in range(x_i, item_count, baskets)]

And finally, since I see that all of the above functions return elements in a contiguous order (as they were given):
def iter_baskets_contiguous(items, maxbaskets=3, item_count=None):
    '''
    generates balanced baskets from iterable, contiguous contents
    provide item_count if providing an iterator that doesn't support len()
    '''
    item_count = item_count or len(items)
    baskets = min(item_count, maxbaskets)
    items = iter(items)
    floor = item_count // baskets
    ceiling = floor + 1
    stepdown = item_count % baskets
    for x_i in range(baskets):
        length = ceiling if x_i < stepdown else floor
        yield [next(items) for _ in range(length)]

Output
To test them out:
print(list(baskets_from(range(6), 8)))
print(list(iter_baskets_from(range(6), 8)))
print(list(iter_baskets_contiguous(range(6), 8)))
print(list(baskets_from(range(22), 8)))
print(list(iter_baskets_from(range(22), 8)))
print(list(iter_baskets_contiguous(range(22), 8)))
print(list(baskets_from('ABCDEFG', 3)))
print(list(iter_baskets_from('ABCDEFG', 3)))
print(list(iter_baskets_contiguous('ABCDEFG', 3)))
print(list(baskets_from(range(26), 5)))
print(list(iter_baskets_from(range(26), 5)))
print(list(iter_baskets_contiguous(range(26), 5)))

Which prints out:
[[0], [1], [2], [3], [4], [5]]
[[0], [1], [2], [3], [4], [5]]
[[0], [1], [2], [3], [4], [5]]
[[0, 8, 16], [1, 9, 17], [2, 10, 18], [3, 11, 19], [4, 12, 20], [5, 13, 21], [6, 14], [7, 15]]
[[0, 8, 16], [1, 9, 17], [2, 10, 18], [3, 11, 19], [4, 12, 20], [5, 13, 21], [6, 14], [7, 15]]
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14], [15, 16, 17], [18, 19], [20, 21]]
[['A', 'D', 'G'], ['B', 'E'], ['C', 'F']]
[['A', 'D', 'G'], ['B', 'E'], ['C', 'F']]
[['A', 'B', 'C'], ['D', 'E'], ['F', 'G']]
[[0, 5, 10, 15, 20, 25], [1, 6, 11, 16, 21], [2, 7, 12, 17, 22], [3, 8, 13, 18, 23], [4, 9, 14, 19, 24]]
[[0, 5, 10, 15, 20, 25], [1, 6, 11, 16, 21], [2, 7, 12, 17, 22], [3, 8, 13, 18, 23], [4, 9, 14, 19, 24]]
[[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25]]

Notice that the contiguous generator provides chunks in the same length patterns as the other two, but the items are all in order, and they are as evenly divided as one may divide a list of discrete elements.
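For a compact Python 3 take on the same contiguous, balanced split, here is a minimal sketch using divmod; the name even_contiguous is mine, not from the answer above:
def even_contiguous(items, n):
    # illustrative helper, not part of the answer above
    # q = base chunk size; the first r chunks get one extra element
    q, r = divmod(len(items), n)
    out, start = [], 0
    for i in range(min(n, len(items))):
        end = start + q + (1 if i < r else 0)
        out.append(items[start:end])
        start = end
    return out

print(even_contiguous(list(range(22)), 8))
# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14], [15, 16, 17], [18, 19], [20, 21]]

This reproduces the length pattern of iter_baskets_contiguous above while avoiding the generator machinery.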
A: If you know list size:
def SplitList(mylist, chunk_size):
    return [mylist[offs:offs+chunk_size] for offs in range(0, len(mylist), chunk_size)]

If you don't (an iterator):
def IterChunks(sequence, chunk_size):
    res = []
    for item in sequence:
        res.append(item)
        if len(res) >= chunk_size:
            yield res
            res = []
    if res:
        yield res  # yield the last, incomplete, portion

In the latter case, it can be rephrased in a more beautiful way if you can be sure that the sequence always contains a whole number of chunks of given size (i.e. there is no incomplete last chunk).

A: I saw the most awesome Python-ish answer in a duplicate of this question:
from itertools import zip_longest

a = range(1, 16)
i = iter(a)
r = list(zip_longest(i, i, i))
>>> print(r)
[(1, 2, 3), (4, 5, 6), (7, 8, 9), (10, 11, 12), (13, 14, 15)]

You can create an n-tuple for any n. If a = range(1, 15), then the result will be:
[(1, 2, 3), (4, 5, 6), (7, 8, 9), (10, 11, 12), (13, 14, None)]

If the list is divided evenly, then you can replace zip_longest with zip, otherwise the triplet (13, 14, None) would be lost. Python 3 is used above. For Python 2, use izip_longest.

A: [AA[i:i+SS] for i in range(len(AA))[::SS]]

Where AA is the array and SS is the chunk size. For example:
>>> AA=range(10,21);SS=3
>>> [AA[i:i+SS] for i in range(len(AA))[::SS]]
[[10, 11, 12], [13, 14, 15], [16, 17, 18], [19, 20]]
# or [range(10, 13), range(13, 16), range(16, 19), range(19, 21)] in py3

To expand the ranges in py3 do
(py3) >>> [list(AA[i:i+SS]) for i in range(len(AA))[::SS]]
[[10, 11, 12], [13, 14, 15], [16, 17, 18], [19, 20]]

A: If you had a chunk size of 3 for example, you could do:
zip(*[iterable[i::3] for i in range(3)])

source: http://code.activestate.com/recipes/303060-group-a-list-into-sequential-n-tuples/
I would use this when my chunk size is a fixed number I can type, e.g. '3', and would never change.

A: The toolz library has the partition function for this:
from toolz.itertoolz.core import partition

list(partition(2, [1, 2, 3, 4]))
[(1, 2), (3, 4)]

A: With Assignment Expressions in Python 3.8 it becomes quite nice:
import itertools

def batch(iterable, size):
    it = iter(iterable)
    while item := list(itertools.islice(it, size)):
        yield item

This works on an arbitrary iterable, not just a list.
>>> import pprint
>>> pprint.pprint(list(batch(range(75), 10)))
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
 [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
 [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],
 [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],
 [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],
 [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],
 [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],
 [70, 71, 72, 73, 74]]

A: I was curious about the performance of different approaches and here it is:
Tested on Python 3.5.1
import time
batch_size = 7
arr_len = 298937

#---------slice-------------
print("\r\nslice")
start = time.time()
arr = [i for i in range(0, arr_len)]
while True:
    if not arr:
        break
    tmp = arr[0:batch_size]
    arr = arr[batch_size:-1]
print(time.time() - start)

#-----------index-----------
print("\r\nindex")
arr = [i for i in range(0, arr_len)]
start = time.time()
for i in range(0, round(len(arr) / batch_size + 1)):
    tmp = arr[batch_size * i : batch_size * (i + 1)]
print(time.time() - start)

#----------batches 1------------
def batch(iterable, n=1):
    l = len(iterable)
    for ndx in range(0, l, n):
        yield iterable[ndx:min(ndx + n, l)]

print("\r\nbatches 1")
arr = [i for i in range(0, arr_len)]
start = time.time()
for x in batch(arr, batch_size):
    tmp = x
print(time.time() - start)

#----------batches 2------------
from itertools import islice, chain

def batch(iterable, size):
    sourceiter = iter(iterable)
    while True:
        batchiter = islice(sourceiter, size)
        yield chain([next(batchiter)], batchiter)

print("\r\nbatches 2")
arr = [i for i in range(0, arr_len)]
start = time.time()
for x in batch(arr, batch_size):
    tmp = x
print(time.time() - start)

#---------chunks-------------
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]

print("\r\nchunks")
arr = [i for i in range(0, arr_len)]
start = time.time()
for x in chunks(arr, batch_size):
    tmp = x
print(time.time() - start)

#-----------grouper-----------
from itertools import zip_longest  # for Python 3.x
#from six.moves import zip_longest # for both (uses the six compat library)

def grouper(iterable, n, padvalue=None):
    "grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
    return zip_longest(*[iter(iterable)]*n, fillvalue=padvalue)

arr = [i for i in range(0, arr_len)]
print("\r\ngrouper")
start = time.time()
for x in grouper(arr, batch_size):
    tmp = x
print(time.time() - start)

Results:
slice
31.18285083770752

index
0.02184295654296875

batches 1
0.03503894805908203

batches 2
0.22681021690368652

chunks
0.019841909408569336

grouper
0.006506919860839844

A: You may also use the get_chunks function of the utilspie library as:
>>> from utilspie import iterutils
>>> a = [1, 2, 3, 4, 5, 6, 7, 8, 9]

>>> list(iterutils.get_chunks(a, 5))
[[1, 2, 3, 4, 5], [6, 7, 8, 9]]

You can install utilspie via pip:
sudo pip install utilspie

Disclaimer: I am the creator of the utilspie library.
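As a cross-check for wall-clock measurements like the benchmark a little further up, timeit sidesteps some timing pitfalls (startup cost, interleaved work); a minimal sketch, with sizes mirroring that test but otherwise arbitrary:
import timeit

# times only the list-comprehension slicing approach; adapt stmt for the others
setup = "arr = list(range(298937)); n = 7"
stmt = "[arr[i:i+n] for i in range(0, len(arr), n)]"
print(timeit.timeit(stmt, setup=setup, number=10))  # total seconds for 10 runs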
A: I like the Python doc's version proposed by tzot and J.F.Sebastian a lot, but it has two shortcomings:

it is not very explicit
I usually don't want a fill value in the last chunk

I'm using this one a lot in my code:
from itertools import islice

def chunks(n, iterable):
    iterable = iter(iterable)
    while True:
        yield tuple(islice(iterable, n)) or next(iterable)

UPDATE: A lazy chunks version:
from itertools import chain, islice

def chunks(n, iterable):
    iterable = iter(iterable)
    while True:
        yield chain([next(iterable)], islice(iterable, n-1))

A: code:
def split_list(the_list, chunk_size):
    result_list = []
    while the_list:
        result_list.append(the_list[:chunk_size])
        the_list = the_list[chunk_size:]
    return result_list

a_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

print split_list(a_list, 3)

result:
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]

A: heh, one line version
In [48]: chunk = lambda ulist, step: map(lambda i: ulist[i:i+step], xrange(0, len(ulist), step))

In [49]: chunk(range(1,100), 10)
Out[49]:
[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
 [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
 [21, 22, 23, 24, 25, 26, 27, 28, 29, 30],
 [31, 32, 33, 34, 35, 36, 37, 38, 39, 40],
 [41, 42, 43, 44, 45, 46, 47, 48, 49, 50],
 [51, 52, 53, 54, 55, 56, 57, 58, 59, 60],
 [61, 62, 63, 64, 65, 66, 67, 68, 69, 70],
 [71, 72, 73, 74, 75, 76, 77, 78, 79, 80],
 [81, 82, 83, 84, 85, 86, 87, 88, 89, 90],
 [91, 92, 93, 94, 95, 96, 97, 98, 99]]

A: Another more explicit version.
def chunkList(initialList, chunkSize):
    """
    This function chunks a list into sub lists
    that have a length equal to chunkSize.

    Example:
    lst = [3, 4, 9, 7, 1, 1, 2, 3]
    print(chunkList(lst, 3))
    returns
    [[3, 4, 9], [7, 1, 1], [2, 3]]
    """
    finalList = []
    for i in range(0, len(initialList), chunkSize):
        finalList.append(initialList[i:i+chunkSize])
    return finalList

A: At this point, I think we need a recursive generator, just in case...
In python 2:
def chunks(li, n):
    if li == []:
        return
    yield li[:n]
    for e in chunks(li[n:], n):
        yield e

In python 3:
def chunks(li, n):
    if li == []:
        return
    yield li[:n]
    yield from chunks(li[n:], n)

Also, in case of massive Alien invasion, a decorated recursive generator might become handy:
def dec(gen):
    def new_gen(li, n):
        for e in gen(li, n):
            if e == []:
                return
            yield e
    return new_gen

@dec
def chunks(li, n):
    yield li[:n]
    for e in chunks(li[n:], n):
        yield e

A: Without calling len(), which is good for large lists:
def splitter(l, n):
    i = 0
    chunk = l[:n]
    while chunk:
        yield chunk
        i += n
        chunk = l[i:i+n]

And this is for iterables:
def isplitter(l, n):
    l = iter(l)
    chunk = list(islice(l, n))
    while chunk:
        yield chunk
        chunk = list(islice(l, n))

The functional flavour of the above:
def isplitter2(l, n):
    return takewhile(bool,
                     (tuple(islice(start, n))
                      for start in repeat(iter(l))))

OR:
def chunks_gen_sentinel(n, seq):
    continuous_slices = imap(islice, repeat(iter(seq)), repeat(0), repeat(n))
    return iter(imap(tuple, continuous_slices).next, ())

OR:
def chunks_gen_filter(n, seq):
    continuous_slices = imap(islice, repeat(iter(seq)), repeat(0), repeat(n))
    return takewhile(bool, imap(tuple, continuous_slices))

A:
def split_seq(seq, num_pieces):
    start = 0
    for i in xrange(num_pieces):
        stop = start + len(seq[i::num_pieces])
        yield seq[start:stop]
        start = stop

usage:
seq = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

for seq in split_seq(seq, 3):
    print seq

A: See this reference
>>> orange = range(1, 1001)
>>> otuples = list( zip(*[iter(orange)]*10))
>>> print(otuples)
[(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), ...
(991, 992, 993, 994, 995, 996, 997, 998, 999, 1000)]
>>> olist = [list(i) for i in otuples]
>>> print(olist)
[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], ..., [991, 992, 993, 994, 995, 996, 997, 998, 999, 1000]]
>>>

Python3

A:
def chunks(iterable, n):
    """assumes n is an integer > 0"""
    iterable = iter(iterable)
    while True:
        result = []
        for i in range(n):
            try:
                a = next(iterable)
            except StopIteration:
                break
            else:
                result.append(a)
        if result:
            yield result
        else:
            break

g1 = (i*i for i in range(10))
g2 = chunks(g1, 3)
print g2
'<generator object chunks at 0x0337B9B8>'
print list(g2)
'[[0, 1, 4], [9, 16, 25], [36, 49, 64], [81]]'

A: Since everybody here is talking about iterators: boltons has a perfect method for that, called iterutils.chunked_iter.
from boltons import iterutils

list(iterutils.chunked_iter(list(range(50)), 11))

Output:
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
 [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
 [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
 [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43],
 [44, 45, 46, 47, 48, 49]]

But if memory is not a concern, you can use the old way and store the full list up front with iterutils.chunked.

A: Consider using matplotlib.cbook pieces, for example:
import matplotlib.cbook as cbook
segments = cbook.pieces(np.arange(20), 3)
for s in segments:
    print s

A:
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
CHUNK = 4
[a[i*CHUNK:(i+1)*CHUNK] for i in xrange((len(a) + CHUNK - 1) / CHUNK)]

A: One more solution
def make_chunks(data, chunk_size):
    while data:
        chunk, data = data[:chunk_size], data[chunk_size:]
        yield chunk

>>> for chunk in make_chunks([1, 2, 3, 4, 5, 6, 7], 2):
...     print chunk
...
[1, 2]
[3, 4]
[5, 6]
[7]
>>>

A:
>>> def f(x, n, acc=[]): return f(x[n:], n, acc+[(x[:n])]) if x else acc
>>> f("Hallo Welt", 3)
['Hal', 'lo ', 'Wel', 't']
>>>

If you are into brackets - I picked up a book on Erlang :)

A: I realise this question is old (stumbled over it on Google), but surely something like the following is far simpler and clearer than any of the huge complex suggestions and only uses slicing:
def chunker(iterable, chunksize):
    for i, c in enumerate(iterable[::chunksize]):
        yield iterable[i*chunksize:(i+1)*chunksize]

>>> for chunk in chunker(range(0, 100), 10):
...     print list(chunk)
...
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
[20, 21, 22, 23, 24, 25, 26, 27, 28, 29]
... etc ...

A: As per this answer, the top-voted answer leaves a 'runt' at the end. Here's my solution to really get about as evenly-sized chunks as you can, with no runts.
It basically tries to pick exactly the fractional spot where it should split the list, but just rounds it off to the nearest integer:
from __future__ import division  # not needed in Python 3

def n_even_chunks(l, n):
    """Yield n as even chunks as possible from l."""
    last = 0
    for i in range(1, n+1):
        cur = int(round(i * (len(l) / n)))
        yield l[last:cur]
        last = cur

Demonstration:
>>> pprint.pprint(list(n_even_chunks(list(range(100)), 9)))
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
 [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
 [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
 [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43],
 [44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55],
 [56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66],
 [67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77],
 [78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88],
 [89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]]
>>> pprint.pprint(list(n_even_chunks(list(range(100)), 11)))
[[0, 1, 2, 3, 4, 5, 6, 7, 8],
 [9, 10, 11, 12, 13, 14, 15, 16, 17],
 [18, 19, 20, 21, 22, 23, 24, 25, 26],
 [27, 28, 29, 30, 31, 32, 33, 34, 35],
 [36, 37, 38, 39, 40, 41, 42, 43, 44],
 [45, 46, 47, 48, 49, 50, 51, 52, 53, 54],
 [55, 56, 57, 58, 59, 60, 61, 62, 63],
 [64, 65, 66, 67, 68, 69, 70, 71, 72],
 [73, 74, 75, 76, 77, 78, 79, 80, 81],
 [82, 83, 84, 85, 86, 87, 88, 89, 90],
 [91, 92, 93, 94, 95, 96, 97, 98, 99]]

Compare to the top-voted chunks answer:
>>> pprint.pprint(list(chunks(list(range(100)), 100//9)))
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
 [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
 [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
 [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43],
 [44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54],
 [55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65],
 [66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76],
 [77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87],
 [88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98],
 [99]]
>>> pprint.pprint(list(chunks(list(range(100)), 100//11)))
[[0, 1, 2, 3, 4, 5, 6, 7, 8],
 [9, 10, 11, 12, 13, 14, 15, 16, 17],
 [18, 19, 20, 21, 22, 23, 24, 25, 26],
 [27, 28, 29, 30, 31, 32, 33, 34, 35],
 [36, 37, 38, 39, 40, 41, 42, 43, 44],
 [45, 46, 47, 48, 49, 50, 51, 52, 53],
 [54, 55, 56, 57, 58, 59, 60, 61, 62],
 [63, 64, 65, 66, 67, 68, 69, 70, 71],
 [72, 73, 74, 75, 76, 77, 78, 79, 80],
 [81, 82, 83, 84, 85, 86, 87, 88, 89],
 [90, 91, 92, 93, 94, 95, 96, 97, 98],
 [99]]

A: You could use numpy's array_split function, e.g., np.array_split(np.array(data), 20) to split into 20 nearly equal size chunks. To make sure chunks are exactly equal in size use np.split.

A: I don't think I saw this option, so just to add another one :)) :
def chunks(iterable, chunk_size):
    i = 0
    while i < len(iterable):
        yield iterable[i:i+chunk_size]
        i += chunk_size

A: The Python pydash package could be a good choice.
from pydash.arrays import chunk

ids = ['22', '89', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '1']
chunk_ids = chunk(ids, 5)
print(chunk_ids)
# output: [['22', '89', '2', '3', '4'], ['5', '6', '7', '8', '9'], ['10', '11', '1']]

For more, check out pydash chunk

A:
def chunk(lst):
    out = []
    for x in xrange(2, len(lst) + 1):
        if not len(lst) % x:
            factor = len(lst) / x
            break
    while lst:
        out.append([lst.pop(0) for x in xrange(factor)])
    return out

A: Letting r be the chunk size and L be the initial list, you can do:
chunkL = [[i for i in L[r*k:r*(k+1)]] for k in range(len(L)/r)]

A: Use list comprehensions:
l = [1,2,3,4,5,6,7,8,9,10,11,12]
k = 5  # chunk size
print [tuple(l[x:y]) for (x, y) in [(x, x+k) for x in range(0, len(l), k)]]

A: At this point, I think we need the obligatory anonymous-recursive function.
Y = lambda f: (lambda x: x(x))(lambda y: f(lambda *args: y(y)(*args)))
chunks = Y(lambda f: lambda n: [n[0][:n[1]]] + f((n[0][n[1]:], n[1])) if len(n[0]) > 0 else [])

A: I have one solution below which does work, but more important than that solution are a few comments on other approaches. First, a good solution shouldn't require that one loop through the sub-iterators in order. If I run
g = paged_iter(list(range(50)), 11)
i0 = next(g)
i1 = next(g)
list(i1)
list(i0)

The appropriate output for the last command is
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

not
[]

as most of the itertools-based solutions here return. This isn't just the usual boring restriction about accessing iterators in order. Imagine a consumer trying to clean up poorly entered data which reversed the appropriate order of blocks of 5, i.e., the data looks like [B5, A5, D5, C5] and should look like [A5, B5, C5, D5] (where A5 is just five elements, not a sublist). This consumer would look at the claimed behavior of the grouping function and not hesitate to write a loop like
i = 0
out = []
for it in paged_iter(data, 5):
    if (i % 2 == 0):
        swapped = it
    else:
        out += list(it)
        out += list(swapped)
    i = i + 1

This will produce mysteriously wrong results if you sneakily assume that sub-iterators are always fully used in order. It gets even worse if you want to interleave elements from the chunks.
Second, a decent number of the suggested solutions implicitly rely on the fact that iterators have a deterministic order (which, e.g., set does not), and while some of the solutions using islice may be ok, it worries me.
Third, the itertools grouper approach works, but the recipe relies on internal behavior of the zip_longest (or zip) functions that isn't part of their published behavior. In particular, the grouper function only works because in zip_longest(i0...in) the next function is always called in the order next(i0), next(i1), ... next(in) before starting over. As grouper passes n copies of the same iterator object, it relies on this behavior.
Finally, while the solution below can be improved if you make the assumption criticized above that sub-iterators are accessed in order and fully perused, without this assumption one MUST implicitly (via call chain) or explicitly (via deques or other data structure) store elements for each subiterator somewhere. So don't bother wasting time (as I did) assuming one could get around this with some clever trick.
import collections

def paged_iter(iterat, n):
    itr = iter(iterat)
    deq = None
    try:
        while True:
            deq = collections.deque(maxlen=n)
            for q in range(n):
                deq.append(next(itr))
            yield (i for i in deq)
    except StopIteration:
        yield (i for i in deq)

A: Here's an idea using itertools.groupby:
def chunks(l, n):
    c = itertools.count()
    return (it for _, it in itertools.groupby(l, lambda x: next(c)//n))

This returns a generator of generators. If you want a list of lists, just replace the last line with
return [list(it) for _, it in itertools.groupby(l, lambda x: next(c)//n)]

Example returning a list of lists:
>>> chunks('abcdefghij', 4)
[['a', 'b', 'c', 'd'], ['e', 'f', 'g', 'h'], ['i', 'j']]

(So yes, this suffers from the "runt problem", which may or may not be a problem in a given situation.)

A: An abstraction would be
l = [1,2,3,4,5,6,7,8,9]
n = 3
outList = []
for i in range(n, len(l) + n, n):
    outList.append(l[i-n:i])

print(outList)

This will print:
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]

A: I wrote a small library expressly for this purpose, available here.
The library's chunked function is particularly efficient because it's implemented as a generator, so a substantial amount of memory can be saved in certain situations. It also doesn't rely on the slice notation, so any arbitrary iterator can be used.
import iterlib

print list(iterlib.chunked(xrange(1, 1000), 10))
# prints [(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), (11, 12, 13, 14, 15, 16, 17, 18, 19, 20), ...]

A: The answer above (by koffein) has a little problem: the list is always split into an equal number of splits, not an equal number of items per partition. This is my version. The "// chs + 1" takes into account that the number of items may not be divisible exactly by the partition size, so the last partition will only be partially filled.
# Given 'l' is your list
chs = 12  # Your chunk size
partitioned = [l[i*chs:(i*chs)+chs] for i in range((len(l) // chs) + 1)]

A: No magic, but simple and correct:
def chunks(iterable, n):
    """Yield successive n-sized chunks from iterable."""
    values = []
    for i, item in enumerate(iterable, 1):
        values.append(item)
        if i % n == 0:
            yield values
            values = []
    if values:
        yield values

A: Works with any iterable
Inner data is a generator object (not a list)
One liner
In [259]: get_in_chunks = lambda itr,n: ( (v for _,v in g) for _,g in itertools.groupby(enumerate(itr),lambda (ind,_): ind/n))

In [260]: list(list(x) for x in get_in_chunks(range(30),7))
Out[260]:
[[0, 1, 2, 3, 4, 5, 6],
 [7, 8, 9, 10, 11, 12, 13],
 [14, 15, 16, 17, 18, 19, 20],
 [21, 22, 23, 24, 25, 26, 27],
 [28, 29]]

A: Like @AaronHall I got here looking for roughly evenly sized chunks. There are different interpretations of that. In my case, if the desired size is N, I would like each group to be of size >= N. Thus, the orphans which are created in most of the above should be redistributed to other groups. This can be done using:
def nChunks(l, n):
    """ Yield n successive chunks from l. Works for lists, pandas dataframes, etc """
    newn = int(1.0 * len(l) / n + 0.5)
    for i in xrange(0, n-1):
        yield l[i*newn:i*newn+newn]
    yield l[n*newn-newn:]

(from Splitting a list into N parts of approximately equal length) by simply calling it as nChunks(l, l/n) or nChunks(l, floor(l/n))

A: I have come up with the following solution without creating a temporary list object; it should work with any iterable object. Please note that this version is for Python 2.x:
def chunked(iterable, size):
    stop = []
    it = iter(iterable)

    def _next_chunk():
        try:
            for _ in xrange(size):
                yield next(it)
        except StopIteration:
            stop.append(True)
            return

    while not stop:
        yield _next_chunk()

for it in chunked(xrange(16), 4):
    print list(it)

Output:
[0, 1, 2, 3]
[4, 5, 6, 7]
[8, 9, 10, 11]
[12, 13, 14, 15]
[]

As you can see, if len(iterable) % size == 0 then we have an additional empty iterator object. But I do not think that is a big problem.

A: This works in v2/v3, is inlineable, generator-based and uses only the standard library:
import itertools

def split_groups(iter_in, group_size):
    return ((x for _, x in item) for _, item in itertools.groupby(enumerate(iter_in), key=lambda x: x[0] // group_size))

A: A simple solution
The OP has requested "equal sized chunks". I understand "equal sized" as "balanced" sizes: we are looking for groups of items of approximately the same sizes if equal sizes are not possible (e.g., 23/5).
Inputs here are:

the list of items: input_list (list of 23 numbers, for instance)
the number of groups to split those items: n_groups (5, for instance)

Input:
input_list = list(range(23))
n_groups = 5

Groups of contiguous elements:
approx_sizes = len(input_list)/n_groups

groups_cont = [input_list[int(i*approx_sizes):int((i+1)*approx_sizes)]
               for i in range(n_groups)]

Groups of "every-Nth" elements:
groups_leap = [input_list[i::n_groups] for i in range(n_groups)]

Results
print(len(input_list))

print('Contiguous elements lists:')
print(groups_cont)

print('Leap every "N" items lists:')
print(groups_leap)

Will output:
23

Contiguous elements lists:
[[0, 1, 2, 3], [4, 5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16, 17], [18, 19, 20, 21, 22]]

Leap every "N" items lists:
[[0, 5, 10, 15, 20], [1, 6, 11, 16, 21], [2, 7, 12, 17, 22], [3, 8, 13, 18], [4, 9, 14, 19]]

A: Since I had to do something like this, here's my solution given a generator and a batch size:
def pop_n_elems_from_generator(g, n):
    elems = []
    try:
        for idx in xrange(0, n):
            elems.append(g.next())
        return elems
    except StopIteration:
        return elems

A: This question reminds me of the Raku (formerly Perl 6) .comb(n) method. It breaks up strings into n-sized chunks. (There's more to it than that, but I'll leave out the details.)
It's easy enough to implement a similar function in Python3 as a lambda expression:
comb = lambda s,n: (s[i:i+n] for i in range(0,len(s),n))

Then you can call it like this:
some_list = list(range(0, 20))  # creates a list of 20 elements
generator = comb(some_list, 4)  # creates a generator that will generate lists of 4 elements
for sublist in generator:
    print(sublist)  # prints a sublist of four elements, as it's generated

Of course, you don't have to assign the generator to a variable; you can just loop over it directly like this:
for sublist in comb(some_list, 4):
    print(sublist)  # prints a sublist of four elements, as it's generated

As a bonus, this comb() function also operates on strings:
list(comb('catdogant', 3))  # returns ['cat', 'dog', 'ant']

A: A generic chunker for any iterable, which gives the user a choice of how to handle a partial chunk at the end. Tested on Python 3.
chunker.py
from enum import Enum

class PartialChunkOptions(Enum):
    INCLUDE = 0
    EXCLUDE = 1
    PAD = 2
    ERROR = 3

class PartialChunkException(Exception):
    pass

def chunker(iterable, n, on_partial=PartialChunkOptions.INCLUDE, pad=None):
    """
    A chunker yielding n-element lists from an iterable, with various options
    about what to do about a partial chunk at the end.
    on_partial=PartialChunkOptions.INCLUDE (the default):
                    include the partial chunk as a short (<n) element list
    on_partial=PartialChunkOptions.EXCLUDE
                    do not include the partial chunk
    on_partial=PartialChunkOptions.PAD
                    pad to an n-element list
                    (also pass pad=<pad_value>, default None)
    on_partial=PartialChunkOptions.ERROR
                    raise a PartialChunkException if a partial chunk is encountered
    """
    on_partial = PartialChunkOptions(on_partial)
    iterator = iter(iterable)
    while True:
        vals = []
        for i in range(n):
            try:
                vals.append(next(iterator))
            except StopIteration:
                if vals:
                    if on_partial == PartialChunkOptions.INCLUDE:
                        yield vals
                    elif on_partial == PartialChunkOptions.EXCLUDE:
                        pass
                    elif on_partial == PartialChunkOptions.PAD:
                        yield vals + [pad] * (n - len(vals))
                    elif on_partial == PartialChunkOptions.ERROR:
                        raise PartialChunkException
                return
        yield vals

test.py
import chunker

chunk_size = 3

for it in (range(100, 107), range(100, 109)):
    print("\nITERABLE TO CHUNK: {}".format(it))
    print("CHUNK SIZE: {}".format(chunk_size))
    for option in chunker.PartialChunkOptions.__members__.values():
        print("\noption {} used".format(option))
        try:
            for chunk in chunker.chunker(it, chunk_size, on_partial=option):
                print(chunk)
        except chunker.PartialChunkException:
            print("PartialChunkException was raised")
print("")

output of test.py

ITERABLE TO CHUNK: range(100, 107)
CHUNK SIZE: 3

option PartialChunkOptions.INCLUDE used
[100, 101, 102]
[103, 104, 105]
[106]

option PartialChunkOptions.EXCLUDE used
[100, 101, 102]
[103, 104, 105]

option PartialChunkOptions.PAD used
[100, 101, 102]
[103, 104, 105]
[106, None, None]

option PartialChunkOptions.ERROR used
[100, 101, 102]
[103, 104, 105]
PartialChunkException was raised

ITERABLE TO CHUNK: range(100, 109)
CHUNK SIZE: 3

option PartialChunkOptions.INCLUDE used
[100, 101, 102]
[103, 104, 105]
[106, 107, 108]

option PartialChunkOptions.EXCLUDE used
[100, 101, 102]
[103, 104, 105]
[106, 107, 108]

option PartialChunkOptions.PAD used
[100, 101, 102]
[103, 104, 105]
[106, 107, 108]

option PartialChunkOptions.ERROR used
[100, 101, 102]
[103, 104, 105]
[106, 107, 108]

A: You may use more_itertools.chunked_even along with math.ceil. Likely the easiest to reason about?
from math import ceil
import more_itertools as mit
from pprint import pprint

pprint([*mit.chunked_even(range(19), ceil(19 / 5))])
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15], [16, 17, 18]]

pprint([*mit.chunked_even(range(20), ceil(20 / 5))])
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15], [16, 17, 18, 19]]

pprint([*mit.chunked_even(range(21), ceil(21 / 5))])
# [[0, 1, 2, 3, 4],
#  [5, 6, 7, 8],
#  [9, 10, 11, 12],
#  [13, 14, 15, 16],
#  [17, 18, 19, 20]]

pprint([*mit.chunked_even(range(3), ceil(3 / 5))])
# [[0], [1], [2]]

A: The recipes in the itertools module provide two ways to do this depending on how you want to handle a final odd-sized lot (keep it, pad it with a fillvalue, ignore it, or raise an exception):
from itertools import islice, zip_longest

def batched(iterable, n):
    "Batch data into lists of length n. The last batch may be shorter."
    # batched('ABCDEFG', 3) --> ABC DEF G
    it = iter(iterable)
    while True:
        batch = list(islice(it, n))
        if not batch:
            return
        yield batch

def grouper(iterable, n, *, incomplete='fill', fillvalue=None):
    "Collect data into non-overlapping fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, fillvalue='x') --> ABC DEF Gxx
    # grouper('ABCDEFG', 3, incomplete='strict') --> ABC DEF ValueError
    # grouper('ABCDEFG', 3, incomplete='ignore') --> ABC DEF
    args = [iter(iterable)] * n
    if incomplete == 'fill':
        return zip_longest(*args, fillvalue=fillvalue)
    if incomplete == 'strict':
        return zip(*args, strict=True)
    if incomplete == 'ignore':
        return zip(*args)
    else:
        raise ValueError('Expected fill, strict, or ignore')

A: I dislike the idea of splitting elements by chunk size, e.g. a script can divide 101 into 3 chunks as [50, 50, 1]. For my needs I needed to split proportionally, keeping the order the same. First I wrote my own script, which works fine and is very simple. But I later saw this answer, where the script is better than mine; I recommend it. Here's my script:
def proportional_dividing(N, n):
    """
    N - length of array (bigger number)
    n - number of chunks (smaller number)
    output - arr, containing N numbers, divided roughly into n chunks
    """
    arr = []
    if N == 0:
        return arr
    elif n == 0:
        arr.append(N)
        return arr
    r = N // n
    for i in range(n-1):
        arr.append(r)
    arr.append(N-r*(n-1))

    last_n = arr[-1]
    # last number always will be r <= last_n < 2*r
    # when last_n == r it's ok, but when last_n > r ...
    if last_n > r:
        # ... and if the difference is too big (bigger than 1), then
        if abs(r-last_n) > 1:
            # [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 7]  # N=29, n=12
            # we need to give the unnecessary numbers back to the first elements
            diff = last_n - r
            for k in range(diff):
                arr[k] += 1
            arr[-1] = r
            # and we receive [3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2]
    return arr

def split_items(items, chunks):
    arr = proportional_dividing(len(items), chunks)
    splitted = []
    for chunk_size in arr:
        splitted.append(items[:chunk_size])
        items = items[chunk_size:]
    print(splitted)
    return splitted

items = [1,2,3,4,5,6,7,8,9,10,11]
chunks = 3
split_items(items, chunks)
split_items(['a','b','c','d','e','f','g','h','i','g','k','l', 'm'], 3)
split_items(['a','b','c','d','e','f','g','h','i','g','k','l', 'm', 'n'], 3)
split_items(range(100), 4)
split_items(range(99), 4)
split_items(range(101), 4)

and output:
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11]]
[['a', 'b', 'c', 'd'], ['e', 'f', 'g', 'h'], ['i', 'g', 'k', 'l', 'm']]
[['a', 'b', 'c', 'd', 'e'], ['f', 'g', 'h', 'i', 'g'], ['k', 'l', 'm', 'n']]
[range(0, 25), range(25, 50), range(50, 75), range(75, 100)]
[range(0, 25), range(25, 50), range(50, 75), range(75, 99)]
[range(0, 25), range(25, 50), range(50, 75), range(75, 101)]

A: If you don't care about the order:
> from itertools import groupby
> batch_no = 3
> data = 'abcdefgh'
> [ [x[1] for x in x[1]] for x in groupby( sorted( (x[0] % batch_no, x[1]) for x in enumerate(data) ), key=lambda x: x[0] ) ]

[['a', 'd', 'g'], ['b', 'e', 'h'], ['c', 'f']]

This solution doesn't generate sets of the same size, but distributes values so batches are as big as possible while keeping the number of generated batches.

A:
def main():
    print(chunkify([1, 2, 3, 4, 5, 6], 2))

def chunkify(lst, n):
    chunks = []
    for i in range(0, len(lst), n):
        chunks.append(lst[i:i+n])
    return chunks

main()

I think that it's simple and can give you a chunk of an array.
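A quick property-style sanity check that applies to chunkify above, or to any chunker that neither pads nor rebalances; the helper name check_chunker is mine, not part of the answer:
def check_chunker(chunker, lst, n):
    # illustrative helper, not from the answer above
    chunks = chunker(lst, n)
    # flattening the chunks must reproduce the input, in order
    assert [x for c in chunks for x in c] == lst
    # every chunk except possibly the last must be full
    assert all(len(c) == n for c in chunks[:-1])

check_chunker(chunkify, list(range(10)), 3)  # passes silently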
A: I've created these two fancy one-liners which are efficient and lazy; both input and output are iterables, and they don't depend on any module:
The first one-liner is totally lazy, meaning that it returns an iterator producing iterators (i.e. each chunk produced is an iterator iterating over the chunk's elements). This version is good for the case where chunks are very large or elements are produced slowly one by one and should become available immediately as they are produced:
Try it online!
chunk_iters = lambda it, n: ((e for i, g in enumerate(((f,), cit)) for j, e in zip(range((1, n - 1)[i]), g)) for cit in (iter(it),) for f in cit)

The second one-liner returns an iterator that produces lists. Each list is produced as soon as the elements of a whole chunk become available through the input iterator, or when the very last element of the last chunk is reached. This version should be used if input elements are produced fast or are all available immediately. Otherwise the first, lazier one-liner version should be used.
Try it online!
chunk_lists = lambda it, n: (l for l in ([],) for i, g in enumerate((it, ((),))) for e in g for l in (l[:len(l) % n] + [e][:1 - i],) if (len(l) % n == 0) != i)

Also I provide a multi-line version of the first chunk_iters one-liner, which returns an iterator producing other iterators (going through each chunk's elements):
Try it online!
def chunk_iters(it, n):
    cit = iter(it)
    def one_chunk(f):
        yield f
        for i, e in zip(range(n - 1), cit):
            yield e
    for f in cit:
        yield one_chunk(f)

A: Although there are a lot of answers, I have a very simple way:
x = list(range(10, 75))
indices = x[0::10]
print("indices: ", indices)
xx = [x[i-10:i] for i in indices]
print("x= ", x)
print("xx= ", xx)

The result will be:
indices: [10, 20, 30, 40, 50, 60, 70]

x= [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74]

xx = [[10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35, 36, 37, 38, 39], [40, 41, 42, 43, 44, 45, 46, 47, 48, 49], [50, 51, 52, 53, 54, 55, 56, 57, 58, 59], [60, 61, 62, 63, 64, 65, 66, 67, 68, 69], [70, 71, 72, 73, 74]]

A: Let's say the list is lst
import math

# length of the list len(lst) is ln
# size of a chunk is size

for num in range(math.ceil(ln/size)):
    start, end = num*size, min((num+1)*size, ln)
    print(lst[start:end])

A: User @tzot's solution zip_longest(*[iter(lst)]*n, fillvalue=padvalue) is very elegant, but if the length of lst is not divisible by n, it pads the last sublist to keep its length matching that of the other sublists. However, if that's not desirable, then simply using zip() to produce similar round-robin zips and appending the remaining elements of lst (that cannot make a "whole" sublist) to the output should do the trick.
list(map(list, zip(*[iter(lst)]*n))) + ([rest] if (rest := lst[len(lst)//n*n:]) else [])

The above one-liner is perhaps more readable wrapped in a function. Unlike the other functions on here, it produces a list, not a generator. Depending on the use case, that may or may not be desirable.
def chunkify(lst, chunk_size):
    nested = list(map(list, zip(*[iter(lst)]*chunk_size)))
    rest = lst[len(lst)//chunk_size*chunk_size:]
    if rest:
        nested.append(rest)
    return nested

It's faster than some of the most popular answers on here that produce the same output.
my_list, n = list(range(1_000_000)), 12

%timeit list(chunks(my_list, n))  # @Ned_Batchelder
# 36.4 ms ± 1.6 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)

%timeit [my_list[i:i+n] for i in range(0, len(my_list), n)]  # @Ned_Batchelder
# 34.6 ms ± 1.12 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)

%timeit it = iter(my_list); list(iter(lambda: list(islice(it, n)), []))  # @senderle
# 60.6 ms ± 5.36 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)

%timeit list(mit.chunked(my_list, n))  # @pylang
# 59.4 ms ± 4.92 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)

%timeit chunkify(my_list, n)
# 25.8 ms ± 1.84 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
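For completeness, a quick usage check of the zip-based chunkify above; the function form runs on any Python 3, while the one-liner needs Python 3.8+ for the walrus operator:
>>> chunkify(list(range(10)), 3)
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]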
How do I split a list into equally-sized chunks?
How do I split a list of arbitrary length into equal sized chunks? See How to iterate over a list in chunks if the data result will be used directly for a loop, and does not need to be stored. For the same question with a string input, see Split string every nth character?. The same techniques generally apply, though there are some variations.
[ "Here's a generator that yields evenly-sized chunks:\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\nimport pprint\npprint.pprint(list(chunks(range(10, 75), 10)))\n[[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],\n [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],\n [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],\n [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],\n [70, 71, 72, 73, 74]]\n\nFor Python 2, using xrange instead of range:\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in xrange(0, len(lst), n):\n yield lst[i:i + n]\n\n\nBelow is a list comprehension one-liner. The method above is preferable, though, since using named functions makes code easier to understand. For Python 3:\n[lst[i:i + n] for i in range(0, len(lst), n)]\n\nFor Python 2:\n[lst[i:i + n] for i in xrange(0, len(lst), n)]\n\n", "Something super simple:\ndef chunks(xs, n):\n n = max(1, n)\n return (xs[i:i+n] for i in range(0, len(xs), n))\n\nFor Python 2, use xrange() instead of range().\n", "I know this is kind of old but nobody yet mentioned numpy.array_split:\nimport numpy as np\n\nlst = range(50)\nnp.array_split(lst, 5)\n\nResult:\n[array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),\n array([20, 21, 22, 23, 24, 25, 26, 27, 28, 29]),\n array([30, 31, 32, 33, 34, 35, 36, 37, 38, 39]),\n array([40, 41, 42, 43, 44, 45, 46, 47, 48, 49])]\n\n", "Directly from the (old) Python documentation (recipes for itertools):\nfrom itertools import izip, chain, repeat\n\ndef grouper(n, iterable, padvalue=None):\n \"grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')\"\n return izip(*[chain(iterable, repeat(padvalue, n-1))]*n)\n\nThe current version, as suggested by J.F.Sebastian:\n#from itertools import izip_longest as zip_longest # for Python 2.x\nfrom itertools import zip_longest # for Python 3.x\n#from six.moves import zip_longest # for both (uses the six compat library)\n\ndef grouper(n, iterable, padvalue=None):\n \"grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')\"\n return zip_longest(*[iter(iterable)]*n, fillvalue=padvalue)\n\nI guess Guido's time machine works—worked—will work—will have worked—was working again.\nThese solutions work because [iter(iterable)]*n (or the equivalent in the earlier version) creates one iterator, repeated n times in the list. izip_longest then effectively performs a round-robin of \"each\" iterator; because this is the same iterator, it is advanced by each such call, resulting in each such zip-roundrobin generating one tuple of n items.\n", "I'm surprised nobody has thought of using iter's two-argument form:\nfrom itertools import islice\n\ndef chunk(it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\nDemo:\n>>> list(chunk(range(14), 3))\n[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13)]\n\nThis works with any iterable and produces output lazily. It returns tuples rather than iterators, but I think it has a certain elegance nonetheless. 
It also doesn't pad; if you want padding, a simple variation on the above will suffice:\nfrom itertools import islice, chain, repeat\n\ndef chunk_pad(it, size, padval=None):\n it = chain(iter(it), repeat(padval))\n return iter(lambda: tuple(islice(it, size)), (padval,) * size)\n\nDemo:\n>>> list(chunk_pad(range(14), 3))\n[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13, None)]\n>>> list(chunk_pad(range(14), 3, 'a'))\n[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13, 'a')]\n\nLike the izip_longest-based solutions, the above always pads. As far as I know, there's no one- or two-line itertools recipe for a function that optionally pads. By combining the above two approaches, this one comes pretty close:\n_no_padding = object()\n\ndef chunk(it, size, padval=_no_padding):\n if padval == _no_padding:\n it = iter(it)\n sentinel = ()\n else:\n it = chain(iter(it), repeat(padval))\n sentinel = (padval,) * size\n return iter(lambda: tuple(islice(it, size)), sentinel)\n\nDemo:\n>>> list(chunk(range(14), 3))\n[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13)]\n>>> list(chunk(range(14), 3, None))\n[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13, None)]\n>>> list(chunk(range(14), 3, 'a'))\n[(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11), (12, 13, 'a')]\n\nI believe this is the shortest chunker proposed that offers optional padding.\nAs Tomasz Gandor observed, the two padding chunkers will stop unexpectedly if they encounter a long sequence of pad values. Here's a final variation that works around that problem in a reasonable way:\n_no_padding = object()\ndef chunk(it, size, padval=_no_padding):\n it = iter(it)\n chunker = iter(lambda: tuple(islice(it, size)), ())\n if padval == _no_padding:\n yield from chunker\n else:\n for ch in chunker:\n yield ch if len(ch) == size else ch + (padval,) * (size - len(ch))\n\nDemo:\n>>> list(chunk([1, 2, (), (), 5], 2))\n[(1, 2), ((), ()), (5,)]\n>>> list(chunk([1, 2, None, None, 5], 2, None))\n[(1, 2), (None, None), (5, None)]\n\n", "Here is a generator that work on arbitrary iterables:\ndef split_seq(iterable, size):\n it = iter(iterable)\n item = list(itertools.islice(it, size))\n while item:\n yield item\n item = list(itertools.islice(it, size))\n\nExample:\n>>> import pprint\n>>> pprint.pprint(list(split_seq(xrange(75), 10)))\n[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],\n [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],\n [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],\n [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],\n [70, 71, 72, 73, 74]]\n\n", "Simple yet elegant\nL = range(1, 1000)\nprint [L[x:x+10] for x in xrange(0, len(L), 10)]\n\nor if you prefer:\ndef chunks(L, n): return [L[x: x+n] for x in xrange(0, len(L), n)]\nchunks(L, 10)\n\n", "Don't reinvent the wheel.\nUPDATE: The upcoming Python 3.12 introduces itertools.batched, which solves this problem at last. 
See below.\nGiven\nimport itertools as it\nimport collections as ct\n\nimport more_itertools as mit\n\n\niterable = range(11)\nn = 3\n\nCode\nitertools.batched++\nlist(it.batched(iterable, n))\n# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]\n\nmore_itertools+\nlist(mit.chunked(iterable, n))\n# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]\n\nlist(mit.sliced(iterable, n))\n# [range(0, 3), range(3, 6), range(6, 9), range(9, 11)]\n\nlist(mit.grouper(n, iterable))\n# [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, None)]\n\nlist(mit.windowed(iterable, len(iterable)//n, step=n))\n# [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, None)]\n\nlist(mit.chunked_even(iterable, n))\n# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]\n\n(or DIY, if you want)\nThe Standard Library\nlist(it.zip_longest(*[iter(iterable)] * n))\n# [(0, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, None)]\n\nd = {}\nfor i, x in enumerate(iterable):\n d.setdefault(i//n, []).append(x)\n \n\nlist(d.values())\n# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]\n\ndd = ct.defaultdict(list)\nfor i, x in enumerate(iterable):\n dd[i//n].append(x)\n \n\nlist(dd.values())\n# [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]]\n\nReferences\n\nmore_itertools.chunked (related posted)\nmore_itertools.sliced\nmore_itertools.grouper (related post)\nmore_itertools.windowed (see also stagger, zip_offset)\nmore_itertools.chunked_even\nzip_longest (related post, related post)\nsetdefault (ordered results requires Python 3.6+)\ncollections.defaultdict (ordered results requires Python 3.6+)\n\n+ A third-party library that implements itertools recipes and more. > pip install more_itertools \n++Included in Python Standard Library 3.12+. batched is similar to more_itertools.chunked.\n", "def chunk(input, size):\n return map(None, *([iter(input)] * size))\n\n", "How do you split a list into evenly sized chunks?\n\"Evenly sized chunks\", to me, implies that they are all the same length, or barring that option, at minimal variance in length. E.g. 5 baskets for 21 items could have the following results:\n>>> import statistics\n>>> statistics.variance([5,5,5,5,1]) \n3.2\n>>> statistics.variance([5,4,4,4,4]) \n0.19999999999999998\n\nA practical reason to prefer the latter result: if you were using these functions to distribute work, you've built-in the prospect of one likely finishing well before the others, so it would sit around doing nothing while the others continued working hard.\nCritique of other answers here\nWhen I originally wrote this answer, none of the other answers were evenly sized chunks - they all leave a runt chunk at the end, so they're not well balanced, and have a higher than necessary variance of lengths.\nFor example, the current top answer ends with:\n[60, 61, 62, 63, 64, 65, 66, 67, 68, 69],\n[70, 71, 72, 73, 74]]\n\nOthers, like list(grouper(3, range(7))), and chunk(range(7), 3) both return: [(0, 1, 2), (3, 4, 5), (6, None, None)]. The None's are just padding, and rather inelegant in my opinion. They are NOT evenly chunking the iterables.\nWhy can't we divide these better?\nCycle Solution\nA high-level balanced solution using itertools.cycle, which is the way I might do it today. 
Here's the setup:\nfrom itertools import cycle\nitems = range(10, 75)\nnumber_of_baskets = 10\n\nNow we need our lists into which to populate the elements:\nbaskets = [[] for _ in range(number_of_baskets)]\n\nFinally, we zip the elements we're going to allocate together with a cycle of the baskets until we run out of elements, which, semantically, it exactly what we want:\nfor element, basket in zip(items, cycle(baskets)):\n basket.append(element)\n\nHere's the result:\n>>> from pprint import pprint\n>>> pprint(baskets)\n[[10, 20, 30, 40, 50, 60, 70],\n [11, 21, 31, 41, 51, 61, 71],\n [12, 22, 32, 42, 52, 62, 72],\n [13, 23, 33, 43, 53, 63, 73],\n [14, 24, 34, 44, 54, 64, 74],\n [15, 25, 35, 45, 55, 65],\n [16, 26, 36, 46, 56, 66],\n [17, 27, 37, 47, 57, 67],\n [18, 28, 38, 48, 58, 68],\n [19, 29, 39, 49, 59, 69]]\n\nTo productionize this solution, we write a function, and provide the type annotations:\nfrom itertools import cycle\nfrom typing import List, Any\n\ndef cycle_baskets(items: List[Any], maxbaskets: int) -> List[List[Any]]:\n baskets = [[] for _ in range(min(maxbaskets, len(items)))]\n for item, basket in zip(items, cycle(baskets)):\n basket.append(item)\n return baskets\n\nIn the above, we take our list of items, and the max number of baskets. We create a list of empty lists, in which to append each element, in a round-robin style.\nSlices\nAnother elegant solution is to use slices - specifically the less-commonly used step argument to slices. i.e.:\nstart = 0\nstop = None\nstep = number_of_baskets\n\nfirst_basket = items[start:stop:step]\n\nThis is especially elegant in that slices don't care how long the data are - the result, our first basket, is only as long as it needs to be. We'll only need to increment the starting point for each basket.\nIn fact this could be a one-liner, but we'll go multiline for readability and to avoid an overlong line of code:\nfrom typing import List, Any\n\ndef slice_baskets(items: List[Any], maxbaskets: int) -> List[List[Any]]:\n n_baskets = min(maxbaskets, len(items))\n return [items[i::n_baskets] for i in range(n_baskets)]\n\nAnd islice from the itertools module will provide a lazily iterating approach, like that which was originally asked for in the question.\nI don't expect most use-cases to benefit very much, as the original data is already fully materialized in a list, but for large datasets, it could save nearly half the memory usage.\nfrom itertools import islice\nfrom typing import List, Any, Generator\n \ndef yield_islice_baskets(items: List[Any], maxbaskets: int) -> Generator[List[Any], None, None]:\n n_baskets = min(maxbaskets, len(items))\n for i in range(n_baskets):\n yield islice(items, i, None, n_baskets)\n\nView results with:\nfrom pprint import pprint\n\nitems = list(range(10, 75))\npprint(cycle_baskets(items, 10))\npprint(slice_baskets(items, 10))\npprint([list(s) for s in yield_islice_baskets(items, 10)])\n\nUpdated prior solutions\nHere's another balanced solution, adapted from a function I've used in production in the past, that uses the modulo operator:\ndef baskets_from(items, maxbaskets=25):\n baskets = [[] for _ in range(maxbaskets)]\n for i, item in enumerate(items):\n baskets[i % maxbaskets].append(item)\n return filter(None, baskets) \n\nAnd I created a generator that does the same if you put it into a list:\ndef iter_baskets_from(items, maxbaskets=3):\n '''generates evenly balanced baskets from indexable iterable'''\n item_count = len(items)\n baskets = min(item_count, maxbaskets)\n for x_i in range(baskets):\n 
yield [items[y_i] for y_i in range(x_i, item_count, baskets)]\n \n\nAnd finally, since I see that all of the above functions return elements in a contiguous order (as they were given):\ndef iter_baskets_contiguous(items, maxbaskets=3, item_count=None):\n '''\n generates balanced baskets from iterable, contiguous contents\n provide item_count if providing a iterator that doesn't support len()\n '''\n item_count = item_count or len(items)\n baskets = min(item_count, maxbaskets)\n items = iter(items)\n floor = item_count // baskets \n ceiling = floor + 1\n stepdown = item_count % baskets\n for x_i in range(baskets):\n length = ceiling if x_i < stepdown else floor\n yield [items.next() for _ in range(length)]\n\nOutput\nTo test them out:\nprint(baskets_from(range(6), 8))\nprint(list(iter_baskets_from(range(6), 8)))\nprint(list(iter_baskets_contiguous(range(6), 8)))\nprint(baskets_from(range(22), 8))\nprint(list(iter_baskets_from(range(22), 8)))\nprint(list(iter_baskets_contiguous(range(22), 8)))\nprint(baskets_from('ABCDEFG', 3))\nprint(list(iter_baskets_from('ABCDEFG', 3)))\nprint(list(iter_baskets_contiguous('ABCDEFG', 3)))\nprint(baskets_from(range(26), 5))\nprint(list(iter_baskets_from(range(26), 5)))\nprint(list(iter_baskets_contiguous(range(26), 5)))\n\nWhich prints out:\n[[0], [1], [2], [3], [4], [5]]\n[[0], [1], [2], [3], [4], [5]]\n[[0], [1], [2], [3], [4], [5]]\n[[0, 8, 16], [1, 9, 17], [2, 10, 18], [3, 11, 19], [4, 12, 20], [5, 13, 21], [6, 14], [7, 15]]\n[[0, 8, 16], [1, 9, 17], [2, 10, 18], [3, 11, 19], [4, 12, 20], [5, 13, 21], [6, 14], [7, 15]]\n[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14], [15, 16, 17], [18, 19], [20, 21]]\n[['A', 'D', 'G'], ['B', 'E'], ['C', 'F']]\n[['A', 'D', 'G'], ['B', 'E'], ['C', 'F']]\n[['A', 'B', 'C'], ['D', 'E'], ['F', 'G']]\n[[0, 5, 10, 15, 20, 25], [1, 6, 11, 16, 21], [2, 7, 12, 17, 22], [3, 8, 13, 18, 23], [4, 9, 14, 19, 24]]\n[[0, 5, 10, 15, 20, 25], [1, 6, 11, 16, 21], [2, 7, 12, 17, 22], [3, 8, 13, 18, 23], [4, 9, 14, 19, 24]]\n[[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20], [21, 22, 23, 24, 25]]\n\nNotice that the contiguous generator provide chunks in the same length patterns as the other two, but the items are all in order, and they are as evenly divided as one may divide a list of discrete elements.\n", "If you know list size:\ndef SplitList(mylist, chunk_size):\n return [mylist[offs:offs+chunk_size] for offs in range(0, len(mylist), chunk_size)]\n\nIf you don't (an iterator):\ndef IterChunks(sequence, chunk_size):\n res = []\n for item in sequence:\n res.append(item)\n if len(res) >= chunk_size:\n yield res\n res = []\n if res:\n yield res # yield the last, incomplete, portion\n\nIn the latter case, it can be rephrased in a more beautiful way if you can be sure that the sequence always contains a whole number of chunks of given size (i.e. there is no incomplete last chunk).\n", "I saw the most awesome Python-ish answer in a duplicate of this question:\nfrom itertools import zip_longest\n\na = range(1, 16)\ni = iter(a)\nr = list(zip_longest(i, i, i))\n>>> print(r)\n[(1, 2, 3), (4, 5, 6), (7, 8, 9), (10, 11, 12), (13, 14, 15)]\n\nYou can create n-tuple for any n. If a = range(1, 15), then the result will be:\n[(1, 2, 3), (4, 5, 6), (7, 8, 9), (10, 11, 12), (13, 14, None)]\n\nIf the list is divided evenly, then you can replace zip_longest with zip, otherwise the triplet (13, 14, None) would be lost. Python 3 is used above. 
For Python 2, use izip_longest.\n", "[AA[i:i+SS] for i in range(len(AA))[::SS]]\n\nWhere AA is array, SS is chunk size. For example:\n>>> AA=range(10,21);SS=3\n>>> [AA[i:i+SS] for i in range(len(AA))[::SS]]\n[[10, 11, 12], [13, 14, 15], [16, 17, 18], [19, 20]]\n# or [range(10, 13), range(13, 16), range(16, 19), range(19, 21)] in py3\n\nTo expand the ranges in py3 do\n(py3) >>> [list(AA[i:i+SS]) for i in range(len(AA))[::SS]]\n[[10, 11, 12], [13, 14, 15], [16, 17, 18], [19, 20]]\n\n", "If you had a chunk size of 3 for example, you could do:\nzip(*[iterable[i::3] for i in range(3)]) \n\nsource:\nhttp://code.activestate.com/recipes/303060-group-a-list-into-sequential-n-tuples/\nI would use this when my chunk size is fixed number I can type, e.g. '3', and would never change.\n", "The toolz library has the partition function for this:\nfrom toolz.itertoolz.core import partition\n\nlist(partition(2, [1, 2, 3, 4]))\n[(1, 2), (3, 4)]\n\n", "With Assignment Expressions in Python 3.8 it becomes quite nice:\nimport itertools\n\ndef batch(iterable, size):\n it = iter(iterable)\n while item := list(itertools.islice(it, size)):\n yield item\n\nThis works on an arbitrary iterable, not just a list.\n>>> import pprint\n>>> pprint.pprint(list(batch(range(75), 10)))\n[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24, 25, 26, 27, 28, 29],\n [30, 31, 32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47, 48, 49],\n [50, 51, 52, 53, 54, 55, 56, 57, 58, 59],\n [60, 61, 62, 63, 64, 65, 66, 67, 68, 69],\n [70, 71, 72, 73, 74]]\n\n", "I was curious about the performance of different approaches and here it is:\nTested on Python 3.5.1\nimport time\nbatch_size = 7\narr_len = 298937\n\n#---------slice-------------\n\nprint(\"\\r\\nslice\")\nstart = time.time()\narr = [i for i in range(0, arr_len)]\nwhile True:\n if not arr:\n break\n\n tmp = arr[0:batch_size]\n arr = arr[batch_size:-1]\nprint(time.time() - start)\n\n#-----------index-----------\n\nprint(\"\\r\\nindex\")\narr = [i for i in range(0, arr_len)]\nstart = time.time()\nfor i in range(0, round(len(arr) / batch_size + 1)):\n tmp = arr[batch_size * i : batch_size * (i + 1)]\nprint(time.time() - start)\n\n#----------batches 1------------\n\ndef batch(iterable, n=1):\n l = len(iterable)\n for ndx in range(0, l, n):\n yield iterable[ndx:min(ndx + n, l)]\n\nprint(\"\\r\\nbatches 1\")\narr = [i for i in range(0, arr_len)]\nstart = time.time()\nfor x in batch(arr, batch_size):\n tmp = x\nprint(time.time() - start)\n\n#----------batches 2------------\n\nfrom itertools import islice, chain\n\ndef batch(iterable, size):\n sourceiter = iter(iterable)\n while True:\n batchiter = islice(sourceiter, size)\n yield chain([next(batchiter)], batchiter)\n\n\nprint(\"\\r\\nbatches 2\")\narr = [i for i in range(0, arr_len)]\nstart = time.time()\nfor x in batch(arr, batch_size):\n tmp = x\nprint(time.time() - start)\n\n#---------chunks-------------\ndef chunks(l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\nprint(\"\\r\\nchunks\")\narr = [i for i in range(0, arr_len)]\nstart = time.time()\nfor x in chunks(arr, batch_size):\n tmp = x\nprint(time.time() - start)\n\n#-----------grouper-----------\n\nfrom itertools import zip_longest # for Python 3.x\n#from six.moves import zip_longest # for both (uses the six compat library)\n\ndef grouper(iterable, n, padvalue=None):\n \"grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')\"\n return 
zip_longest(*[iter(iterable)]*n, fillvalue=padvalue)\n\narr = [i for i in range(0, arr_len)]\nprint(\"\\r\\ngrouper\")\nstart = time.time()\nfor x in grouper(arr, batch_size):\n tmp = x\nprint(time.time() - start)\n\nResults:\nslice\n31.18285083770752\n\nindex\n0.02184295654296875\n\nbatches 1\n0.03503894805908203\n\nbatches 2\n0.22681021690368652\n\nchunks\n0.019841909408569336\n\ngrouper\n0.006506919860839844\n\n", "You may also use get_chunks function of utilspie library as:\n>>> from utilspie import iterutils\n>>> a = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n>>> list(iterutils.get_chunks(a, 5))\n[[1, 2, 3, 4, 5], [6, 7, 8, 9]]\n\nYou can install utilspie via pip:\nsudo pip install utilspie\n\nDisclaimer: I am the creator of utilspie library.\n", "I like the Python doc's version proposed by tzot and J.F.Sebastian a lot,\n but it has two shortcomings:\n\nit is not very explicit\nI usually don't want a fill value in the last chunk\n\nI'm using this one a lot in my code:\nfrom itertools import islice\n\ndef chunks(n, iterable):\n iterable = iter(iterable)\n while True:\n yield tuple(islice(iterable, n)) or iterable.next()\n\nUPDATE: A lazy chunks version:\nfrom itertools import chain, islice\n\ndef chunks(n, iterable):\n iterable = iter(iterable)\n while True:\n yield chain([next(iterable)], islice(iterable, n-1))\n\n", "code:\ndef split_list(the_list, chunk_size):\n result_list = []\n while the_list:\n result_list.append(the_list[:chunk_size])\n the_list = the_list[chunk_size:]\n return result_list\n\na_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nprint split_list(a_list, 3)\n\nresult:\n[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]\n\n", "heh, one line version\nIn [48]: chunk = lambda ulist, step: map(lambda i: ulist[i:i+step], xrange(0, len(ulist), step))\n\nIn [49]: chunk(range(1,100), 10)\nOut[49]: \n[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25, 26, 27, 28, 29, 30],\n [31, 32, 33, 34, 35, 36, 37, 38, 39, 40],\n [41, 42, 43, 44, 45, 46, 47, 48, 49, 50],\n [51, 52, 53, 54, 55, 56, 57, 58, 59, 60],\n [61, 62, 63, 64, 65, 66, 67, 68, 69, 70],\n [71, 72, 73, 74, 75, 76, 77, 78, 79, 80],\n [81, 82, 83, 84, 85, 86, 87, 88, 89, 90],\n [91, 92, 93, 94, 95, 96, 97, 98, 99]]\n\n", "Another more explicit version.\ndef chunkList(initialList, chunkSize):\n \"\"\"\n This function chunks a list into sub lists \n that have a length equals to chunkSize.\n\n Example:\n lst = [3, 4, 9, 7, 1, 1, 2, 3]\n print(chunkList(lst, 3)) \n returns\n [[3, 4, 9], [7, 1, 1], [2, 3]]\n \"\"\"\n finalList = []\n for i in range(0, len(initialList), chunkSize):\n finalList.append(initialList[i:i+chunkSize])\n return finalList\n\n", "At this point, I think we need a recursive generator, just in case...\nIn python 2:\ndef chunks(li, n):\n if li == []:\n return\n yield li[:n]\n for e in chunks(li[n:], n):\n yield e\n\nIn python 3:\ndef chunks(li, n):\n if li == []:\n return\n yield li[:n]\n yield from chunks(li[n:], n)\n\nAlso, in case of massive Alien invasion, a decorated recursive generator might become handy:\ndef dec(gen):\n def new_gen(li, n):\n for e in gen(li, n):\n if e == []:\n return\n yield e\n return new_gen\n\n@dec\ndef chunks(li, n):\n yield li[:n]\n for e in chunks(li[n:], n):\n yield e\n\n", "Without calling len() which is good for large lists:\ndef splitter(l, n):\n i = 0\n chunk = l[:n]\n while chunk:\n yield chunk\n i += n\n chunk = l[i:i+n]\n\nAnd this is for iterables:\ndef isplitter(l, n):\n l = iter(l)\n chunk = list(islice(l, n))\n while chunk:\n yield chunk\n 
chunk = list(islice(l, n))\n\nThe functional flavour of the above:\ndef isplitter2(l, n):\n return takewhile(bool,\n (tuple(islice(start, n))\n for start in repeat(iter(l))))\n\nOR:\ndef chunks_gen_sentinel(n, seq):\n continuous_slices = imap(islice, repeat(iter(seq)), repeat(0), repeat(n))\n return iter(imap(tuple, continuous_slices).next,())\n\nOR:\ndef chunks_gen_filter(n, seq):\n continuous_slices = imap(islice, repeat(iter(seq)), repeat(0), repeat(n))\n return takewhile(bool,imap(tuple, continuous_slices))\n\n", "def split_seq(seq, num_pieces):\n start = 0\n for i in xrange(num_pieces):\n stop = start + len(seq[i::num_pieces])\n yield seq[start:stop]\n start = stop\n\nusage:\nseq = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nfor seq in split_seq(seq, 3):\n print seq\n\n", "See this reference\n>>> orange = range(1, 1001)\n>>> otuples = list( zip(*[iter(orange)]*10))\n>>> print(otuples)\n[(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), ... (991, 992, 993, 994, 995, 996, 997, 998, 999, 1000)]\n>>> olist = [list(i) for i in otuples]\n>>> print(olist)\n[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], ..., [991, 992, 993, 994, 995, 996, 997, 998, 999, 1000]]\n>>> \n\nPython3\n", "def chunks(iterable,n):\n \"\"\"assumes n is an integer>0\n \"\"\"\n iterable=iter(iterable)\n while True:\n result=[]\n for i in range(n):\n try:\n a=next(iterable)\n except StopIteration:\n break\n else:\n result.append(a)\n if result:\n yield result\n else:\n break\n\ng1=(i*i for i in range(10))\ng2=chunks(g1,3)\nprint g2\n'<generator object chunks at 0x0337B9B8>'\nprint list(g2)\n'[[0, 1, 4], [9, 16, 25], [36, 49, 64], [81]]'\n\n", "Since everybody here talking about iterators. boltons has perfect method for that, called iterutils.chunked_iter.\nfrom boltons import iterutils\n\nlist(iterutils.chunked_iter(list(range(50)), 11))\n\nOutput:\n[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],\n [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],\n [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43],\n [44, 45, 46, 47, 48, 49]]\n\nBut if you don't want to be mercy on memory, you can use old-way and store the full list in the first place with iterutils.chunked.\n", "Consider using matplotlib.cbook pieces\nfor example:\nimport matplotlib.cbook as cbook\nsegments = cbook.pieces(np.arange(20), 3)\nfor s in segments:\n print s\n\n", "a = [1, 2, 3, 4, 5, 6, 7, 8, 9]\nCHUNK = 4\n[a[i*CHUNK:(i+1)*CHUNK] for i in xrange((len(a) + CHUNK - 1) / CHUNK )]\n\n", "One more solution\ndef make_chunks(data, chunk_size): \n while data:\n chunk, data = data[:chunk_size], data[chunk_size:]\n yield chunk\n\n>>> for chunk in make_chunks([1, 2, 3, 4, 5, 6, 7], 2):\n... print chunk\n... \n[1, 2]\n[3, 4]\n[5, 6]\n[7]\n>>> \n\n", ">>> def f(x, n, acc=[]): return f(x[n:], n, acc+[(x[:n])]) if x else acc\n>>> f(\"Hallo Welt\", 3)\n['Hal', 'lo ', 'Wel', 't']\n>>> \n\nIf you are into brackets - I picked up a book on Erlang :)\n", "I realise this question is old (stumbled over it on Google), but surely something like the following is far simpler and clearer than any of the huge complex suggestions and only uses slicing:\ndef chunker(iterable, chunksize):\n for i,c in enumerate(iterable[::chunksize]):\n yield iterable[i*chunksize:(i+1)*chunksize]\n\n>>> for chunk in chunker(range(0,100), 10):\n... print list(chunk)\n... \n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]\n[20, 21, 22, 23, 24, 25, 26, 27, 28, 29]\n... etc ...\n\n", "As per this answer, the top-voted answer leaves a 'runt' at the end. 
Here's my solution to really get about as evenly-sized chunks as you can, with no runts. It basically tries to pick exactly the fractional spot where it should split the list, but just rounds it off to the nearest integer:\nfrom __future__ import division # not needed in Python 3\ndef n_even_chunks(l, n):\n \"\"\"Yield n as even chunks as possible from l.\"\"\"\n last = 0\n for i in range(1, n+1):\n cur = int(round(i * (len(l) / n)))\n yield l[last:cur]\n last = cur\n\nDemonstration:\n>>> pprint.pprint(list(n_even_chunks(list(range(100)), 9)))\n[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],\n [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],\n [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43],\n [44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55],\n [56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66],\n [67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77],\n [78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88],\n [89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]]\n>>> pprint.pprint(list(n_even_chunks(list(range(100)), 11)))\n[[0, 1, 2, 3, 4, 5, 6, 7, 8],\n [9, 10, 11, 12, 13, 14, 15, 16, 17],\n [18, 19, 20, 21, 22, 23, 24, 25, 26],\n [27, 28, 29, 30, 31, 32, 33, 34, 35],\n [36, 37, 38, 39, 40, 41, 42, 43, 44],\n [45, 46, 47, 48, 49, 50, 51, 52, 53, 54],\n [55, 56, 57, 58, 59, 60, 61, 62, 63],\n [64, 65, 66, 67, 68, 69, 70, 71, 72],\n [73, 74, 75, 76, 77, 78, 79, 80, 81],\n [82, 83, 84, 85, 86, 87, 88, 89, 90],\n [91, 92, 93, 94, 95, 96, 97, 98, 99]]\n\nCompare to the top-voted chunks answer:\n>>> pprint.pprint(list(chunks(list(range(100)), 100//9)))\n[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],\n [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],\n [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43],\n [44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54],\n [55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65],\n [66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76],\n [77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87],\n [88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98],\n [99]]\n>>> pprint.pprint(list(chunks(list(range(100)), 100//11)))\n[[0, 1, 2, 3, 4, 5, 6, 7, 8],\n [9, 10, 11, 12, 13, 14, 15, 16, 17],\n [18, 19, 20, 21, 22, 23, 24, 25, 26],\n [27, 28, 29, 30, 31, 32, 33, 34, 35],\n [36, 37, 38, 39, 40, 41, 42, 43, 44],\n [45, 46, 47, 48, 49, 50, 51, 52, 53],\n [54, 55, 56, 57, 58, 59, 60, 61, 62],\n [63, 64, 65, 66, 67, 68, 69, 70, 71],\n [72, 73, 74, 75, 76, 77, 78, 79, 80],\n [81, 82, 83, 84, 85, 86, 87, 88, 89],\n [90, 91, 92, 93, 94, 95, 96, 97, 98],\n [99]]\n\n", "You could use numpy's array_split function e.g., np.array_split(np.array(data), 20) to split into 20 nearly equal size chunks.\nTo make sure chunks are exactly equal in size use np.split.\n", "I don't think I saw this option, so just to add another one :)) :\ndef chunks(iterable, chunk_size):\n i = 0;\n while i < len(iterable):\n yield iterable[i:i+chunk_size]\n i += chunk_size\n\n", "python pydash package could be a good choice. \nfrom pydash.arrays import chunk\nids = ['22', '89', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '1']\nchunk_ids = chunk(ids,5)\nprint(chunk_ids)\n# output: [['22', '89', '2', '3', '4'], ['5', '6', '7', '8', '9'], ['10', '11', '1']]\n\nfor more checkout pydash chunk list\n", "def chunk(lst):\n out = []\n for x in xrange(2, len(lst) + 1):\n if not len(lst) % x:\n factor = len(lst) / x\n break\n while lst:\n out.append([lst.pop(0) for x in xrange(factor)])\n return out\n\n", "letting r be the chunk size and L be the initial list, you can do. 
\nchunkL = [ [i for i in L[r*k:r*(k+1)] ] for k in range(len(L)/r)] \n\n", "Use list comprehensions:\nl = [1,2,3,4,5,6,7,8,9,10,11,12]\nk = 5 #chunk size\nprint [tuple(l[x:y]) for (x, y) in [(x, x+k) for x in range(0, len(l), k)]]\n\n", "At this point, I think we need the obligatory anonymous-recursive function.\nY = lambda f: (lambda x: x(x))(lambda y: f(lambda *args: y(y)(*args)))\nchunks = Y(lambda f: lambda n: [n[0][:n[1]]] + f((n[0][n[1]:], n[1])) if len(n[0]) > 0 else [])\n\n", "I have one solution below which does work but more important than that solution is a few comments on other approaches. First, a good solution shouldn't require that one loop through the sub-iterators in order. If I run\ng = paged_iter(list(range(50)), 11))\ni0 = next(g)\ni1 = next(g)\nlist(i1)\nlist(i0)\n\nThe appropriate output for the last command is \n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\nnot\n []\n\nAs most of the itertools based solutions here return. This isn't just the usual boring restriction about accessing iterators in order. Imagine a consumer trying to clean up poorly entered data which reversed the appropriate order of blocks of 5, i.e., the data looks like [B5, A5, D5, C5] and should look like [A5, B5, C5, D5] (where A5 is just five elements not a sublist). This consumer would look at the claimed behavior of the grouping function and not hesitate to write a loop like\ni = 0\nout = []\nfor it in paged_iter(data,5)\n if (i % 2 == 0):\n swapped = it\n else: \n out += list(it)\n out += list(swapped)\n i = i + 1\n\nThis will produce mysteriously wrong results if you sneakily assume that sub-iterators are always fully used in order. It gets even worse if you want to interleave elements from the chunks. \nSecond, a decent number of the suggested solutions implicitly rely on the fact that iterators have a deterministic order (they don't e.g. set) and while some of the solutions using islice may be ok it worries me.\nThird, the itertools grouper approach works but the recipe relies on internal behavior of the zip_longest (or zip) functions that isn't part of their published behavior. In particular, the grouper function only works because in zip_longest(i0...in) the next function is always called in order next(i0), next(i1), ... next(in) before starting over. As grouper passes n copies of the same iterator object it relies on this behavior.\nFinally, while the solution below can be improved if you make the assumption criticized above that sub-iterators are accessed in order and fully perused without this assumption one MUST implicitly (via call chain) or explicitly (via deques or other data structure) store elements for each subiterator somewhere. So don't bother wasting time (as I did) assuming one could get around this with some clever trick.\ndef paged_iter(iterat, n):\n itr = iter(iterat)\n deq = None\n try:\n while(True):\n deq = collections.deque(maxlen=n)\n for q in range(n):\n deq.append(next(itr))\n yield (i for i in deq)\n except StopIteration:\n yield (i for i in deq)\n\n", "Here's an idea using itertools.groupby:\ndef chunks(l, n):\n c = itertools.count()\n return (it for _, it in itertools.groupby(l, lambda x: next(c)//n))\n\nThis returns a generator of generators. 
If you want a list of lists, just replace the last line with\n return [list(it) for _, it in itertools.groupby(l, lambda x: next(c)//n)]\n\nExample returning list of lists:\n>>> chunks('abcdefghij', 4)\n[['a', 'b', 'c', 'd'], ['e', 'f', 'g', 'h'], ['i', 'j']]\n\n(So yes, this suffers form the \"runt problem\", which may or may not be a problem in a given situation.)\n", "An abstraction would be\nl = [1,2,3,4,5,6,7,8,9]\nn = 3\noutList = []\nfor i in range(n, len(l) + n, n):\n outList.append(l[i-n:i])\n\nprint(outList)\n\nThis will print:\n\n[[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n\n", "I wrote a small library expressly for this purpose, available here. The library's chunked function is particularly efficient because it's implemented as a generator, so a substantial amount of memory can be saved in certain situations. It also doesn't rely on the slice notation, so any arbitrary iterator can be used.\nimport iterlib\n\nprint list(iterlib.chunked(xrange(1, 1000), 10))\n# prints [(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), (11, 12, 13, 14, 15, 16, 17, 18, 19, 20), ...]\n\n", "The answer above (by koffein) has a little problem: the list is always split into an equal number of splits, not equal number of items per partition. This is my version. The \"// chs + 1\" takes into account that the number of items may not be divideable exactly by the partition size, so the last partition will only be partially filled.\n# Given 'l' is your list\n\nchs = 12 # Your chunksize\npartitioned = [ l[i*chs:(i*chs)+chs] for i in range((len(l) // chs)+1) ]\n\n", "No magic, but simple and correct:\ndef chunks(iterable, n):\n \"\"\"Yield successive n-sized chunks from iterable.\"\"\"\n values = []\n for i, item in enumerate(iterable, 1):\n values.append(item)\n if i % n == 0:\n yield values\n values = []\n if values:\n yield values\n\n", "\nWorks with any iterable\nInner data is generator object (not a list)\nOne liner\n\n\nIn [259]: get_in_chunks = lambda itr,n: ( (v for _,v in g) for _,g in itertools.groupby(enumerate(itr),lambda (ind,_): ind/n))\n\nIn [260]: list(list(x) for x in get_in_chunks(range(30),7))\nOut[260]:\n[[0, 1, 2, 3, 4, 5, 6],\n [7, 8, 9, 10, 11, 12, 13],\n [14, 15, 16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25, 26, 27],\n [28, 29]]\n\n", "Like @AaronHall I got here looking for roughly evenly sized chunks. There are different interpretations of that. In my case, if the desired size is N, I would like each group to be of size>=N.\nThus, the orphans which are created in most of the above should be redistributed to other groups.\nThis can be done using:\ndef nChunks(l, n):\n \"\"\" Yield n successive chunks from l.\n Works for lists, pandas dataframes, etc\n \"\"\"\n newn = int(1.0 * len(l) / n + 0.5)\n for i in xrange(0, n-1):\n yield l[i*newn:i*newn+newn]\n yield l[n*newn-newn:]\n\n(from Splitting a list of into N parts of approximately equal length) by simply calling it as nChunks(l,l/n) or nChunks(l,floor(l/n))\n", "I have come up to following solution without creation temorary list object, which should work with any iterable object. 
Please note that this version for Python 2.x:\ndef chunked(iterable, size):\n stop = []\n it = iter(iterable)\n def _next_chunk():\n try:\n for _ in xrange(size):\n yield next(it)\n except StopIteration:\n stop.append(True)\n return\n\n while not stop:\n yield _next_chunk()\n\nfor it in chunked(xrange(16), 4):\n print list(it)\n\nOutput:\n[0, 1, 2, 3]\n[4, 5, 6, 7]\n[8, 9, 10, 11]\n[12, 13, 14, 15] \n[]\n\nAs you can see if len(iterable) % size == 0 then we have additional empty iterator object. But I do not think that it is big problem.\n", "This works in v2/v3, is inlineable, generator-based and uses only the standard library:\nimport itertools\ndef split_groups(iter_in, group_size):\n return ((x for _, x in item) for _, item in itertools.groupby(enumerate(iter_in), key=lambda x: x[0] // group_size))\n\n", "A simple solution\n\nThe OP has requested \"equal sized chunk\". I understand \"equal sized\" as \"balanced\" sizes: we are looking for groups of items of approximately the same sizes if equal sizes are not possible (e.g, 23/5).\n\nInputs here are:\n\nthe list of items: input_list (list of 23 numbers, for instance)\nthe number of groups to split those items: n_groups (5, for instance)\n\nInput:\ninput_list = list(range(23))\nn_groups = 5\n\nGroups of contiguous elements:\napprox_sizes = len(input_list)/n_groups \n\ngroups_cont = [input_list[int(i*approx_sizes):int((i+1)*approx_sizes)] \n for i in range(n_groups)]\n\nGroups of \"every-Nth\" elements:\ngroups_leap = [input_list[i::n_groups] \n for i in range(n_groups)]\n\nResults\nprint(len(input_list))\n\nprint('Contiguous elements lists:')\nprint(groups_cont)\n\nprint('Leap every \"N\" items lists:')\nprint(groups_leap)\n\n\nWill output:\n23\n\nContiguous elements lists:\n[[0, 1, 2, 3], [4, 5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16, 17], [18, 19, 20, 21, 22]]\n\nLeap every \"N\" items lists:\n[[0, 5, 10, 15, 20], [1, 6, 11, 16, 21], [2, 7, 12, 17, 22], [3, 8, 13, 18], [4, 9, 14, 19]]\n\n\n", "Since I had to do something like this, here's my solution given a generator and a batch size:\ndef pop_n_elems_from_generator(g, n):\n elems = []\n try:\n for idx in xrange(0, n):\n elems.append(g.next())\n return elems\n except StopIteration:\n return elems\n\n", "This question reminds me of the Raku (formerly Perl 6) .comb(n) method. It breaks up strings into n-sized chunks. 
(There's more to it than that, but I'll leave out the details.)\nIt's easy enough to implement a similar function in Python3 as a lambda expression:\ncomb = lambda s,n: (s[i:i+n] for i in range(0,len(s),n))\n\nThen you can call it like this:\nsome_list = list(range(0, 20)) # creates a list of 20 elements\ngenerator = comb(some_list, 4) # creates a generator that will generate lists of 4 elements\nfor sublist in generator:\n print(sublist) # prints a sublist of four elements, as it's generated\n\nOf course, you don't have to assign the generator to a variable; you can just loop over it directly like this:\nfor sublist in comb(some_list, 4):\n print(sublist) # prints a sublist of four elements, as it's generated\n\nAs a bonus, this comb() function also operates on strings:\nlist( comb('catdogant', 3) ) # returns ['cat', 'dog', 'ant']\n\n", "A generic chunker for any iterable, which gives the user a choice of how to handle a partial chunk at the end.\nTested on Python 3.\nchunker.py\nfrom enum import Enum\n\nclass PartialChunkOptions(Enum):\n INCLUDE = 0\n EXCLUDE = 1\n PAD = 2\n ERROR = 3\n\nclass PartialChunkException(Exception):\n pass\n\ndef chunker(iterable, n, on_partial=PartialChunkOptions.INCLUDE, pad=None):\n \"\"\"\n A chunker yielding n-element lists from an iterable, with various options\n about what to do about a partial chunk at the end.\n\n on_partial=PartialChunkOptions.INCLUDE (the default):\n include the partial chunk as a short (<n) element list\n\n on_partial=PartialChunkOptions.EXCLUDE\n do not include the partial chunk\n\n on_partial=PartialChunkOptions.PAD\n pad to an n-element list \n (also pass pad=<pad_value>, default None)\n\n on_partial=PartialChunkOptions.ERROR\n raise a RuntimeError if a partial chunk is encountered\n \"\"\"\n\n on_partial = PartialChunkOptions(on_partial) \n\n iterator = iter(iterable)\n while True:\n vals = []\n for i in range(n):\n try:\n vals.append(next(iterator))\n except StopIteration:\n if vals:\n if on_partial == PartialChunkOptions.INCLUDE:\n yield vals\n elif on_partial == PartialChunkOptions.EXCLUDE:\n pass\n elif on_partial == PartialChunkOptions.PAD:\n yield vals + [pad] * (n - len(vals))\n elif on_partial == PartialChunkOptions.ERROR:\n raise PartialChunkException\n return\n return\n yield vals\n\ntest.py\nimport chunker\n\nchunk_size = 3\n\nfor it in (range(100, 107),\n range(100, 109)):\n\n print(\"\\nITERABLE TO CHUNK: {}\".format(it))\n print(\"CHUNK SIZE: {}\".format(chunk_size))\n\n for option in chunker.PartialChunkOptions.__members__.values():\n print(\"\\noption {} used\".format(option))\n try:\n for chunk in chunker.chunker(it, chunk_size, on_partial=option):\n print(chunk)\n except chunker.PartialChunkException:\n print(\"PartialChunkException was raised\")\n print(\"\")\n\noutput of test.py\n\nITERABLE TO CHUNK: range(100, 107)\nCHUNK SIZE: 3\n\noption PartialChunkOptions.INCLUDE used\n[100, 101, 102]\n[103, 104, 105]\n[106]\n\noption PartialChunkOptions.EXCLUDE used\n[100, 101, 102]\n[103, 104, 105]\n\noption PartialChunkOptions.PAD used\n[100, 101, 102]\n[103, 104, 105]\n[106, None, None]\n\noption PartialChunkOptions.ERROR used\n[100, 101, 102]\n[103, 104, 105]\nPartialChunkException was raised\n\n\nITERABLE TO CHUNK: range(100, 109)\nCHUNK SIZE: 3\n\noption PartialChunkOptions.INCLUDE used\n[100, 101, 102]\n[103, 104, 105]\n[106, 107, 108]\n\noption PartialChunkOptions.EXCLUDE used\n[100, 101, 102]\n[103, 104, 105]\n[106, 107, 108]\n\noption PartialChunkOptions.PAD used\n[100, 101, 102]\n[103, 104, 105]\n[106, 107, 
108]\n\noption PartialChunkOptions.ERROR used\n[100, 101, 102]\n[103, 104, 105]\n[106, 107, 108]\n\n\n", "You may use more_itertools.chunked_even along with math.ceil. Likely the easiest to reason?\nfrom math import ceil\nimport more_itertools as mit\nfrom pprint import pprint\n\npprint([*mit.chunked_even(range(19), ceil(19 / 5))])\n# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15], [16, 17, 18]]\n\npprint([*mit.chunked_even(range(20), ceil(20 / 5))])\n# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15], [16, 17, 18, 19]]\n\npprint([*mit.chunked_even(range(21), ceil(21 / 5))])\n# [[0, 1, 2, 3, 4],\n# [5, 6, 7, 8],\n# [9, 10, 11, 12],\n# [13, 14, 15, 16],\n# [17, 18, 19, 20]]\n\npprint([*mit.chunked_even(range(3), ceil(3 / 5))])\n# [[0], [1], [2]]\n\n\n\n", "The recipes in the itertools module provide two ways to do this depending on how you want to handle a final odd-sized lot (keep it, pad it with a fillvalue, ignore it, or raise an exception):\nfrom itertools import islice, izip_longest\n\ndef batched(iterable, n):\n \"Batch data into lists of length n. The last batch may be shorter.\"\n # batched('ABCDEFG', 3) --> ABC DEF G\n it = iter(iterable)\n while True:\n batch = list(islice(it, n))\n if not batch:\n return\n yield batch\n\ndef grouper(iterable, n, *, incomplete='fill', fillvalue=None):\n \"Collect data into non-overlapping fixed-length chunks or blocks\"\n # grouper('ABCDEFG', 3, fillvalue='x') --> ABC DEF Gxx\n # grouper('ABCDEFG', 3, incomplete='strict') --> ABC DEF ValueError\n # grouper('ABCDEFG', 3, incomplete='ignore') --> ABC DEF\n args = [iter(iterable)] * n\n if incomplete == 'fill':\n return zip_longest(*args, fillvalue=fillvalue)\n if incomplete == 'strict':\n return zip(*args, strict=True)\n if incomplete == 'ignore':\n return zip(*args)\n else:\n raise ValueError('Expected fill, strict, or ignore')\n\n", "I dislike idea of splitting elements by chunk size, e.g. script can devide 101 to 3 chunks as [50, 50, 1]. For my needs I needed spliting proportionly, and keeping order same. First I wrote my own script, which works fine, and it's very simple. But I've seen later this answer, where script is better than mine, I reccomend it.\nHere's my script:\ndef proportional_dividing(N, n):\n \"\"\"\n N - length of array (bigger number)\n n - number of chunks (smaller number)\n output - arr, containing N numbers, diveded roundly to n chunks\n \"\"\"\n arr = []\n if N == 0:\n return arr\n elif n == 0:\n arr.append(N)\n return arr\n r = N // n\n for i in range(n-1):\n arr.append(r)\n arr.append(N-r*(n-1))\n\n last_n = arr[-1]\n # last number always will be r <= last_n < 2*r\n # when last_n == r it's ok, but when last_n > r ...\n if last_n > r:\n # ... 
and if difference too big (bigger than 1), then\n if abs(r-last_n) > 1:\n #[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 7] # N=29, n=12\n # we need to give unnecessary numbers to first elements back\n diff = last_n - r\n for k in range(diff):\n arr[k] += 1\n arr[-1] = r\n # and we receive [3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2]\n return arr\n\ndef split_items(items, chunks):\n arr = proportional_dividing(len(items), chunks)\n splitted = []\n for chunk_size in arr:\n splitted.append(items[:chunk_size])\n items = items[chunk_size:]\n print(splitted)\n return splitted\n\nitems = [1,2,3,4,5,6,7,8,9,10,11]\nchunks = 3\nsplit_items(items, chunks)\nsplit_items(['a','b','c','d','e','f','g','h','i','g','k','l', 'm'], 3)\nsplit_items(['a','b','c','d','e','f','g','h','i','g','k','l', 'm', 'n'], 3)\nsplit_items(range(100), 4)\nsplit_items(range(99), 4)\nsplit_items(range(101), 4)\n\nand output:\n[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11]]\n[['a', 'b', 'c', 'd'], ['e', 'f', 'g', 'h'], ['i', 'g', 'k', 'l', 'm']]\n[['a', 'b', 'c', 'd', 'e'], ['f', 'g', 'h', 'i', 'g'], ['k', 'l', 'm', 'n']]\n[range(0, 25), range(25, 50), range(50, 75), range(75, 100)]\n[range(0, 25), range(25, 50), range(50, 75), range(75, 99)]\n[range(0, 25), range(25, 50), range(50, 75), range(75, 101)]\n\n", "If you don't care about the order:\n> from itertools import groupby\n> batch_no = 3\n> data = 'abcdefgh'\n\n> [\n [x[1] for x in x[1]] \n for x in \n groupby(\n sorted(\n (x[0] % batch_no, x[1]) \n for x in \n enumerate(data)\n ),\n key=lambda x: x[0]\n )\n ]\n\n[['a', 'd', 'g'], ['b', 'e', 'h'], ['c', 'f']]\n\n\nThis solution doesn't generates sets of same size, but distributes values so batches are as big as possible while keeping the number of generated batches.\n", "def main():\n print(chunkify([1,2,3,4,5,6],2))\n\ndef chunkify(list, n):\n chunks = []\n for i in range(0, len(list), n):\n chunks.append(list[i:i+n])\n return chunks\n\nmain()\n\nI think that it's simple and can give you a chunk of an array.\n", "I've created these two fancy one-liners which are efficient and lazy, both input and output are iterables, also they doen't depend on any module:\nFirst one-liner is totally lazy meaning that it returns iterator producing iterators (i.e. each chunk produced is iterator iterating over chunk's elements), this version is good for the case if chunks are very large or elements are produced slowly one by one and should become available immediately as they are produced:\nTry it online!\nchunk_iters = lambda it, n: ((e for i, g in enumerate(((f,), cit)) for j, e in zip(range((1, n - 1)[i]), g)) for cit in (iter(it),) for f in cit)\n\nSecond one-liner returns iterator that produces lists. Each list is produced as soon as elements of whole chunk become available through input iterator or if very last element of last chunk is reached. This version should be used if input elements are produced fast or all available immediately. 
Other wise first more-lazy one-liner version should be used.\nTry it online!\nchunk_lists = lambda it, n: (l for l in ([],) for i, g in enumerate((it, ((),))) for e in g for l in (l[:len(l) % n] + [e][:1 - i],) if (len(l) % n == 0) != i)\n\nAlso I provide multi-line version of first chunk_iters one-liner, which returns iterator producing another iterators (going through each chunk's elements):\nTry it online!\ndef chunk_iters(it, n):\n cit = iter(it)\n def one_chunk(f):\n yield f\n for i, e in zip(range(n - 1), cit):\n yield e\n for f in cit:\n yield one_chunk(f)\n\n", "Although there is a lot of answers I have very simple way:\n\nx = list(range(10, 75))\nindices = x[0::10]\nprint(\"indices: \", indices)\nxx = [x[i-10:i] for i in indices ]\nprint(\"x= \", x)\nprint (\"xx= \",xx)\n\n\nthe result will be :\n\nindices: [10, 20, 30, 40, 50, 60, 70] x= [10, 11, 12, 13, 14, 15,\n16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,\n33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,\n50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,\n67, 68, 69, 70, 71, 72, 73, 74]\nxx = [[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],\n[20, 21, 22, 23, 24, 25,26, 27, 28, 29],\n[30, 31, 32, 33, 34, 35, 36, 37, 38, 39],\n[40, 41, 42, 43, 44, 45, 46, 47, 48, 49],\n[50, 51, 52, 53, 54, 55, 56, 57, 58, 59],\n[60, 61, 62, 63, 64, 65, 66, 67, 68, 69],\n[70, 71, 72, 73, 74]]\n\n", "Let's say the list is lst\nimport math\n\n# length of the list len(lst) is ln\n# size of a chunk is size\n\nfor num in range ( math.ceil(ln/size) ):\n start, end = num*size, min((num+1)*size, ln)\n print(lst[start:end])\n\n", "User @tzot's solution zip_longest(*[iter(lst)]*n, fillvalue=padvalue) is very elegant but if the length of lst is not divisible by n, it pads the last sublist to keep its length match that of the other sublists. However, if that's not desirable, then simply using zip() to produce similar round-robin zips and appending the remaining elements of lst (that cannot make a \"whole\" sublist) to the output should do the trick.\nlist(map(list, zip(*[iter(lst)]*n))) + ([rest] if (rest:=lst[len(lst)//n*n : ]) else [])\n\nThe above one-liner is perhaps more readable wrapped in a function. Unlike the other functions on here, it produces a list not a generator. Depending on the use case, that may or may not be desirable.\ndef chunkify(lst, chunk_size):\n nested = list(map(list, zip(*[iter(lst)]*chunk_size)))\n rest = lst[len(lst)//chunk_size*chunk_size: ]\n if rest:\n nested.append(rest)\n return nested\n\nIt's faster than some of the most popular answers on here that produce the same output.\nmy_list, n = list(range(1_000_000)), 12\n\n%timeit list(chunks(my_list, n)) # @Ned_Batchelder\n# 36.4 ms ± 1.6 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n\n%timeit [my_list[i:i+n] for i in range(0, len(my_list), n)] # @Ned_Batchelder\n# 34.6 ms ± 1.12 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n\n%timeit it = iter(my_list); list(iter(lambda: list(islice(it, n)), [])) # @senderle\n# 60.6 ms ± 5.36 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n\n%timeit list(mit.chunked(my_list, n)) # @pylang\n# 59.4 ms ± 4.92 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n\n%timeit chunkify(my_list, n)\n# 25.8 ms ± 1.84 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n\n" ]
[ 4228, 651, 389, 341, 284, 120, 85, 65, 64, 63, 56, 51, 40, 28, 26, 26, 23, 20, 19, 16, 13, 13, 13, 12, 11, 11, 8, 8, 7, 6, 6, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 ]
[ "Lazy loading version\n\nimport pprint\npprint.pprint(list(chunks(range(10, 75), 10)))\n[range(10, 20),\n range(20, 30),\n range(30, 40),\n range(40, 50),\n range(50, 60),\n range(60, 70),\n range(70, 75)]\n\n Confer this implementation's result with the example usage result of the accepted answer. \n\nMany of the above functions assume that the length of the whole iterable are known up front, or at least are cheap to calculate.\nFor some streamed objects that would mean loading the full data into memory first (e.g. to download the whole file) to get the length information.\nIf you however don't know the the full size yet, you can use this code instead:\ndef chunks(iterable, size):\n \"\"\"\n Yield successive chunks from iterable, being `size` long.\n\n https://stackoverflow.com/a/55776536/3423324\n :param iterable: The object you want to split into pieces.\n :param size: The size each of the resulting pieces should have.\n \"\"\"\n i = 0\n while True:\n sliced = iterable[i:i + size]\n if len(sliced) == 0:\n # to suppress stuff like `range(max, max)`.\n break\n # end if\n yield sliced\n if len(sliced) < size:\n # our slice is not the full length, so we must have passed the end of the iterator\n break\n # end if\n i += size # so we start the next chunk at the right place.\n # end while\n# end def\n\nThis works because the slice command will return less/no elements if you passed the end of an iterable:\n\"abc\"[0:2] == 'ab'\n\"abc\"[2:4] == 'c'\n\"abc\"[4:6] == ''\n\nWe now use that result of the slice, and calculate the length of that generated chunk. If it is less than what we expect, we know we can end the iteration.\nThat way the iterator will not be executed unless access.\n", "An old school approach that does not require itertools but still works with arbitrary generators:\ndef chunks(g, n):\n \"\"\"divide a generator 'g' into small chunks\n Yields:\n a chunk that has 'n' or less items\n \"\"\"\n n = max(1, n)\n buff = []\n for item in g:\n buff.append(item)\n if len(buff) == n:\n yield buff\n buff = []\n if buff:\n yield buff\n\n", "from itertools import islice\nl=[1,2,3,4,5,6]\nchuncksize=input(\"Enter chunk size\")\nm=[]\nobj=iter(l)\nm.append(list(islice(l,3)))\nm.append(list(islice(l,3)))\nprint(m)\n\n", "This task can be easily done using the generator in the accepted answer. I'm adding class implementation that implements length methods, which may be useful to somebody. 
I needed to know the progress (with tqdm) so the generator should've returned the number of chunks.\nclass ChunksIterator(object):\n def __init__(self, data, n):\n self._data = data\n self._l = len(data)\n self._n = n\n\n def __iter__(self):\n for i in range(0, self._l, self._n):\n yield self._data[i:i + self._n]\n\n def __len__(self):\n rem = 1 if self._l % self._n != 0 else 0\n return self._l // self._n + rem\n\nUsage:\nit = ChunksIterator([1,2,3,4,5,6,7,8,9], 2)\nprint(len(it))\nfor i in it:\n print(i)\n\n", "One-liner version of senderle's answer:\nfrom itertools import islice\nfrom functools import partial\n\nseq = [1,2,3,4,5,6,7]\nsize = 3\nresult = list(iter(partial(lambda it: tuple(islice(it, size)), iter(seq)), ()))\nassert result == [(1, 2, 3), (4, 5, 6), (7,)]\n\n", "You should use itertools\na = [1, 2, 3, 4]\nfor i, k in more_itertools.pairwise(a):\n result += compute(i,k)\n\nThis will call function compute on every two consequent elements of list\n", "using List Comprehensions of python\n[range(t,t+10) for t in range(1,1000,10)]\n\n[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25, 26, 27, 28, 29, 30],....\n ....[981, 982, 983, 984, 985, 986, 987, 988, 989, 990],\n [991, 992, 993, 994, 995, 996, 997, 998, 999, 1000]]\n\nvisit this link to know about List Comprehensions\n", "Yes, it is an old question, but I had to post this one, because it is even a little shorter than the similar ones.\nYes, the result looks scrambled, but if it is just about even length...\n>>> n = 3 # number of groups\n>>> biglist = range(30)\n>>>\n>>> [ biglist[i::n] for i in xrange(n) ]\n[[0, 3, 6, 9, 12, 15, 18, 21, 24, 27],\n [1, 4, 7, 10, 13, 16, 19, 22, 25, 28],\n [2, 5, 8, 11, 14, 17, 20, 23, 26, 29]]\n\n" ]
[ -1, -1, -1, -1, -1, -1, -2, -2 ]
[ "chunks", "list", "python", "split" ]
stackoverflow_0000312443_chunks_list_python_split.txt
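A consolidated, runnable sketch of the pattern most of the chunking answers above converge on, assuming Python 3.8+ for the walrus operator (on Python 3.12+ the standard library ships this directly as itertools.batched, which yields tuples instead of lists):

from itertools import islice

def chunks(iterable, n):
    # Yield successive n-sized chunks; the last chunk may be shorter.
    it = iter(iterable)
    while batch := list(islice(it, n)):
        yield batch

print(list(chunks(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]

And for the complementary task several answers address — splitting into a fixed number of roughly equal parts rather than parts of a fixed size — a small sketch that spreads the remainder over the leading chunks with divmod:

def split_into(lst, n):
    # n chunks whose lengths differ by at most 1, order preserved
    q, r = divmod(len(lst), n)
    out, start = [], 0
    for i in range(n):
        end = start + q + (1 if i < r else 0)
        out.append(lst[start:end])
        start = end
    return out

print(split_into(list(range(11)), 3))  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10]]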
Q: Apply NumPy repeat only on elements that are contained in a secondary list I'm trying to repeat certain elements within a list n-times and so far I've come to this solution: _base = ["a", "z", "c", "c", "e"] for bump_element in ["a", "b", "c"]: _base = np.repeat( np.array(_base), np.where(np.array(_base) == bump_element, 2, 1) ) So far this works, _base will be ['a' 'a' 'z' 'c' 'c' 'c' 'c' 'e']. However, I'm trying to make it faster by removing the for loop so that within a single repeat I can catch all elements. Something like: _base = np.repeat( np.array(_base), np.where(np.array(_base) in ["a", "b", "c"], 2, 1) ) But that won't work since it will throw The truth value of an array with more than one element is ambiguous. Use a.any() or a.all(). Is there an easy way to make that where clause to check every item in the list against the one which contains the elements to be repeated? A: To avoid the for loop over the possible values in 'bump_element', you can use numpy isin. _base = np.array(["a", "z", "c", "c", "e"]) bump = np.array(["a", "b", "c"]) np.repeat(_base, np.where(np.isin(_base, bump), 2, 1))
Apply NumPy repeat only on elements that are contained in a secondary list
I'm trying to repeat certain elements within a list n times and so far I've come to this solution: _base = ["a", "z", "c", "c", "e"] for bump_element in ["a", "b", "c"]: _base = np.repeat( np.array(_base), np.where(np.array(_base) == bump_element, 2, 1) ) So far this works, _base will be ['a' 'a' 'z' 'c' 'c' 'c' 'c' 'e']. However, I'm trying to make it faster by removing the for loop so that within a single repeat I can catch all elements. Something like: _base = np.repeat( np.array(_base), np.where(np.array(_base) in ["a", "b", "c"], 2, 1) ) But that won't work since it will throw The truth value of an array with more than one element is ambiguous. Use a.any() or a.all(). Is there an easy way to make that where clause check every item in the list against the one that contains the elements to be repeated?
[ "To avoid the for loop over the possible values in 'bump_element', you can use numpy isin.\n_base = np.array([\"a\", \"z\", \"c\", \"c\", \"e\"])\nbump = np.array([\"a\", \"b\", \"c\"])\nnp.repeat(_base, np.where(np.isin(_base, bump), 2, 1))\n\n" ]
[ 1 ]
[]
[]
[ "numpy", "python" ]
stackoverflow_0074534130_numpy_python.txt
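A runnable end-to-end sketch of the accepted np.isin approach above, assuming NumPy is installed; the membership test is vectorised, so no Python-level loop over the bump values is needed:

import numpy as np

base = np.array(["a", "z", "c", "c", "e"])
bump = np.array(["a", "b", "c"])
repeats = np.where(np.isin(base, bump), 2, 1)  # 2 copies for members of bump, 1 otherwise
print(np.repeat(base, repeats))  # ['a' 'a' 'z' 'c' 'c' 'c' 'c' 'e']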
Q: Extract nodes from json based on user input preserveing a portion of the higher level object as well need to extract object from the given json based on the node chain passed by user and neglect those which are not in user input, then create a new json object my master json is : { "menustructure": [ { "node":"Admin", "path":"admin", "child":[ { "id": "resouce0", "node": "Admin.resouce0", "path":"resouce0", "rank":0, "child":[ { "id": "res_child", "node": "Admin.resouce0.res_child", "path":"res_child", "rank":1 }, { "id": "res_child2", "node": "Admin.resouce0.res_child2", "path":"res_child", "rank":1 } ] }, { "id": "resouce1", "node": "Admin.resouce1", "path":"resouce1", "rank":1 }, { "id": "resouce2", "node":"Admin.resouce2", "path": "oath", "rank":2 } ] }, { "node":"Workspace", "path": "wsp", "child":[{ "id":"system1", "node": "Workspace.system1", "path":"sys1", "child":[{ "id": "child1", "node": "Workspace.system1.child1", "path":"ch1" }] }, { "id":"system2", "node": "Workspace.system2", "path":"sys2" } ] }]} for example if user pass ['Admin.resource1', 'Workspace'] so expeceted ouput json will be Note '.' in element of user inputted list means that node have child nodes and new json will be having all those child node details including parent node details. { "menustructure": [ { "node":"Admin", "path":"admin", "child":[ { "id": "resouce1", "node": "Admin.resouce1", "path":"resouce1", "rank":1 } ] }, { "node":"Workspace", "path": "wsp", "child":[{ "id": "system1", "node": "Workspace.system1", "path":"sys1" "child":[{ "id": "child1", "node": "Workspace.system1.child1", "path":"ch1" }, { "id": "system2", "node": "Workspace.system2", "path":"sys2" } ] } ] } or another example is : ['Admin.resouce2', 'workspace.system1'] then expected json will be: { "menustructure": [ { "node":"Admin", "path":"admin", "child":[ {"id": "resouce2","node":"Admin.resouce2", "path": "oath", "rank":2 } ] }, { "node":"Workspace", "path": "wsp", "child":[{ "id": "system1", "node": "Workspace.system1", "path":"sys1" "child":[{ "id": "child1", "node": "Workspace.system1.child1", "path":"ch1" } ] } ] } or if only single node passed ['Admin'] then output json will be: { "menustructure": [ { "node":"Admin", "path":"admin", "child":[ { "id": "resouce1", "node": "Admin.resouce1", "path":"resouce1", "rank":1 }, {"id": "resouce2","node":"Admin.resouce2", "path": "oath", "rank":2 } ] } ] } Code I tried is working for one level of child: master = json.loads(m) menustruct = [] test_master = master['menustructure'] temp_json = test_master nde = ['Admin.resouce1', 'Admin.resouce0', 'Workspace.system2'] temp_data = master['menustructure'] #print(temp_data) final_data = [] parent_node = [] for m in nde: items = copy.deepcopy(temp_data) if "." 
in m: menu_series = m.split(".") for item in items: if item['node'] == menu_series[0]: item_child_nodes = item['child'] child = None for node in item_child_nodes: if node['id'] != menu_series[1]: item_child_nodes.remove(node) else: child = node if menu_series[0] in parent_node: for i in final_data: if i['node'] == menu_series[0]: i['child'].append(child) else: final_data.append(item) #print(item_child_nodes) parent_node.append(menu_series[0]) else: for item in items: if item['node'] == m: final_data.append(item) t = {} t['menustructure'] = final_data print(t) but not getting how to handle multiple child level for example {master -> child -> child} or {master -> child -> child -> child} multilevel child is present in Workspace.system1 If child parent already exist then child should get appended into parent node in resulting json I tried Glom lib but it's not working as intended. Any help on how to achieve multi level child problem. A: Here is my solution. The idea is to traverse the structure recursively and remove nodes that don't match user input. The algorithm does not mutate the input data, but creates a shallow copy of the subtree only when the child attribute is changed. def extract(data, query): return { "menustructure": extract_nodes( data["menustructure"], [x.split(".") for x in query] ) } def matches(name, query): name = name.split(".") for q in query: size = min(len(q), len(name)) if name[:size] == q[:size]: return True return False def extract_nodes(data, query): if isinstance(data, list): data = [ extract_nodes(x, query) for x in data if matches(x["node"], query) ] return [x for x in data if x is not None] if isinstance(data, dict) and matches(data["node"], query): if "child" in data: children = extract_nodes(data["child"], query) if len(children) != len(data["child"]): data = data.copy() # copy-on-write data["child"] = children if not data["child"]: return None return data Usage: import json data = json.loads(m) result = extract(data, ["Admin.resouce1", "Workspace.system1"]) print(json.dumps(result, indent=4)) result = extract(data, ["Admin.resouce0.res_child"]) print(json.dumps(result, indent=4))
Extract nodes from JSON based on user input, preserving a portion of the higher-level object as well
need to extract object from the given json based on the node chain passed by user and neglect those which are not in user input, then create a new json object my master json is : { "menustructure": [ { "node":"Admin", "path":"admin", "child":[ { "id": "resouce0", "node": "Admin.resouce0", "path":"resouce0", "rank":0, "child":[ { "id": "res_child", "node": "Admin.resouce0.res_child", "path":"res_child", "rank":1 }, { "id": "res_child2", "node": "Admin.resouce0.res_child2", "path":"res_child", "rank":1 } ] }, { "id": "resouce1", "node": "Admin.resouce1", "path":"resouce1", "rank":1 }, { "id": "resouce2", "node":"Admin.resouce2", "path": "oath", "rank":2 } ] }, { "node":"Workspace", "path": "wsp", "child":[{ "id":"system1", "node": "Workspace.system1", "path":"sys1", "child":[{ "id": "child1", "node": "Workspace.system1.child1", "path":"ch1" }] }, { "id":"system2", "node": "Workspace.system2", "path":"sys2" } ] }]} for example if user pass ['Admin.resource1', 'Workspace'] so expeceted ouput json will be Note '.' in element of user inputted list means that node have child nodes and new json will be having all those child node details including parent node details. { "menustructure": [ { "node":"Admin", "path":"admin", "child":[ { "id": "resouce1", "node": "Admin.resouce1", "path":"resouce1", "rank":1 } ] }, { "node":"Workspace", "path": "wsp", "child":[{ "id": "system1", "node": "Workspace.system1", "path":"sys1" "child":[{ "id": "child1", "node": "Workspace.system1.child1", "path":"ch1" }, { "id": "system2", "node": "Workspace.system2", "path":"sys2" } ] } ] } or another example is : ['Admin.resouce2', 'workspace.system1'] then expected json will be: { "menustructure": [ { "node":"Admin", "path":"admin", "child":[ {"id": "resouce2","node":"Admin.resouce2", "path": "oath", "rank":2 } ] }, { "node":"Workspace", "path": "wsp", "child":[{ "id": "system1", "node": "Workspace.system1", "path":"sys1" "child":[{ "id": "child1", "node": "Workspace.system1.child1", "path":"ch1" } ] } ] } or if only single node passed ['Admin'] then output json will be: { "menustructure": [ { "node":"Admin", "path":"admin", "child":[ { "id": "resouce1", "node": "Admin.resouce1", "path":"resouce1", "rank":1 }, {"id": "resouce2","node":"Admin.resouce2", "path": "oath", "rank":2 } ] } ] } Code I tried is working for one level of child: master = json.loads(m) menustruct = [] test_master = master['menustructure'] temp_json = test_master nde = ['Admin.resouce1', 'Admin.resouce0', 'Workspace.system2'] temp_data = master['menustructure'] #print(temp_data) final_data = [] parent_node = [] for m in nde: items = copy.deepcopy(temp_data) if "." in m: menu_series = m.split(".") for item in items: if item['node'] == menu_series[0]: item_child_nodes = item['child'] child = None for node in item_child_nodes: if node['id'] != menu_series[1]: item_child_nodes.remove(node) else: child = node if menu_series[0] in parent_node: for i in final_data: if i['node'] == menu_series[0]: i['child'].append(child) else: final_data.append(item) #print(item_child_nodes) parent_node.append(menu_series[0]) else: for item in items: if item['node'] == m: final_data.append(item) t = {} t['menustructure'] = final_data print(t) but not getting how to handle multiple child level for example {master -> child -> child} or {master -> child -> child -> child} multilevel child is present in Workspace.system1 If child parent already exist then child should get appended into parent node in resulting json I tried Glom lib but it's not working as intended. 
Any help on how to achieve the multi-level child handling would be appreciated.
[ "Here is my solution. The idea is to traverse the structure recursively and remove nodes that don't match user input. The algorithm does not mutate the input data, but creates a shallow copy of the subtree only when the child attribute is changed.\ndef extract(data, query):\n return {\n \"menustructure\": extract_nodes(\n data[\"menustructure\"], [x.split(\".\") for x in query]\n )\n }\n\n\ndef matches(name, query):\n name = name.split(\".\")\n for q in query:\n size = min(len(q), len(name))\n if name[:size] == q[:size]:\n return True\n\n return False\n\n\ndef extract_nodes(data, query):\n if isinstance(data, list):\n data = [\n extract_nodes(x, query)\n for x in data\n if matches(x[\"node\"], query)\n ]\n return [x for x in data if x is not None]\n\n if isinstance(data, dict) and matches(data[\"node\"], query):\n if \"child\" in data:\n children = extract_nodes(data[\"child\"], query)\n if len(children) != len(data[\"child\"]):\n data = data.copy() # copy-on-write\n data[\"child\"] = children\n if not data[\"child\"]:\n return None\n return data\n\nUsage:\nimport json\ndata = json.loads(m)\nresult = extract(data, [\"Admin.resouce1\", \"Workspace.system1\"])\nprint(json.dumps(result, indent=4))\n\n\nresult = extract(data, [\"Admin.resouce0.res_child\"])\nprint(json.dumps(result, indent=4))\n\n" ]
[ 1 ]
[]
[]
[ "glom", "json", "python", "python_3.x", "python_jsons" ]
stackoverflow_0074506548_glom_json_python_python_3.x_python_jsons.txt
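A standalone check of the prefix rule that drives the extraction above — matches() is copied verbatim from the answer, and the calls show why ancestors of a deep query and descendants of a shallow one are both kept while siblings are filtered out:

def matches(name, query):
    name = name.split(".")
    for q in query:
        size = min(len(q), len(name))
        if name[:size] == q[:size]:
            return True
    return False

query = [["Admin", "resouce0"]]  # as produced from the user input "Admin.resouce0"
print(matches("Admin", query))                     # True: ancestor of the query
print(matches("Admin.resouce0", query))            # True: exact match
print(matches("Admin.resouce0.res_child", query))  # True: descendant of the query
print(matches("Admin.resouce1", query))            # False: sibling, filtered out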
Q: Create two columns from the same columns but in different ways From the table below, I would like to create two columns that aggregate 'amount' depending on the value of 'number' and 'type'. number type amount 1 A 10 1 A 20 2 A 10 3 B 20 2 B 10 1 B 20 Here's the table I would like to get. The first column I want to create is 'amount A', which is the aggregation of the rows with 'A' in 'type' grouped by 'number'. The other one, 'amount A+B', is the aggregation of all the rows grouped by 'number' regardless of the value of 'type'. number amount A amount A+B 1 30 50 2 10 20 3 0 20 I only came up with a way that creates subsets and builds the two columns separately, but I wonder if there is a more efficient way. A: You can try this: out = ( df.astype({'number': 'category'}) .query('type == "A"') .groupby(['number'])['amount'].sum() .to_frame('amount A') ) out['amount A+B'] = df.groupby('number')['amount'].sum() print(out) amount A amount A+B number 1 30 50 2 10 20 3 0 20 One of the tricks is to convert the 'number' column to a categorical so that we have a resultant sum for all numbers even if a number doesn't appear with 'type A'. Once we do that, we can very easily perform a groupby across the numbers with and without the rows where type == "A".
Create two columns from the same columns but in different ways
From the table below, I would like to create two columns that aggregate 'amount' depending on the value of 'number' and 'type'. number type amount 1 A 10 1 A 20 2 A 10 3 B 20 2 B 10 1 B 20 Here's the table I would like to get. The first column I want to create is 'amount A', which is the aggregation of the rows with 'A' in 'type' grouped by 'number'. The other one, 'amount A+B', is the aggregation of all the rows grouped by 'number' regardless of the value of 'type'. number amount A amount A+B 1 30 50 2 10 20 3 0 20 I only came up with a way that creates subsets and builds the two columns separately, but I wonder if there is a more efficient way.
[ "You can try this:\nout = (\n df.astype({'number': 'category'})\n .query('type == \"A\"')\n .groupby(['number'])['amount'].sum()\n .to_frame('amount A')\n)\n\nout['amount A+B'] = df.groupby('number')['amount'].sum()\n\nprint(out)\n amount A amount A+B\nnumber \n1 30 50\n2 10 20\n3 0 20\n\nOne of the tricks is to convert the 'number' column to a categorical so that we have a resultant sum for all numbers even if a number doesn't appear with 'type A'.\nOnce we do that, we can very easily perform a groupby across the numbers with an without the rows where type == \"A\".\n" ]
[ 1 ]
[]
[]
[ "python" ]
stackoverflow_0074534236_python.txt
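An equivalent sketch that avoids the categorical trick, assuming pandas is installed: mask the non-'A' amounts to 0 first, then a single groupby produces both columns at once (number 3 still shows up because its rows remain in the frame):

import pandas as pd

df = pd.DataFrame({
    "number": [1, 1, 2, 3, 2, 1],
    "type":   ["A", "A", "A", "B", "B", "B"],
    "amount": [10, 20, 10, 20, 10, 20],
})

out = (
    df.assign(**{"amount A": df["amount"].where(df["type"].eq("A"), 0)})
      .groupby("number")
      .agg(**{"amount A": ("amount A", "sum"), "amount A+B": ("amount", "sum")})
)
print(out)
#         amount A  amount A+B
# number
# 1             30          50
# 2             10          20
# 3              0          20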
Q: Django form is not saving when I have action = to desired url If I use action="" in my django form, the form works properly but sends the user to the wrong page. I want the user to go back to the macro/ page upon form submission, but when I add that url to action (like action="{% url 'macro' %}", it goes to the page but the form doesn't save. Any suggestion on how to handle this? Code below: (Option 1) macro_update.html -> the form here works properly, but takes the user to the wrong page <ul> <form action="" method="post"> {% csrf_token %} {{ macro_form.as_ul }} <input type="submit" value="Submit"> </form> </ul> (Option 2) macro_update.html -> the user is redirected to the right page upon submission, but the form data doesn't update/save <ul> <form action="{% url 'macro' %}" method="post"> {% csrf_token %} {{ macro_form.as_ul }} <input type="submit" value="Submit"> </form> </ul> views.py @login_required(login_url='login') def macroUpdate(request): if request.method == "POST": macro_form = MacroForm(request.POST, instance=request.user.profile) if macro_form.is_valid(): macro_form.save() messages.success(request,('Your macros were successfully updated!')) else: messages.error(request,('Unable to complete request')) return redirect("profile") macro_form = MacroForm(instance=request.user.profile) context = {"user":request.user, "macro_form":macro_form } return render(request, 'macro_update.html', context) urls.py urlpatterns = [ path('', views.loginPage, name='login'), path('register/', views.registerPage, name='register'), path('profile/', views.profilePage, name='profile'), path('profile/update/', views.profileUpdate, name='profile-update'), path('logout/', views.logoutUser, name='logout-user'), path('macro/', views.macroPage, name='macro'), path('macro/update/', views.macroUpdate, name='macro-update'), ] A: I want the user to go back to the macro/ page upon form submission, but when I add that url to action (like action="{% url 'macro' %}", it goes to the page but the form doesn't save. It is because form data must go to macroUpdate view to save not macroPage, to redirect on macro page after form submission you can use redirect("macro") so: views.py: @login_required(login_url='login') def macroUpdate(request): if request.method == "POST": macro_form = MacroForm(request.POST, instance=request.user.profile) if macro_form.is_valid(): macro_form.save() messages.success(request,('Your macros were successfully updated!')) else: messages.error(request,('Unable to complete request')) return redirect("macro") macro_form = MacroForm(instance=request.user.profile) context = {"user":request.user, "macro_form":macro_form } return render(request, 'macro_update.html', context) Just remove the action attribute in from tag of macro_update.html since Django always takes current page route so: <ul> <form method="POST"> {% csrf_token %} {{ macro_form.as_ul }} <input type="submit" value="Submit"> </form> </ul>
Django form is not saving when the action attribute is set to the desired URL
If I use action="" in my django form, the form works properly but sends the user to the wrong page. I want the user to go back to the macro/ page upon form submission, but when I add that url to action (like action="{% url 'macro' %}", it goes to the page but the form doesn't save. Any suggestion on how to handle this? Code below: (Option 1) macro_update.html -> the form here works properly, but takes the user to the wrong page <ul> <form action="" method="post"> {% csrf_token %} {{ macro_form.as_ul }} <input type="submit" value="Submit"> </form> </ul> (Option 2) macro_update.html -> the user is redirected to the right page upon submission, but the form data doesn't update/save <ul> <form action="{% url 'macro' %}" method="post"> {% csrf_token %} {{ macro_form.as_ul }} <input type="submit" value="Submit"> </form> </ul> views.py @login_required(login_url='login') def macroUpdate(request): if request.method == "POST": macro_form = MacroForm(request.POST, instance=request.user.profile) if macro_form.is_valid(): macro_form.save() messages.success(request,('Your macros were successfully updated!')) else: messages.error(request,('Unable to complete request')) return redirect("profile") macro_form = MacroForm(instance=request.user.profile) context = {"user":request.user, "macro_form":macro_form } return render(request, 'macro_update.html', context) urls.py urlpatterns = [ path('', views.loginPage, name='login'), path('register/', views.registerPage, name='register'), path('profile/', views.profilePage, name='profile'), path('profile/update/', views.profileUpdate, name='profile-update'), path('logout/', views.logoutUser, name='logout-user'), path('macro/', views.macroPage, name='macro'), path('macro/update/', views.macroUpdate, name='macro-update'), ]
[ "\nI want the user to go back to the macro/ page upon form submission, but when I add that url to action (like action=\"{% url 'macro' %}\", it goes to the page but the form doesn't save.\n\nIt is because form data must go to macroUpdate view to save not macroPage, to redirect on macro page after form submission you can use redirect(\"macro\") so:\nviews.py:\n@login_required(login_url='login')\ndef macroUpdate(request):\n\n if request.method == \"POST\":\n macro_form = MacroForm(request.POST, instance=request.user.profile)\n if macro_form.is_valid():\n macro_form.save()\n messages.success(request,('Your macros were successfully updated!'))\n else:\n messages.error(request,('Unable to complete request'))\n return redirect(\"macro\")\n\n macro_form = MacroForm(instance=request.user.profile) \n context = {\"user\":request.user, \"macro_form\":macro_form } \n return render(request, 'macro_update.html', context)\n\nJust remove the action attribute in from tag of macro_update.html since Django always takes current page route so:\n<ul>\n <form method=\"POST\">\n {% csrf_token %}\n {{ macro_form.as_ul }}\n <input type=\"submit\" value=\"Submit\">\n </form>\n</ul>\n\n" ]
[ 2 ]
[]
[]
[ "django", "django_forms", "django_templates", "django_urls", "python" ]
stackoverflow_0074534273_django_django_forms_django_templates_django_urls_python.txt
Q: Comparing next element in a list Python I'm trying to figure out how to make sure that the consecutive values are not the same in a list. Expected output: [1, 2, 3] Actual output: [1, 1, 3, 3] I also tried using next() but that gave me "list object is not an iterator" What is best practices here and what am I doing wrong? def unique_in_order(iterable): return [x for x in iterable if not iterable[x] == iterable[x+1]] print(unique_in_order([1,1,2,2,3,3])) A: Do it without list comprehensions. Create a list with the first element and iterate over the following pairs def unique_in_order(iterable): lst = [iterable[0]] for x in range(len(iterable) - 1): if iterable[x] != iterable[x + 1]: lst.append(iterable[x + 1]) return lst you can also use zip def unique_in_order(iterable): lst = [iterable[0]] for x, y in zip(iterable, iterable[1:]): if x != y: lst.append(y) return lst A: If you do it without a list comprehension, you can get better control flow and solve your problem: def unique_in_order(iterable): list = [] for index, x in enumerate(iterable): if index == len(iterable) -1: list.append(x) elif iterable[index] != iterable[index+1]: list.append(x) return list A: Here's a way to do it using a generator. It assumes None is not the first value in the list. def unique(lst): prev = None for val in lst: if val != prev: prev = val yield val print(list(unique([1,1,2,2,3,3,1,1]))) A: You can use a list comprehension, but make sure to add the last element to the returned list. def unique_in_order(lst): return [lst[i] for i in range(len(lst)-1) if lst[i] != lst[i+1]] + [lst[-1]] A: The simplest way would be using itertools.groupby(): from itertools import groupby def unique_in_order(iterable): return [i[0] for i in groupby(iterable)] It will work for any iterable, not only lists.
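On Python 3.10+, itertools.pairwise offers a compact middle ground between the explicit loop and groupby. A minimal sketch (lst[:1] also keeps it safe for an empty input):
from itertools import pairwise  # Python 3.10+

def unique_in_order(iterable):
    lst = list(iterable)
    # keep the first element, then keep each element that differs from its predecessor
    return lst[:1] + [y for x, y in pairwise(lst) if x != y]

print(unique_in_order([1, 1, 2, 2, 3, 3]))  # [1, 2, 3]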
Comparing next element in a list Python
I'm trying to figure out how to make sure that the consecutive values are not the same in a list.
Expected output: [1, 2, 3] Actual output: [1, 1, 3, 3]
I also tried using next() but that gave me "list object is not an iterator"
What are best practices here and what am I doing wrong?
def unique_in_order(iterable):
    return [x for x in iterable if not iterable[x] == iterable[x+1]]


print(unique_in_order([1,1,2,2,3,3]))
[ "Do it without list comprehensions. Create a list with the first element and iterate over the following pairs\ndef unique_in_order(iterable):\n lst = [iterable[0]]\n for x in range(len(iterable) - 1):\n if iterable[x] != iterable[x + 1]:\n lst.append(iterable[x + 1])\n return lst\n\nyou can also use zip\ndef unique_in_order(iterable):\n lst = [iterable[0]]\n for x, y in zip(iterable, iterable[1:]):\n if x != y:\n lst.append(y)\n return lst\n\n", "If you do it without a list comprehension, you can get better control flow and solve your problem:\ndef unique_in_order(iterable):\n list = []\n\n for index, x in enumerate(iterable):\n if index == len(iterable) -1:\n list.append(x)\n elif iterable[index] != iterable[index+1]:\n list.append(x)\n\n return list\n\n", "Here's a way to do it using a generator. It assumes None is not the first value in the list.\ndef unique(lst):\n prev = None\n for val in lst:\n if val != prev:\n prev = val\n yield val\n\nprint(list(unique([1,1,2,2,3,3,1,1])))\n\n", "You can use a list comprehension, but make sure to add the last element to the returned list.\ndef unique_in_order(lst):\n return [lst[i] for i in range(len(lst)-1) if lst[i] != lst[i+1]] + [lst[-1]]\n\n", "The simplest way would be using itertools.groupby():\nfrom itertools import groupby\n\ndef unique_in_order(iterable):\n return [i[0] for i in groupby(iterable)]\n\nIt will work for any iterable, not only lists.\n" ]
[ 2, 0, 0, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074533740_python.txt
Q: Python selenium and captcha I have a scraping bot which I want to stop whenever it encounters a captcha, so not to annoy the websites. But selenium can't find it driver.find_element_by_xpath("//*[@id='recaptcha-anchor']") This is the xpath chrome gave me. ERROR NoSuchElementException: Unable to locate element: {"method":"xpath","selector":"//*[@id='recaptcha-anchor']"} Any ideas why this does not work? A: AFAIK, captcha usually located inside an iframe, so you can try to switch to iframe before searching for required element: frame = driver.find_element_by_xpath('//iframe[contains(@src, "recaptcha")]') driver.switch_to.frame(frame) driver.find_element_by_xpath("//*[@id='recaptcha-anchor']") If you need to switch back from iframe: driver.switch_to.default_content() A: For byPass The reCAPTCHAv2 is within an <iframe..> so you have to: Pick the Captchav2 page and define URL. Induce WebDriverWait for the desired frame to be available and switch to it. Induce WebDriverWait for the desired element to be clickable. from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as EC driver.get(URL) WebDriverWait(driver, 20).until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR,"iframe[src^='https://www.google.com/recaptcha/api2/anchor']"))) WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div.recaptcha-checkbox-border"))).click()
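To meet the original goal, stopping the bot when a captcha appears, you could wrap the first answer's lookup in a helper. A sketch assuming the same Selenium 3-style API as the question (NoSuchElementException then simply means no captcha is present):
from selenium.common.exceptions import NoSuchElementException

def captcha_present(driver):
    try:
        # the reCAPTCHA widget lives inside its own iframe
        frame = driver.find_element_by_xpath('//iframe[contains(@src, "recaptcha")]')
        driver.switch_to.frame(frame)
        driver.find_element_by_xpath("//*[@id='recaptcha-anchor']")
        return True
    except NoSuchElementException:
        return False
    finally:
        driver.switch_to.default_content()  # always leave the iframe context

The scraper can then bail out of its loop as soon as captcha_present(driver) returns True.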
Python selenium and captcha
I have a scraping bot which I want to stop whenever it encounters a captcha, so as not to annoy the websites. But selenium can't find it
driver.find_element_by_xpath("//*[@id='recaptcha-anchor']")

This is the xpath chrome gave me.
ERROR
NoSuchElementException: Unable to locate element: {"method":"xpath","selector":"//*[@id='recaptcha-anchor']"}

Any ideas why this does not work?
[ "AFAIK, captcha usually located inside an iframe, so you can try to switch to iframe before searching for required element:\nframe = driver.find_element_by_xpath('//iframe[contains(@src, \"recaptcha\")]')\ndriver.switch_to.frame(frame)\ndriver.find_element_by_xpath(\"//*[@id='recaptcha-anchor']\")\n\nIf you need to switch back from iframe:\ndriver.switch_to.default_content()\n\n", "For byPass The reCAPTCHAv2 is within an <iframe..> so you have to:\n\nPick the Captchav2 page and define URL.\n\nInduce WebDriverWait for the desired frame to be available and switch to it.\n\nInduce WebDriverWait for the desired element to be clickable.\n from selenium.webdriver.support.ui import WebDriverWait\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support import expected_conditions as EC\n\n driver.get(URL)\n\n WebDriverWait(driver, 20).until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR,\"iframe[src^='https://www.google.com/recaptcha/api2/anchor']\")))\n WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"div.recaptcha-checkbox-border\"))).click()\n\n\n\n" ]
[ 13, 0 ]
[]
[]
[ "python", "selenium", "xpath" ]
stackoverflow_0044187909_python_selenium_xpath.txt
Q: Extendable way to select nth lowest/highest members of a list in python when there is a rank tie Working on a hacker rank challenge and feel like I really hacked it. Need to select the second lowest members of a list, and return both if there's a tie. Here's what I did: if __name__ == '__main__': names = [] scores = [] names_scores = [] for _ in range(int(input())): name = input() score = float(input()) names.append(name) scores.append(score) names_scores.append([name, score]) def second_smallest(numbers): m1 = m2 = float('inf') for x in numbers: if x <= m1: m1, m2 = x, m1 elif x < m2: m2 = x return m2 second_smallest_number = second_smallest(set(scores)) second_lowest_name_score = filter(lambda x: x[1] == second_smallest_number, names_scores) second_lowest_names = [item[0] for item in second_lowest_name_score] second_lowest_names.sort() if len(second_lowest_names) == 1: print(second_lowest_names[0]) else: print(second_lowest_names[0] + "\n" + second_lowest_names[1]) My issue with this is that even though I "passed" I'd have to write a new line of the if statement for every number of ties. I would like to know a way that this would be extensible no matter the nubmer of ties. I understanding I could pandas rank() but wondering how to do this with the standard library A: Use itertools.groupby to group the identical scores: >>> from itertools import groupby >>> def second_lowest(scores): ... ranks = (g for _, g in groupby(sorted(scores))) ... first = list(next(ranks)) ... if len(first) >= 2: ... return first # second lowest is also tied for first lowest ... return list(next(ranks)) # otherwise return second lowest rank ... >>> second_lowest([1, 1, 1, 2, 2, 3]) [1, 1, 1] >>> second_lowest([1, 2, 2, 2, 2, 2, 3, 3]) [2, 2, 2, 2, 2] For a more general-purpose function (i.e. get the nth-lowest) you could keep a count of how many individual scores you'd iterated over and return whatever group the nth score landed in. A: Here's what we ended up doing: if __name__ == '__main__': scores = [] names_scores = [] for _ in range(int(input())): name = input() score = float(input()) scores.append(score) names_scores.append([name, score]) from collections import Counter scores = sorted(Counter(scores).items()) second_lowest_score = scores[1][0] names = sorted([name_score[0] for name_score in names_scores if name_score[1] == second_lowest_score]) print("\n".join(names))
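Following the closing note of the first answer, here is a hedged sketch of the general nth-lowest version: it counts individual scores as it walks the sorted groups and returns the whole group that the nth score lands in, so ties always come back together:
from itertools import groupby

def nth_lowest(scores, n):
    seen = 0
    for _, group in groupby(sorted(scores)):
        members = list(group)
        seen += len(members)       # count individual scores, not ranks
        if seen >= n:
            return members         # the nth score landed in this group
    raise ValueError("n is larger than the number of scores")

print(nth_lowest([1, 1, 1, 2, 2, 3], 2))  # [1, 1, 1], matching second_lowest above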
Extendable way to select nth lowest/highest members of a list in python when there is a rank tie
Working on a hacker rank challenge and feel like I really hacked it. Need to select the second lowest members of a list, and return both if there's a tie. Here's what I did: if __name__ == '__main__': names = [] scores = [] names_scores = [] for _ in range(int(input())): name = input() score = float(input()) names.append(name) scores.append(score) names_scores.append([name, score]) def second_smallest(numbers): m1 = m2 = float('inf') for x in numbers: if x <= m1: m1, m2 = x, m1 elif x < m2: m2 = x return m2 second_smallest_number = second_smallest(set(scores)) second_lowest_name_score = filter(lambda x: x[1] == second_smallest_number, names_scores) second_lowest_names = [item[0] for item in second_lowest_name_score] second_lowest_names.sort() if len(second_lowest_names) == 1: print(second_lowest_names[0]) else: print(second_lowest_names[0] + "\n" + second_lowest_names[1]) My issue with this is that even though I "passed" I'd have to write a new line of the if statement for every number of ties. I would like to know a way that this would be extensible no matter the nubmer of ties. I understanding I could pandas rank() but wondering how to do this with the standard library
[ "Use itertools.groupby to group the identical scores:\n>>> from itertools import groupby\n>>> def second_lowest(scores):\n... ranks = (g for _, g in groupby(sorted(scores)))\n... first = list(next(ranks))\n... if len(first) >= 2:\n... return first # second lowest is also tied for first lowest\n... return list(next(ranks)) # otherwise return second lowest rank\n...\n>>> second_lowest([1, 1, 1, 2, 2, 3])\n[1, 1, 1]\n>>> second_lowest([1, 2, 2, 2, 2, 2, 3, 3])\n[2, 2, 2, 2, 2]\n\nFor a more general-purpose function (i.e. get the nth-lowest) you could keep a count of how many individual scores you'd iterated over and return whatever group the nth score landed in.\n", "Here's what we ended up doing:\nif __name__ == '__main__':\n scores = []\n names_scores = []\n for _ in range(int(input())):\n name = input()\n score = float(input())\n scores.append(score)\n names_scores.append([name, score])\n\nfrom collections import Counter\n\nscores = sorted(Counter(scores).items())\n\nsecond_lowest_score = scores[1][0]\n\nnames = sorted([name_score[0] for name_score in names_scores if name_score[1] == second_lowest_score])\n\nprint(\"\\n\".join(names))\n\n" ]
[ 1, 0 ]
[]
[]
[ "list", "python", "rank" ]
stackoverflow_0074484169_list_python_rank.txt
Q: Why can’t I find a token count of a specific Solana address? I've been trying to work on a Solana program in Python using the solana.py framework. However, I've run into a bit of a problem while trying to find the amount of USDC tokens designated to a specific wallet. I've been brewing in the Solana Cookbook for hours but still haven't found anything. My code can be found below.
# This is the public key of the Solana USDC token
usdc_key = PublicKey('4zMMC9srt5Ri5X14GAgXhaHii3GnPAEERYPJgZJDncDU')

# This function is supposed to mimic the one presented in TS by the Solana Cookbook
def findUSDCBalance():
    usdc_balance = client.get_token_accounts_by_owner_json_parsed(sol_address, usdc_key)
    return(usdc_balance)

I also receive this error message, which I can't figure out due to trouble understanding the documentation (or lack thereof).
AttributeError: 'PublicKey' object has no attribute 'mint'

This question is not about parsing the response, but just trying to receive a proper JSON response. Is there any specific way I can do this using this framework or any existing solution?
A: The function you're calling expects TokenAccountOpts to define the mint of the token to fetch, so your code should be
from solana.rpc.types import TokenAccountOpts
usdc_balance = client.get_token_accounts_by_owner_json_parsed(sol_address, TokenAccountOpts(mint=usdc_key))
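A sketch wiring that fix back into the question's function, under the same assumptions (client and sol_address already defined as in the original code):
from solana.rpc.types import TokenAccountOpts

def findUSDCBalance():
    # pass the mint via TokenAccountOpts instead of the bare PublicKey
    opts = TokenAccountOpts(mint=usdc_key)
    return client.get_token_accounts_by_owner_json_parsed(sol_address, opts)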
Why can’t I find a token count of a specific Solana address?
I've been trying to work on a Solana program in Python using the solana.py framework. However, I've run into a bit of a problem while trying to find the amount of USDC tokens designated to a specific wallet. I've been brewing in the Solana Cookbook for hours but still haven't found anything. My code can be found below.
# This is the public key of the Solana USDC token
usdc_key = PublicKey('4zMMC9srt5Ri5X14GAgXhaHii3GnPAEERYPJgZJDncDU')

# This function is supposed to mimic the one presented in TS by the Solana Cookbook
def findUSDCBalance():
    usdc_balance = client.get_token_accounts_by_owner_json_parsed(sol_address, usdc_key)
    return(usdc_balance)

I also receive this error message, which I can't figure out due to trouble understanding the documentation (or lack thereof).
AttributeError: 'PublicKey' object has no attribute 'mint'

This question is not about parsing the response, but just trying to receive a proper JSON response. Is there any specific way I can do this using this framework or any existing solution?
[ "the fcn youre calling expects TokenAccountOpts to define the mint of the token to get from, so your code should be\nfrom solana.rpc.types import TokenAccountOpts\nusdc_balance = client.get_token_accounts_by_owner_json_parsed(sol_address, TokenAccountOpts(mint=usdc_key))\n\n\n" ]
[ 1 ]
[]
[]
[ "attributeerror", "python", "solana" ]
stackoverflow_0074512870_attributeerror_python_solana.txt
Q: Fish weird endline character at end / Fish shell outputs ⏎ I have a Python script that prints some numbers, like this:
results = [42, 21, 64, 32, 16, 8, 4, 2]
for number in results:
    print(number, end=' ')

In the console, the output of this script is:
42 21 64 32 16 8 4 2 ⏎

Why is there a weird character at the end? My IDE is LunarVim. My shell is Fish.
A: Fish shell outputs ⏎ 
This is essentially fish's way of telling you that the output has no trailing newline ("\n"). A plain print() call, which appends a newline by default, will not trigger this. In bash there is no such marker; the prompt may simply start right after the output instead of on the next line.
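A minimal fix on the Python side is to finish with one bare print(), which emits the trailing newline fish is checking for:
results = [42, 21, 64, 32, 16, 8, 4, 2]
for number in results:
    print(number, end=' ')
print()  # emit the trailing "\n" so fish does not show the ⏎ marker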
Fish weird endline character at end / Fish shell outputs ⏎
I have a Python script that prints some numbers, like this: results = [42, 21, 64, 32, 16, 8, 4, 2] for number in results: print(number, end=' ') In the console, the output of this script is: 42 21 64 32 16 8 4 2 ⏎ Why is there a weird character at the end? My IDE is LunarVim. My shell is Fish.
[ "Fish shell outputs ⏎ \nThis is essentially fish's way of telling you that there is no trailing newline or \"\\n\". Using print() will probably not result in this. In bash, the terminal may start after the output, instead of the next line.\n" ]
[ 0 ]
[]
[]
[ "console", "fish", "python", "stdout", "terminal" ]
stackoverflow_0074531603_console_fish_python_stdout_terminal.txt
Q: Reason: a bytes-like object is required, not 'str' I want to write the content: sample = {'Details': [{'user1': '{"d8": "X121", "d0": "NIL", "d4": false, "d3": false, "d2": false}', 'name': 'set a sample'}], 'person1': 1} using code: s1 = json.dumps(sample).replace('"', '"').replace("'", "'") ftp = ssh.open_sftp() ftp.putfo(BytesIO(s1), 'newfile.txt') But getting error: Reason: <class 'TypeError'> a bytes-like object is required, not 'str' Edit 2: import json per_json = {'d8': 'X121', 'd0': 'NIL', 'd4': False, 'd3': False, 'd2': False} p1 = { "name" : "set a sample" } p1["additional_info"] = json.dumps(per_json) p1_list={"person1":1} p1_list["Details"]=[p1] s1 = json.dumps(p1_list).replace('}"', "}").replace('"{', "{").replace('\\"', '"') ftp = ssh.open_sftp() ftp.putfo(s1, 'newfile.txt') ftp.close() Reason: <class 'AttributeError'> 'str' object has no attribute 'read' A: Part of your JSON is still a string. This should do the trick s1 = json.dumps(sample).replace('}"', "}").replace('"{', "{").replace('\\"', '"') Parsing s1 using json.loads(s1) returns the following JSON {'Details': [{'user1': {'d8': 'X121', 'd0': 'NIL', 'd4': False, 'd3': False, 'd2': False}, 'name': 'set a sample'}], 'person1': 1}
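Note that the original TypeError comes from handing BytesIO a str; BytesIO needs bytes, so encoding the JSON fixes it. A sketch, assuming a paramiko-style SFTP client whose putfo takes a file-like object (as in the question's code):
from io import BytesIO
import json

payload = json.dumps(sample).encode("utf-8")  # bytes, as BytesIO requires
ftp = ssh.open_sftp()
ftp.putfo(BytesIO(payload), "newfile.txt")    # putfo wants a file-like object, not a str
ftp.close()

This also avoids the second error, since a str (as in ftp.putfo(s1, ...)) has no read() method.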
Reason: a bytes-like object is required, not 'str'
I want to write the content: sample = {'Details': [{'user1': '{"d8": "X121", "d0": "NIL", "d4": false, "d3": false, "d2": false}', 'name': 'set a sample'}], 'person1': 1} using code: s1 = json.dumps(sample).replace('"', '"').replace("'", "'") ftp = ssh.open_sftp() ftp.putfo(BytesIO(s1), 'newfile.txt') But getting error: Reason: <class 'TypeError'> a bytes-like object is required, not 'str' Edit 2: import json per_json = {'d8': 'X121', 'd0': 'NIL', 'd4': False, 'd3': False, 'd2': False} p1 = { "name" : "set a sample" } p1["additional_info"] = json.dumps(per_json) p1_list={"person1":1} p1_list["Details"]=[p1] s1 = json.dumps(p1_list).replace('}"', "}").replace('"{', "{").replace('\\"', '"') ftp = ssh.open_sftp() ftp.putfo(s1, 'newfile.txt') ftp.close() Reason: <class 'AttributeError'> 'str' object has no attribute 'read'
[ "Part of your JSON is still a string.\nThis should do the trick\ns1 = json.dumps(sample).replace('}\"', \"}\").replace('\"{', \"{\").replace('\\\\\"', '\"')\n\nParsing s1 using json.loads(s1) returns the following JSON\n{'Details': [{'user1': {'d8': 'X121',\n 'd0': 'NIL',\n 'd4': False,\n 'd3': False,\n 'd2': False},\n 'name': 'set a sample'}],\n 'person1': 1}\n\n" ]
[ 0 ]
[]
[]
[ "json", "python" ]
stackoverflow_0074534034_json_python.txt
Q: can only concatenate str (not "tuple") to str? How to get rid of tuple? while True:
        time.sleep(SLEEP_BETWEEN_ACTIONS)
        input_1 = input("\n" + player1_name + ": " + random.choice(player_turn_text) + " Hit the enter to roll dice: ")
        print("\nRolling dice...")
        dice_value = get_dice_value()
        time.sleep(SLEEP_BETWEEN_ACTIONS)
        print(player1_name + " moving....")
        player1_current_position = snake_ladder(player1_name, player1_current_position, dice_value)

        check_win(player1_name, player1_current_position)

if __name__ == "__main__":
    start()

My error log says
TypeError: can only concatenate str (not "tuple") to str
    input_1 = input("\n" + player1_name + ": " + random.choice(player_turn_text) + " Hit the enter to roll dice: ")

A: I don't know the signatures of the functions you use, which are not present in the attached code. But it seems one of the elements here:
+ player1_name + ": " + random.choice(player_turn_text) +

returns a tuple.
The simplest solution would be to wrap it like str(player1_name) - and the same with the other one.
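An f-string sidesteps the concatenation entirely, because formatting converts each value with str() regardless of its type; a sketch assuming one of the two names is the tuple in question:
input_1 = input(f"\n{player1_name}: {random.choice(player_turn_text)} Hit the enter to roll dice: ")

If player1_name really is a tuple (for example, a function returned several values and the whole tuple was kept), the cleaner fix is to unpack or index it where it is created, rather than casting at the point of use.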
can only concatenate str (not "tuple") to str? How to get rid of tuple?
while True: time.sleep(SLEEP_BETWEEN_ACTIONS) input_1 = input("\n" + player1_name + ": " + random.choice(player_turn_text) + " Hit the enter to roll dice: ") print("\nRolling dice...") dice_value = get_dice_value() time.sleep(SLEEP_BETWEEN_ACTIONS) print(player1_name + " moving....") player1_current_position = snake_ladder(player1_name, player1_current_position, dice_value) check_win(player1_name, player1_current_position) if __name__ == "__main__": start() My error log says TypeError: can only concatenate str (not "tuple") to str input_1 = input("\n" + player1_name + ": " + random.choice(player_turn_text) + " Hit the enter to roll dice: ")
[ "I don't know the signatures of the function you use, and that are not present in the attached code. But it seems, one of the elements here:\n+ player1_name + \": \" + random.choice(player_turn_text) +\n\nReturns tuple.\nThe simplest solution to that, would be to call it like str(player1_name) - and the same with the other one.\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074534456_python.txt
Q: writing a program for finding nth prime term to find 10001st prime I have written my nth-prime function, which takes in n, compiles prime numbers into the list "primes", and returns the element at index -1 of primes, which is the 10001st prime. Please let me know how I could improve my code, or suggest a better approach for this problem.
def nthprime(n):
    primes = [2]
    attempt = 3
    while len(primes) < n:  # it runs until the len of primes is greater or equal to n
        if all(attempt % prime != 0 for prime in primes):
            primes.append(attempt)
        attempt += 2
    return primes[-1]

print(nthprime(10001))

Thanks in advance for the help :)
A: Your code is absolutely fine, and quite pythonic :)
The algorithm you are implementing could be optimized, though, because you don't need to check every prime number as a factor for the new attempted number; for example numbers ending in a '5' are trivially not prime (I'm sure you can see why).
I suggest you check out the Sieve of Eratosthenes, which is a famous prime-number sieve, i.e. an algorithm to find all the primes up to a certain number. I have linked the Wikipedia here, and from there you should also be able to find some information on more modern sieves, although they are all based on the same principle.
https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
James from mCoding also made some great videos about implementing this in Python, and showed how you could scale these ideas up to get some really big lists of primes, so if you want some inspiration on how to improve your code, these would be a great place to start:
https://www.youtube.com/watch?v=JA_YrFwE1hc
https://www.youtube.com/watch?v=xwM8PGBYazM
Good luck!
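A sketch of the sieve approach the answer links to. Since a sieve needs an upper limit rather than a count, this uses Rosser's theorem, which bounds the nth prime above by n(ln n + ln ln n) for n >= 6 (the small-n fallback of 15 is an arbitrary safe choice):
import math

def nthprime(n):
    limit = 15 if n < 6 else int(n * (math.log(n) + math.log(math.log(n))))
    sieve = [True] * (limit + 1)
    sieve[0] = sieve[1] = False
    for i in range(2, int(limit ** 0.5) + 1):
        if sieve[i]:
            # strike out every multiple of i starting at i*i
            sieve[i * i :: i] = [False] * len(sieve[i * i :: i])
    primes = [i for i, is_prime in enumerate(sieve) if is_prime]
    return primes[n - 1]

print(nthprime(10001))  # 104743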
writing a program for finding nth prime term to find 10001st prime
I have written my nth-prime function, which takes in n, compiles prime numbers into the list "primes", and returns the element at index -1 of primes, which is the 10001st prime. Please let me know how I could improve my code, or suggest a better approach for this problem.
def nthprime(n):
    primes = [2]
    attempt = 3
    while len(primes) < n:  # it runs until the len of primes is greater or equal to n
        if all(attempt % prime != 0 for prime in primes):
            primes.append(attempt)
        attempt += 2
    return primes[-1]

print(nthprime(10001))

Thanks in advance for the help :)
[ "Your code is absolutely fine, and quite pythonic :)\nThe algorithm you are implementing could be optimized, though, because you don't need to check every prime number as a factor for the new attempted number; for example numbers ending in a '5' are trivially not prime (I'm sure you can see why).\nI suggest you check out the Sieve of Eratosthenes, which is a famous prime-number sieve, i.e. an algorithm to find all the primes up to a certain number. I have linked the Wikipedia here, and from there you should also be able to find some information on more modern sieves, although they are all based on the same principle.\nhttps://en.wikipedia.org/wiki/Sieve_of_Eratosthenes\nJames from mCoding also made some great videos about implementing this in Python, and showed how you could scale these ideas up to get some really big lists of primes, so if you want some inspiration on how to improve your code, these would be a great place to start:\nhttps://www.youtube.com/watch?v=JA_YrFwE1hc\nhttps://www.youtube.com/watch?v=xwM8PGBYazM\nGood luck!\n" ]
[ 1 ]
[]
[]
[ "primes", "python", "python_3.x", "while_loop" ]
stackoverflow_0074533784_primes_python_python_3.x_while_loop.txt
Q: why does pyspark filter a string column work with integers? And why does pandas behave the other way around? When I have a pyspark dataframe with a column of numbers as strings and filter it using an integer, the filter applies to the strings:
df = spark.createDataFrame([
    ("a", "1"),
    ("a", "2"),
    ("b", "1"),
    ("b", "2"),
    ], 
    ["id", "number"])

df.filter(col('number')==1)

results in
id  number
a   1 
b   1 

whereas, when I convert it to a pandas data frame and apply the same filter, the result is an empty df
pandas_df = df.toPandas()
pandas_df[pandas_df['number']==1]

# result
id  number

That leads to two questions:

why does the pyspark filter match the strings when I filter using an integer?
is there a way to filter type-specifically in pyspark, so it produces the same results as in pandas?

That would have saved me quite some time searching for an error caused by this behaviour.
A: This is the physical plan of your query:

== Physical Plan ==
*(1) Filter (isnotnull(number#18) AND (cast(number#18 as int) = 1))
+- *(1) Scan ExistingRDD[id#17,number#18]

As you can see, Spark is casting the column to integer: cast(number#18 as int) = 1
You can access logical and physical plans with .explain().
If you change your query to df.filter(col('number')=="1"), there will be no casting.
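To see the difference directly, you can compare the two predicates and inspect the plan yourself; a short sketch using the question's df:
from pyspark.sql.functions import col

df.filter(col("number") == "1").show()   # pure string comparison, no cast
df.filter(col("number") == 1).explain()  # plan shows cast(number as int) = 1

So filtering against the string literal "1" is the type-specific variant that matches pandas' behaviour of comparing without implicit conversion.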
why does pyspark filter a string column work with integers? And why does pandas behave the other way around?
When I have a pyspark dataframe with a column of numbers as strings and filter it using an integer, the filter applies to the strings:
df = spark.createDataFrame([
    ("a", "1"),
    ("a", "2"),
    ("b", "1"),
    ("b", "2"),
    ], 
    ["id", "number"])

df.filter(col('number')==1)

results in
id  number
a   1 
b   1 

whereas, when I convert it to a pandas data frame and apply the same filter, the result is an empty df
pandas_df = df.toPandas()
pandas_df[pandas_df['number']==1]

# result
id  number

That leads to two questions:

why does the pyspark filter match the strings when I filter using an integer?
is there a way to filter type-specifically in pyspark, so it produces the same results as in pandas?

That would have saved me quite some time searching for an error caused by this behaviour.
[ "This is the physical plan of your query:\n\n== Physical Plan ==\n*(1) Filter (isnotnull(number#18) AND (cast(number#18 as int) = 1))\n+- *(1) Scan ExistingRDD[id#17,number#18]\n\nAs you can see, spark is casting the column to integer cast(number#18 as int) = 1\nYou can access logical and physical plans with .explain().\nIf you change your query by df.filter(col('number')==\"1\"), there will be no casting.\n" ]
[ 2 ]
[]
[]
[ "apache_spark", "dataframe", "pandas", "pyspark", "python" ]
stackoverflow_0074533560_apache_spark_dataframe_pandas_pyspark_python.txt
Q: Update value within nested dict of arbitrary depth without changing the rest of the dict in Python So I have a nested dictionary in Python: {'entry': {'definition': 'str', 'endTime': 'str', 'entryID': {'identifierType': 'str', 'identifierValue': 'str'}, 'instrument': {'FIB': {'FIBSpotSize': {'notes': 'str', 'qualifier': 'str', 'uncertainty': {'uncertaintyType': 'str', 'value': 'float'}, 'unit': 'str', 'value': 'float'}, 'angleToEBeam': {'notes': 'str', 'qualifier': 'str', 'uncertainty': {'uncertaintyType': 'str', 'value': 'float'}, 'unit': 'str', 'value': 'float'},... . . . I want a way to simply access a specific key-value pair, given an arbitrarily long keys. For instance, I want entry.instrument.angleToEBeam.uncertainty.value because I want to update the value of value. I want to write a function that takes a list of keys of arbitrary length, where the last key is the value I'd like to update, and the value which I'd like to set/update. I'm a bit confused on how to navigate/walk these nested dictionaries. As an example of what I'm trying to do, I have a dict of values to update which looks like {'entry.instrument.angleToEBeam.uncertainty.value': 2.23', ...} And I'd like to feed this list into some function such that it updates the nested dict to have values instead of the expected type, here shown just for the uncertainty parameter: {'entry': {'definition': 'str', 'endTime': 'str', 'entryID': {'identifierType': 'str', 'identifierValue': 'str'}, 'instrument': {'FIB': {'FIBSpotSize': {'notes': 'str', 'qualifier': 'str', 'uncertainty': {'uncertaintyType': 'str', 'value': 'float'}, 'unit': 'str', 'value': 'float'}, 'angleToEBeam': {'notes': 'str', 'qualifier': 'str', 'uncertainty': {'uncertaintyType': 'pct', 'value': 3.102}, 'unit': 'deg', 'value': 2.23},... . . . Ultimately, these have to be exported as JSON files, hence the nested dictionary. Any ideas? Some related threads: Update value of a nested dictionary of varying depth How to completely traverse a complex dictionary of unknown depth? But I couldn't figure out how to apply them to my situation, as they deal with flattening the dict entirely. A: JMESPath is a powerful solution for querying a nested dict, but unfortunately doesn't seem to offer lvalue options (modification). 
In the end, here is something built from first principles: def xupdate(dct, path, value, createkeys=False): if path: k, *path = path subdct = dct.get(k, {}) if createkeys else dct[k] return {**dct, **{k: xupdate(subdct, path, value)}} return value def dotupdate(dct, dotmods, createkeys=False): for kdot, value in dotmods.items(): dct = xupdate(dct, kdot.split('.'), value, createkeys) return dct Example dct = { 'entry': { 'definition': 'str', 'endTime': 'str', 'entryID': { 'identifierType': 'str', 'identifierValue': 'str'}, 'instrument': { 'FIB': { 'FIBSpotSize': { 'notes': 'str', 'qualifier': 'str', 'uncertainty': { 'uncertaintyType': 'str', 'value': 'float'}, 'unit': 'str', 'value': 'float'}, 'angleToEBeam': { 'notes': 'str', 'qualifier': 'str', 'uncertainty': { 'uncertaintyType': 'str', 'value': 'float'}, 'unit': 'str', 'value': 'float'}}}}} dotmods = { 'entry.instrument.FIB.angleToEBeam.uncertainty.value': 2.23, 'entry.endTime': '2023-01-01', } >>> dotupdate(dct, dotmods) {'entry': {'definition': 'str', 'endTime': '2023-01-01', 'entryID': {'identifierType': 'str', 'identifierValue': 'str'}, 'instrument': {'FIB': {'FIBSpotSize': {'notes': 'str', 'qualifier': 'str', 'uncertainty': {'uncertaintyType': 'str', 'value': 'float'}, 'unit': 'str', 'value': 'float'}, 'angleToEBeam': {'notes': 'str', 'qualifier': 'str', 'uncertainty': {'uncertaintyType': 'str', 'value': 2.23}, 'unit': 'str', 'value': 'float'}}}}} Notes createkeys: if True, keys that don't exist in the original dict (e.g. typos, incomplete path) are created. In the original question, there was a reference to the non-existent 'entry.instrument.angleToEBeam.uncertainty.value' (missing .FIB); that would create the missing keys, which is probably not desired. The order of the nested dict stays the same. The original dict itself is not modified; the returned value is a semi-copy (a deep copy for the parts that are modified; unmodified sub-dicts are copied by reference).
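If mutating the original dict in place is acceptable (the xupdate above deliberately returns a semi-copy instead), a shorter alternative sketch is to walk down to the parent node and assign; note it raises KeyError when a segment of the dotted path is missing:
from functools import reduce

def set_by_path(dct, dotted_key, value):
    *parents, last = dotted_key.split(".")
    reduce(lambda d, k: d[k], parents, dct)[last] = value  # walk down, then assign

set_by_path(dct, "entry.instrument.FIB.angleToEBeam.uncertainty.value", 2.23)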
Update value within nested dict of arbitrary depth without changing the rest of the dict in Python
So I have a nested dictionary in Python: {'entry': {'definition': 'str', 'endTime': 'str', 'entryID': {'identifierType': 'str', 'identifierValue': 'str'}, 'instrument': {'FIB': {'FIBSpotSize': {'notes': 'str', 'qualifier': 'str', 'uncertainty': {'uncertaintyType': 'str', 'value': 'float'}, 'unit': 'str', 'value': 'float'}, 'angleToEBeam': {'notes': 'str', 'qualifier': 'str', 'uncertainty': {'uncertaintyType': 'str', 'value': 'float'}, 'unit': 'str', 'value': 'float'},... . . . I want a way to simply access a specific key-value pair, given an arbitrarily long keys. For instance, I want entry.instrument.angleToEBeam.uncertainty.value because I want to update the value of value. I want to write a function that takes a list of keys of arbitrary length, where the last key is the value I'd like to update, and the value which I'd like to set/update. I'm a bit confused on how to navigate/walk these nested dictionaries. As an example of what I'm trying to do, I have a dict of values to update which looks like {'entry.instrument.angleToEBeam.uncertainty.value': 2.23', ...} And I'd like to feed this list into some function such that it updates the nested dict to have values instead of the expected type, here shown just for the uncertainty parameter: {'entry': {'definition': 'str', 'endTime': 'str', 'entryID': {'identifierType': 'str', 'identifierValue': 'str'}, 'instrument': {'FIB': {'FIBSpotSize': {'notes': 'str', 'qualifier': 'str', 'uncertainty': {'uncertaintyType': 'str', 'value': 'float'}, 'unit': 'str', 'value': 'float'}, 'angleToEBeam': {'notes': 'str', 'qualifier': 'str', 'uncertainty': {'uncertaintyType': 'pct', 'value': 3.102}, 'unit': 'deg', 'value': 2.23},... . . . Ultimately, these have to be exported as JSON files, hence the nested dictionary. Any ideas? Some related threads: Update value of a nested dictionary of varying depth How to completely traverse a complex dictionary of unknown depth? But I couldn't figure out how to apply them to my situation, as they deal with flattening the dict entirely.
[ "JMESPath is a powerful solution for querying a nested dict, but unfortunately doesn't seem to offer lvalue options (modification).\nIn the end, here is something built from first principles:\ndef xupdate(dct, path, value, createkeys=False):\n if path:\n k, *path = path\n subdct = dct.get(k, {}) if createkeys else dct[k]\n return {**dct, **{k: xupdate(subdct, path, value)}}\n return value\n\ndef dotupdate(dct, dotmods, createkeys=False):\n for kdot, value in dotmods.items():\n dct = xupdate(dct, kdot.split('.'), value, createkeys)\n return dct\n\nExample\ndct = {\n 'entry': {\n 'definition': 'str', 'endTime': 'str', 'entryID': {\n 'identifierType': 'str', 'identifierValue': 'str'},\n 'instrument': {\n 'FIB': {\n 'FIBSpotSize': {\n 'notes': 'str', 'qualifier': 'str', 'uncertainty': {\n 'uncertaintyType': 'str', 'value': 'float'},\n 'unit': 'str', 'value': 'float'},\n 'angleToEBeam': {\n 'notes': 'str', 'qualifier': 'str', 'uncertainty': {\n 'uncertaintyType': 'str', 'value': 'float'},\n 'unit': 'str', 'value': 'float'}}}}}\n\ndotmods = {\n 'entry.instrument.FIB.angleToEBeam.uncertainty.value': 2.23,\n 'entry.endTime': '2023-01-01',\n}\n\n>>> dotupdate(dct, dotmods)\n{'entry': {'definition': 'str',\n 'endTime': '2023-01-01',\n 'entryID': {'identifierType': 'str', 'identifierValue': 'str'},\n 'instrument': {'FIB': {'FIBSpotSize': {'notes': 'str',\n 'qualifier': 'str',\n 'uncertainty': {'uncertaintyType': 'str', 'value': 'float'},\n 'unit': 'str',\n 'value': 'float'},\n 'angleToEBeam': {'notes': 'str',\n 'qualifier': 'str',\n 'uncertainty': {'uncertaintyType': 'str', 'value': 2.23},\n 'unit': 'str',\n 'value': 'float'}}}}}\n\nNotes\n\ncreatekeys: if True, keys that don't exist in the original dict (e.g. typos, incomplete path) are created. In the original question, there was a reference to the non-existent 'entry.instrument.angleToEBeam.uncertainty.value' (missing .FIB); that would create the missing keys, which is probably not desired.\nThe order of the nested dict stays the same.\nThe original dict itself is not modified; the returned value is a semi-copy (a deep copy for the parts that are modified; unmodified sub-dicts are copied by reference).\n\n" ]
[ 0 ]
[]
[]
[ "dictionary", "json", "nested", "python" ]
stackoverflow_0074533006_dictionary_json_nested_python.txt
Q: Threading and Tkinter - How to use the threading module with my simple example? I don't understand how to use the threading module properly. In this example I have two tkinter widgets, a button and a progress bar. The progress bar (configured in indeterminate mode) has to be active when the user pushes the button, and when the task is completed, the progress bar has to be stopped. import tkinter as tk from tkinter import ttk import threading, ipaddress class MainWindow: def __init__(self): self.parent=tk.Tk() self.parent.geometry("786x524+370+100") self.parent.title("Test") self.parent.configure(background="#f0f0f0") self.parent.minsize(786, 524) self.ProBar=ttk.Progressbar(self.parent, mode="indeterminate") self.ProBar.pack(padx=(40, 40), pady=(40, 40), fill=tk.BOTH) self.StartButton=ttk.Button(self.parent, text="Start", command=self.MyHeavyTask) self.StartButton.pack(padx=(40, 40), pady=(40, 40), fill=tk.BOTH) self.parent.mainloop() # my start function: def Start(self): self.ProBar.start(4) self.MyHeavyTask() self.ProBar.stop() # my real start function. it's just an example, it needs time to be completed: def MyHeavyTask(self): ls=[] obj=ipaddress.ip_network("10.0.0.0/8") for obj in list(obj.hosts()): print(obj.exploded) # start my test: if __name__=="__main__": app=MainWindow() This code has an issue, it can't run the function "MyHeavyTask" and at the same time keep active the progress bar widget. to solve it, I tried to put "MyHeavyTask" in an indipendent thread changing the line 17 with this one: self.StartButton=ttk.Button(self.parent, text="Start", command=threading.Thread(target=self.MyHeavyTask).start()) unfortunately this solution doesn't work. when I press the button, nothig happens…why? What is the right way to use the threading module in my example? A: You can add a method to the class def Get_Input(self): message = input(">") if message: send_message(message) and add in init class threading.Thread(target=self.Get_Input, args=(,)).start() Please note : If you passing one argument, you need to use threading.Thread(target=self.Get_Input, args=(var1,)).start() Unlike common sense :) A: Here a runnable example similar to the code in your question, that shows a way to run a background task and keep a ttk.Progressbar active simultaneously. It does this by using the universal after() widget method to repeatedly schedule calls to a method that checks whether the background task is running and updates the progress bar if it is. It also disables and re-enables the Start button appropriately so the task can't be start again while it's running. Note I strongly suggest you read and start following the PEP 8 - Style Guide for Python Code. from random import randint import tkinter as tk from tkinter import ttk import threading from time import sleep class MainWindow: def __init__(self): self.parent = tk.Tk() self.parent.geometry("786x524+370+100") self.parent.title("Test") self.parent.configure(background="#f0f0f0") self.parent.minsize(786, 524) self.task = threading.Thread(target=self.my_heavy_task) self.pro_bar = ttk.Progressbar(self.parent, mode="indeterminate") self.pro_bar.pack(padx=(40, 40), pady=(40, 40), fill=tk.BOTH) self.start_btn = ttk.Button(self.parent, text="Start", command=self.start) self.start_btn.pack(padx=(40, 40), pady=(40, 40), fill=tk.BOTH) self.parent.mainloop() def check_thread(self): if self.task.is_alive(): self.pro_bar.step() # Update progressbar. self.parent.after(20, self.check_thread) # Call again after delay. 
else: self.pro_bar.stop() self.start_btn.config(state=tk.ACTIVE) def start(self): """Start heavy background task.""" self.start_btn.config(state=tk.DISABLED) self.task.start() self.pro_bar.start() self.check_thread() # Start checking thread. def my_heavy_task(self): """Slow background task.""" for obj in (randint(0, 99) for _ in range(6)): print(obj) sleep(.5) if __name__=="__main__": app = MainWindow() A: Does this help? start_thread = MainWindow() run_test = threading.Thread(None, start_thread.start) run_test.start() # start my test: if __name__=="__main__": app=MainWindow()
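One point worth spelling out: command=threading.Thread(target=self.MyHeavyTask).start() calls start() immediately, while the button is being created, and binds the return value of start() (which is None) as the command. That is why pressing the button appears to do nothing. Deferring the call with a lambda fixes that part; a sketch against the question's line:
self.StartButton = ttk.Button(
    self.parent,
    text="Start",
    command=lambda: threading.Thread(target=self.MyHeavyTask, daemon=True).start(),
)

Even then, updating tkinter widgets from the worker thread is not safe, so the after()-polling pattern in the answer above remains the better route for driving the progress bar.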
Threading and Tkinter - How to use the threading module with my simple example?
I don't understand how to use the threading module properly. In this example I have two tkinter widgets, a button and a progress bar. The progress bar (configured in indeterminate mode) has to be active when the user pushes the button, and when the task is completed, the progress bar has to be stopped.
import tkinter as tk
from tkinter import ttk
import threading, ipaddress

class MainWindow:
    def __init__(self):
        self.parent=tk.Tk()
        self.parent.geometry("786x524+370+100")
        self.parent.title("Test")
        self.parent.configure(background="#f0f0f0")
        self.parent.minsize(786, 524)

        self.ProBar=ttk.Progressbar(self.parent, mode="indeterminate")
        self.ProBar.pack(padx=(40, 40), pady=(40, 40), fill=tk.BOTH)

        self.StartButton=ttk.Button(self.parent, text="Start", command=self.MyHeavyTask)
        self.StartButton.pack(padx=(40, 40), pady=(40, 40), fill=tk.BOTH)

        self.parent.mainloop()

    # my start function:
    def Start(self):
        self.ProBar.start(4)
        self.MyHeavyTask()
        self.ProBar.stop()

    # my real start function. it's just an example, it needs time to be completed:
    def MyHeavyTask(self):
        ls=[]
        obj=ipaddress.ip_network("10.0.0.0/8")
        for obj in list(obj.hosts()):
            print(obj.exploded)

# start my test:
if __name__=="__main__":
    app=MainWindow()

This code has an issue: it can't run the function "MyHeavyTask" and keep the progress bar widget active at the same time. To solve it, I tried to put "MyHeavyTask" in an independent thread by changing line 17 to this one:
self.StartButton=ttk.Button(self.parent, text="Start", command=threading.Thread(target=self.MyHeavyTask).start())

Unfortunately this solution doesn't work. When I press the button, nothing happens… why? What is the right way to use the threading module in my example?
[ "You can add a method to the class\ndef Get_Input(self):\n message = input(\">\")\n if message:\n send_message(message)\n\nand add in init class\nthreading.Thread(target=self.Get_Input, args=(,)).start()\n\nPlease note :\nIf you passing one argument, you need to use\nthreading.Thread(target=self.Get_Input, args=(var1,)).start()\n\nUnlike common sense :)\n", "Here a runnable example similar to the code in your question, that shows a way to run a background task and keep a ttk.Progressbar active simultaneously. It does this by using the universal after() widget method to repeatedly schedule calls to a method that checks whether the background task is running and updates the progress bar if it is. It also disables and re-enables the Start button appropriately so the task can't be start again while it's running.\nNote I strongly suggest you read and start following the PEP 8 - Style Guide for Python Code.\nfrom random import randint\nimport tkinter as tk\nfrom tkinter import ttk\nimport threading\nfrom time import sleep\n\n\nclass MainWindow:\n def __init__(self):\n self.parent = tk.Tk()\n self.parent.geometry(\"786x524+370+100\")\n self.parent.title(\"Test\")\n self.parent.configure(background=\"#f0f0f0\")\n self.parent.minsize(786, 524)\n\n self.task = threading.Thread(target=self.my_heavy_task)\n\n self.pro_bar = ttk.Progressbar(self.parent, mode=\"indeterminate\")\n self.pro_bar.pack(padx=(40, 40), pady=(40, 40), fill=tk.BOTH)\n\n self.start_btn = ttk.Button(self.parent, text=\"Start\", command=self.start)\n self.start_btn.pack(padx=(40, 40), pady=(40, 40), fill=tk.BOTH)\n\n self.parent.mainloop()\n\n def check_thread(self):\n if self.task.is_alive():\n self.pro_bar.step() # Update progressbar.\n self.parent.after(20, self.check_thread) # Call again after delay.\n else:\n self.pro_bar.stop()\n self.start_btn.config(state=tk.ACTIVE)\n\n def start(self):\n \"\"\"Start heavy background task.\"\"\"\n self.start_btn.config(state=tk.DISABLED)\n self.task.start()\n self.pro_bar.start()\n self.check_thread() # Start checking thread.\n\n def my_heavy_task(self):\n \"\"\"Slow background task.\"\"\"\n for obj in (randint(0, 99) for _ in range(6)):\n print(obj)\n sleep(.5)\n\n\nif __name__==\"__main__\":\n app = MainWindow()\n\n\n", "Does this help?\nstart_thread = MainWindow()\nrun_test = threading.Thread(None, start_thread.start)\nrun_test.start()\n\n# start my test:\nif __name__==\"__main__\":\n app=MainWindow()\n\n" ]
[ 0, 0, 0 ]
[]
[]
[ "multithreading", "python", "tkinter" ]
stackoverflow_0072147490_multithreading_python_tkinter.txt
Q: how do i replace a string in a list I'm trying to change a string in a list called lista, composed of n times |_|; in a function I'm trying to change one specific place of the list to "X" but nothing is working
lista=["|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|"]

I want to change only the middle one to |X|
I already tried different methods, like replace, or pop and then insert with a new value, but nothing has changed and it always gives me an error
A: Use len(lista) // 2 to get the middle index.
Should there be an un-even number, // 2 will 'round' it to the previous integer, so 9 --> 4
lista = [ "|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|" ]
middle = len(lista) // 2

lista[middle] = '|X|'

print(lista)

['|_|', '|_|', '|_|', '|_|', '|_|', '|X|', '|_|', '|_|', '|_|', '|_|']

Try it online

A: lista=["|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|"]
lista[ round(len(lista)/2)-1 ] = '|X|'

Output:
['|_|', '|_|', '|_|', '|_|', '|X|', '|_|', '|_|', '|_|', '|_|', '|_|']

Use -1 because indexes start from 0

A: This code places "|X|" in the middle of the list:
if len(lista)%2==0:
    lista[int(len(lista)/2)-1]='|X|'
    lista[int(len(lista)/2)]='|X|'
else:
    lista[int(np.floor(len(lista)/2))]='|X|'

Output When len(lista)==10
['|_|', '|_|', '|_|', '|_|', '|X|', '|X|', '|_|', '|_|', '|_|', '|_|']

Output When len(lista)==11
['|_|', '|_|', '|_|', '|_|', '|_|', '|X|', '|_|', '|_|', '|_|', '|_|', '|_|']
how do i replace a string in a list
I'm trying to change a string in a list called lista, composed of n times |_|; in a function I'm trying to change one specific place of the list to "X" but nothing is working
lista=["|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|","|_|"]

I want to change only the middle one to |X|
I already tried different methods, like replace, or pop and then insert with a new value, but nothing has changed and it always gives me an error
[ "Use len(lista) // 2 to get the middle index.\nShould there be an un-even number, // 2 will 'round' it to the previous integer, so 9 --> 4\nlista = [ \"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\" ]\nmiddle = len(lista) // 2\n\nlista[middle] = '|X|'\n\nprint(lista)\n\n['|_|', '|_|', '|_|', '|_|', '|_|', '|X|', '|_|', '|_|', '|_|', '|_|']\n\nTry it online\n", "lista=[\"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\",\"|_|\"]\nlista[ round(len(lista)/2)-1 ] = '|X|'\n\nOutput:\n['|_|', '|_|', '|_|', '|_|', '|X|', '|_|', '|_|', '|_|', '|_|', '|_|']\n\nUse -1 because indexes starts from 0\n", "This code places \"|X|\" in the middle of the list:\nif len(lista)%2==0:\n lista[int(len(lista)/2)-1]='|X|'\n lista[int(len(lista)/2)]='|X|'\nelse:\n lista[int(np.floor(len(lista)/2))]='|X|'\n\nOutput When len(lista)==10\n['|_|', '|_|', '|_|', '|_|', '|X|', '|X|', '|_|', '|_|', '|_|', '|_|']\n\nOutput When len(lista)==11\n['|_|', '|_|', '|_|', '|_|', '|_|', '|X|', '|_|', '|_|', '|_|', '|_|', '|_|']\n\n" ]
[ 1, 1, 1 ]
[]
[]
[ "python", "replace", "string" ]
stackoverflow_0074534094_python_replace_string.txt
Q: Python3 check if exact string match in the event dictionary I have the following event body (dictionary) coming in to the lambda function and I do something like the below:
{
    "test-report": {
        "url": "http://example.com",
        "original-policy": "default-src 'none'; style-src example.com; report-uri /_/test-reports"
    }
}

if 'test-report' in event['body']:
    try:
do something here
My problem is I want to check test-report in the first object in the dictionary. If that does not match, I don't want the function to do anything. However, as you can see, test-reports is also present under the original-policy value, which I don't want to consider for my if statement.
A: Well, I assume that by first object you mean the key in the dictionary.
If so, try:
 if 'test-report' in event['body'].keys():
    try:
do something here
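Worth noting: if event['body'] is already a dict, the plain in operator tests top-level keys only, so the substring inside the original-policy value can never match. The substring problem only arises when the body is still a raw JSON string, which is common with, for example, AWS API Gateway proxy events (an assumption about this setup). A hedged sketch covering both cases:
import json

body = event["body"]
if isinstance(body, str):       # body arrived as a raw JSON string
    body = json.loads(body)

if "test-report" in body:       # dict membership checks top-level keys only
    ...                         # do something here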
Python3 check if exact string match in the event dictionary
I have the following event body (dictionary) coming in to the lambda function and I do something like the below:
{
    "test-report": {
        "url": "http://example.com",
        "original-policy": "default-src 'none'; style-src example.com; report-uri /_/test-reports"
    }
}

if 'test-report' in event['body']:
    try:
do something here
My problem is I want to check test-report in the first object in the dictionary. If that does not match, I don't want the function to do anything. However, as you can see, test-reports is also present under the original-policy value, which I don't want to consider for my if statement.
[ "Well, i asume, that by first objet you mean the key in the dictionary, as they are not ordered.\nIf so, try :\n if 'test-report' in event['body'].keys():\n try:\ndo something here\n\n" ]
[ 0 ]
[]
[]
[ "lambda", "python", "python_3.x" ]
stackoverflow_0074534525_lambda_python_python_3.x.txt
Q: Flask object has no attribute get while following linode tutorial Good morning, Following the linode tutorial here to create a RESTful API https://www.linode.com/docs/guides/create-restful-api-using-python-and-flask/ I keep getting an attribute error 'Flask' object has no attribute 'get' Not sure what's going on because I'm following the tutorial precisely. from flask import Flask application = Flask(__name__) in_memory_datastore = { "COBOL" : {"name": "COBOL", "publication_year": 1960, "contribution": "record data"}, "ALGOL" : {"name": "ALGOL", "publication_year": 1958, "contribution": "scoping and nested functions"}, "APL" : {"name": "APL", "publication_year": 1962, "contribution": "array processing"}, } @application.get('/programming_languages') def list_programming_languages(): return {"programming_languages":list(in_memory_datastore.values())} A: You're probably running an older version of Flask (v2.0.x or below). Flask added @application.get feature in v2.1.x branch (check documentation here). For older flask versions use @application.route('/programming_languages', methods=['GET']). Documentation here.
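For Flask versions without the .get shortcut decorator (per the answer, older releases), the equivalent of the tutorial's route is:
@application.route('/programming_languages', methods=['GET'])
def list_programming_languages():
    return {"programming_languages": list(in_memory_datastore.values())}

Alternatively, upgrading should make the tutorial code work as written:
pip install --upgrade flask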
Flask object has no attribute get while following linode tutorial
Good morning, Following the linode tutorial here to create a RESTful API https://www.linode.com/docs/guides/create-restful-api-using-python-and-flask/ I keep getting an attribute error 'Flask' object has no attribute 'get' Not sure what's going on because I'm following the tutorial precisely. from flask import Flask application = Flask(__name__) in_memory_datastore = { "COBOL" : {"name": "COBOL", "publication_year": 1960, "contribution": "record data"}, "ALGOL" : {"name": "ALGOL", "publication_year": 1958, "contribution": "scoping and nested functions"}, "APL" : {"name": "APL", "publication_year": 1962, "contribution": "array processing"}, } @application.get('/programming_languages') def list_programming_languages(): return {"programming_languages":list(in_memory_datastore.values())}
[ "You're probably running an older version of Flask (v2.0.x or below).\nFlask added @application.get feature in v2.1.x branch (check documentation here).\nFor older flask versions use @application.route('/programming_languages', methods=['GET']). Documentation here.\n" ]
[ 1 ]
[]
[]
[ "flask", "python" ]
stackoverflow_0074534406_flask_python.txt
Q: Python create secret with tags in Google Secret manager I am using Google Cloud run for my applications. I am storing all my secrets in Google Cloud Secret Manager. To read secrets I do the following: from google.cloud import secretmanager import hashlib def access_secret_version(secret_id, version_id="latest"): # Create the Secret Manager client. client = secretmanager.SecretManagerServiceClient() # Build the resource name of the secret version. PROJECT_ID = "xxxxx" name = f"projects/{PROJECT_ID}/secrets/{secret_id}/versions/{version_id}" # Access the secret version. response = client.access_secret_version(name=name) # Return the decoded payload. return response.payload.data.decode('UTF-8') def secret_hash(secret_value): # return the sha224 hash of the secret value return hashlib.sha224(bytes(secret_value, "utf-8")).hexdigest() To write secrets: from google.cloud import secretmanager def create_secret(secret_id): # Create the Secret Manager client. client = secretmanager.SecretManagerServiceClient() # Build the resource name of the parent project. PROJECT_ID = "xxxx" parent = f"projects/{PROJECT_ID}" # Build a dict of settings for the secret secret = {'replication': {'automatic': {}}} # Create the secret response = client.create_secret(secret_id=secret_id, parent=parent, secret=secret) # Print the new secret name. print(f'Created secret: {response.name}') How can I create secrets with tags in Python? A: One method to determine if feature is available is to study the REST API. Secrets do not support tags. Method: projects.secrets.create Resource: Secret Secrets support labels, annotations and versionAliases. Depending on your use case, versionAliases might be work instead of tags. Optional. Mapping from version alias to version name. A version alias is a string with a maximum length of 63 characters and can contain uppercase and lowercase letters, numerals, and the hyphen (-) and underscore ('_') characters. An alias string must start with a letter and cannot be the string 'latest' or 'NEW'. No more than 50 aliases can be assigned to a given secret. Version-Alias pairs will be viewable via secrets.get and modifiable via secrets.patch. At launch Access by Allias will only be supported on versions.get and versions.access. A: Below is the sample Python code to create a secret with tags in Google Secret manager. def create_secret(project_id, secret_id): """ Create a new secret with the given name. A secret is a logical wrapper around a collection of secret versions. Secret versions hold the actual secret material. """ # Import the Secret Manager client library. from google.cloud import secretmanager # Create the Secret Manager client. client = secretmanager.SecretManagerServiceClient() # Build the resource name of the parent project. parent = f"projects/{project_id}" # Create the secret. response = client.create_secret( request={ "parent": parent, "secret_id": secret_id, "secret": {"replication": {"automatic": {}}}, } ) # Print the new secret name. print("Created secret: {}".format(response.name)) To run this code, first set up a Python development environment and install the Secret Manager Python SDK. Refer to this link doc1 and doc2 about creating secrets with tags in Google secret manager.
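Since Secret Manager has no tags, labels are the closest equivalent for attaching key/value metadata at creation time. A hedged sketch extending the create call from the question (the label keys and values here are hypothetical placeholders):
response = client.create_secret(
    request={
        "parent": parent,
        "secret_id": secret_id,
        "secret": {
            "replication": {"automatic": {}},
            "labels": {"env": "prod", "team": "billing"},  # hypothetical labels
        },
    }
)
print(f"Created secret: {response.name}")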
Python create secret with tags in Google Secret manager
I am using Google Cloud run for my applications. I am storing all my secrets in Google Cloud Secret Manager. To read secrets I do the following: from google.cloud import secretmanager import hashlib def access_secret_version(secret_id, version_id="latest"): # Create the Secret Manager client. client = secretmanager.SecretManagerServiceClient() # Build the resource name of the secret version. PROJECT_ID = "xxxxx" name = f"projects/{PROJECT_ID}/secrets/{secret_id}/versions/{version_id}" # Access the secret version. response = client.access_secret_version(name=name) # Return the decoded payload. return response.payload.data.decode('UTF-8') def secret_hash(secret_value): # return the sha224 hash of the secret value return hashlib.sha224(bytes(secret_value, "utf-8")).hexdigest() To write secrets: from google.cloud import secretmanager def create_secret(secret_id): # Create the Secret Manager client. client = secretmanager.SecretManagerServiceClient() # Build the resource name of the parent project. PROJECT_ID = "xxxx" parent = f"projects/{PROJECT_ID}" # Build a dict of settings for the secret secret = {'replication': {'automatic': {}}} # Create the secret response = client.create_secret(secret_id=secret_id, parent=parent, secret=secret) # Print the new secret name. print(f'Created secret: {response.name}') How can I create secrets with tags in Python?
[ "One method to determine if feature is available is to study the REST API. Secrets do not support tags.\n\nMethod: projects.secrets.create\nResource: Secret\n\nSecrets support labels, annotations and versionAliases.\nDepending on your use case, versionAliases might be work instead of tags.\n\nOptional. Mapping from version alias to version name.\nA version alias is a string with a maximum length of 63 characters and\ncan contain uppercase and lowercase letters, numerals, and the hyphen\n(-) and underscore ('_') characters. An alias string must start with a\nletter and cannot be the string 'latest' or 'NEW'. No more than 50\naliases can be assigned to a given secret.\nVersion-Alias pairs will be viewable via secrets.get and modifiable\nvia secrets.patch. At launch Access by Allias will only be supported\non versions.get and versions.access.\n\n", "Below is the sample Python code to create a secret with tags in Google Secret manager.\ndef create_secret(project_id, secret_id):\n\"\"\"\nCreate a new secret with the given name. A secret is a logical wrapper\naround a collection of secret versions. Secret versions hold the actual\nsecret material.\n\"\"\"\n# Import the Secret Manager client library.\nfrom google.cloud import secretmanager\n\n# Create the Secret Manager client.\nclient = secretmanager.SecretManagerServiceClient()\n\n# Build the resource name of the parent project.\nparent = f\"projects/{project_id}\"\n\n# Create the secret.\nresponse = client.create_secret(\n request={\n \"parent\": parent,\n \"secret_id\": secret_id,\n \"secret\": {\"replication\": {\"automatic\": {}}},\n }\n)\n\n# Print the new secret name.\nprint(\"Created secret: {}\".format(response.name))\n\nTo run this code, first set up a Python development environment and install the Secret Manager Python SDK.\nRefer to this link doc1 and doc2 about creating secrets with tags in Google secret manager.\n" ]
[ 0, 0 ]
[]
[]
[ "google_cloud_platform", "google_secret_manager", "python" ]
stackoverflow_0074517584_google_cloud_platform_google_secret_manager_python.txt
Q: How to find an alphabet and extract the alphabet and the number tagged along with it in Pandas?
I would like to create a new column in the data frame that searches for the letter in a column. Based on it, it will then find the number next to it and copy the letter and number into the newly extracted column.
Example:

Month       Sem_Year
2020-04-01  H1 2020
2020-05-01  2020 H1
2020-06-01  H1 2020
2020-07-01  H2 2020
2020-08-01  H2 2020
2020-09-01  2020 H2
2020-10-01  2020 H2
2020-11-01  H2 2020
2020-12-01  H2 2020
2021-01-01  H1 2021
2021-02-01  H1 2021

Now I want to search for the letter H in the second column and extract it together with the number attached to it.
Example:

Month       Sem_Year   Sem
2020-04-01  H1 2020    H1
2020-05-01  2020 H1    H1
2020-06-01  H1 2020    H1
2020-07-01  H2 2020    H2
2020-08-01  H2 2020    H2
2020-09-01  2020 H2    H2
2020-10-01  2020 H2    H2
2020-11-01  H2 2020    H2
2020-12-01  H2 2020    H2
2021-01-01  H1 2021    H1
2021-02-01  H1 2021    H1

A: For the varied formats you have defined you need to use a regex. Note that H\d means H followed by a digit. This regex could be modified for other requirements.

df['Sem'] = df['Sem_Year'].str.extract(r"(H\d)")

A: You can use df.insert() to add a new column. For extracting the letter, loop through the values (column_value) in the second column and use value_for_new_column = column_value.split(' ')[0].
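A small, self-contained demonstration of the str.extract() approach (the frame below is a trimmed version of the example data):

import pandas as pd

df = pd.DataFrame({
    "Month": ["2020-04-01", "2020-05-01", "2020-09-01"],
    "Sem_Year": ["H1 2020", "2020 H1", "2020 H2"],
})

# str.extract() pulls the first match of the capture group wherever it
# appears in the string, so "H1 2020" and "2020 H1" both yield "H1".
df["Sem"] = df["Sem_Year"].str.extract(r"(H\d)")
print(df)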
How to find an alphabet and extract the alphabet and the number tagged along with it in Pandas?
I would like to create a new column in the data frame that will search for the alphabet in a column. Based on it, it will then search for the next number and copy the alphabet and number into newly extracted column. Example: Month Sem_Year 2020-04-01 H1 2020 2020-05-01 2020 H1 2020-06-01 H1 2020 2020-07-01 H2 2020 2020-08-01 H2 2020 2020-09-01 2020 H2 2020-10-01 2020 H2 2020-11-01 H2 2020 2020-12-01 H2 2020 2021-01-01 H1 2021 2021-02-01 H1 2021 Now I want to search for the alphabet H in the second column and extract the alphabet and number tagged along with it. Example: Month Sem_Year Sem 2020-04-01 H1 2020 H1 2020-05-01 2020 H1 H1 2020-06-01 H1 2020 H1 2020-07-01 H2 2020 H2 2020-08-01 H2 2020 H2 2020-09-01 2020 H2 H2 2020-10-01 2020 H2 H2 2020-11-01 H2 2020 H2 2020-12-01 H2 2020 H2 2021-01-01 H1 2021 H1 2021-02-01 H1 2021 H1
[ "For the varied formats you have defined you need to use a Regex expression. Note that H\\d means H followed by a digit. This regex could be modified for other requirements.\ndf['Sem'] = df['Sem_year'].str.extract(\"(H\\d)\")\n\n", "You can use df.insert() to add a new column. For extracting the alphabet, loop through the values (column_value) in the second column and use \"value_for_new_column=column_value.split(' ')[0]\"\n" ]
[ 1, 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074534517_dataframe_pandas_python.txt
Q: Python key, value and increment in for loop with dictionary.items()
I am looking for a way to get the current loop iteration while looping through a key, value pair of dictionary items. Currently I am using enumerate() to split it into iteration, tuple(key, value); however, this requires using tuple indexes to split it back out.

mydict = {'fruit':'apple', 'veg':'potato', 'sweet':'haribo'}

for i, kv in enumerate(mydict.items()):
    print(f"iteration={i}, key={kv[0]}, value={kv[1]}")
#>>> iteration=0, key=fruit, value=apple
#>>> iteration=1, key=veg, value=potato
#>>> iteration=2, key=sweet, value=haribo

What I am looking for is a better method to replace the following:

i = 0
for key, value in mydict.items():
    print(f"iteration={i}, key={key}, value={value}")
    i += 1
#>>> iteration=0, key=fruit, value=apple
#>>> iteration=1, key=veg, value=potato
#>>> iteration=2, key=sweet, value=haribo

I do not want to give up user-assigned keywords for the dictionary just to include iteration. I am hoping to find a solution similar to (pseudo-code):

for key, value, iteration in (tuple(mydict.items()), list(mydict.keys()).index(k)):

A: Just put the (key, value) in brackets:

mydict = {'fruit':'apple', 'veg':'potato', 'sweet':'haribo'}

for i, (key, value) in enumerate(mydict.items()):
    print(f"iteration={i}, key={key}, value={value}")
Python key, value and increment in for loop with dictionary.items()
I am looking for a way to get the current loop iteration while looping though a key, value pair of dictionary items. currently i am using enumerate() to split it into iteration, tuple(key, value) however this requires using tuple indexes to split it back out. mydict = {'fruit':'apple', 'veg':'potato', 'sweet':'haribo'} for i, kv in enumerate(mydict.items()): print(f"iteration={i}, key={kv[0]}, value={kv[1]}") #>>> iteration=0, key=fruit, value=apple #>>> iteration=1, key=veg, value=potato #>>> iteration=2, key=sweet, value=haribo what I am looking for is a better method to replace the following: i = 0 for key, value in mydict.items(): print(f"iteration={i}, key={key}, value={value}") i+=1 #>>> iteration=0, key=fruit, value=apple #>>> iteration=1, key=veg, value=potato #>>> iteration=2, key=sweet, value=haribo I do not want to give up user assigned keywords for the dictionary just to include iteration. I am hoping to find a solution similar to (psudo-code): for key, value, iteration in (tuple(mydict.items()), list(mydict.keys()).index(k)):
[ "Just put the (key, value) in brackets:\nmydict = {'fruit':'apple', 'veg':'potato', 'sweet':'haribo'}\n\nfor i, (key, value) in enumerate(mydict.items()):\n print(f\"iteration={i}, key={key}, value={value}\")\n\n" ]
[ 1 ]
[]
[]
[ "dictionary", "python" ]
stackoverflow_0074534522_dictionary_python.txt
Q: How to fix Python import google API error

from googleapiclient.discovery import build

After pip-installing the Google API client for Python, Google's quickstart tells me to use this import; however, it doesn't work! Can anyone help?
https://developers.google.com/docs/api/quickstart/python

Traceback (most recent call last):
  File "C:\Users\M1\PycharmProjects\YouTube\main.py", line 1, in <module>
    from googleapiclient.discovery import build
ModuleNotFoundError: No module named 'googleapiclient'

A: I have installed the libraries using the following command:

pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib

Then, you can check that the library has been installed properly by running:

pip show google-api-python-client

Now, you should be able to import the libraries in your Python code. Make sure that the code is executed within your virtual environment in case you are using one.
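Once the install succeeds, build() is what constructs a service client. A hypothetical sketch (the API name, version, and key below are placeholder assumptions, not from the question):

from googleapiclient.discovery import build

# "youtube"/"v3" are illustrative; substitute whichever API you enabled,
# and pass a real API key or OAuth credentials before making calls.
service = build("youtube", "v3", developerKey="YOUR_API_KEY")
print(type(service))  # googleapiclient.discovery.Resource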
How to fix Python import google API error
from googleapiclient.discovery import build After pip installing google api for python google tells me to use this command however the command doesn't work! Can anyone help? https://developers.google.com/docs/api/quickstart/python Traceback (most recent call last): File "C:\Users\M1\PycharmProjects\YouTube\main.py", line 1, in <module> from googleapiclient.discovery import build ModuleNotFoundError: No module named 'googleapiclient' https://developers.google.com/docs/api/quickstart/python
[ "I have installed the libraries using the following command:\npip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib\n\nThen, you can check that the library has been installed properly by running:\npip show google-api-python-client\n\nNow, you should be able to import the libraries in your python code. Make sure that the code is executed within your virtual environment in case you are using one.\n" ]
[ 0 ]
[]
[]
[ "google_api_python_client", "python" ]
stackoverflow_0074534568_google_api_python_client_python.txt
Q: Failed in nopython mode pipeline (step: nopython frontend) No implementation of function Function

TypingError
During: typing of setitem

def calc_ppr_topk_parallel(indptr, indices, deg, alpha, epsilon, nodes, topk):
    <source elided>
        idx_topk = np.argsort(val_np)[-topk:]
        js[i] = j_np[idx_topk]
        ^

import numba
import numpy as np
import scipy.sparse as sp

@numba.njit(cache=True, locals={'_val': numba.float32, 'res': numba.float32, 'res_vnode': numba.float32})
def _calc_ppr_node(inode, indptr, indices, deg, alpha, epsilon):
    alpha_eps = alpha * epsilon
    f32_0 = numba.float32(0)
    p = {inode: f32_0}
    r = {}
    r[inode] = alpha
    q = [inode]
    while len(q) > 0:
        unode = q.pop()
        res = r[unode] if unode in r else f32_0
        if unode in p:
            p[unode] += res
        else:
            p[unode] = res
        r[unode] = f32_0
        for vnode in indices[indptr[unode]:indptr[unode + 1]]:
            _val = (1 - alpha) * res / deg[unode]
            if vnode in r:
                r[vnode] += _val
            else:
                r[vnode] = _val
            res_vnode = r[vnode] if vnode in r else f32_0
            if res_vnode >= alpha_eps * deg[vnode]:
                if vnode not in q:
                    q.append(vnode)
    return list(p.keys()), list(p.values())

@numba.njit(cache=True)
def calc_ppr(indptr, indices, deg, alpha, epsilon, nodes):
    js = []
    vals = []
    for i, node in enumerate(nodes):
        j, val = _calc_ppr_node(node, indptr, indices, deg, alpha, epsilon)
        js.append(j)
        vals.append(val)
    return js, vals

@numba.njit(cache=True, locals={'j_np': numba.int64, 'val_np': numba.int64})
def calc_ppr_topk_parallel(indptr, indices, deg, alpha, epsilon, nodes, topk):
    js = [np.zeros(0, dtype=np.int64)] * len(nodes)
    vals = [np.zeros(0, dtype=np.float32)] * len(nodes)
    for i in numba.prange(len(nodes)):
        j, val = _calc_ppr_node(nodes[i], indptr, indices, deg, alpha, epsilon)
        j_np, val_np = np.array(j), np.array(val)
        idx_topk = np.argsort(val_np)[-topk:]
        js[i] = j_np[idx_topk]
        vals[i] = val_np[idx_topk]
    return js, vals

def ppr_topk(adj_matrix, alpha, epsilon, nodes, topk):
    """Calculate the PPR matrix approximately using Anderson."""
    out_degree = np.sum(adj_matrix > 0, axis=1).A1
    nnodes = adj_matrix.shape[0]
    neighbors, weights = calc_ppr_topk_parallel(adj_matrix.indptr, adj_matrix.indices, out_degree,
                                                numba.float32(alpha), numba.float32(epsilon), nodes, topk)
    return construct_sparse(neighbors, weights, (len(nodes), nnodes))

def construct_sparse(neighbors, weights, shape):
    i = np.repeat(np.arange(len(neighbors)), np.fromiter(map(len, neighbors), dtype=np.int))
    j = np.concatenate(neighbors)
    return sp.coo_matrix((np.concatenate(weights), (i, j)), shape)

def topk_ppr_matrix(adj_matrix, alpha, eps, idx, topk, normalization='row'):
    """Create a sparse matrix where each node has up to the topk PPR neighbors and their weights."""
    topk_matrix = ppr_topk(adj_matrix, alpha, eps, idx, topk).tocsr()
    if normalization == 'sym':
        # Assume undirected (symmetric) adjacency matrix
        deg = adj_matrix.sum(1).A1
        deg_sqrt = np.sqrt(np.maximum(deg, 1e-12))
        deg_inv_sqrt = 1. / deg_sqrt
        row, col = topk_matrix.nonzero()
        # assert np.all(deg[idx[row]] > 0)
        # assert np.all(deg[col] > 0)
        topk_matrix.data = deg_sqrt[idx[row]] * topk_matrix.data * deg_inv_sqrt[col]
    elif normalization == 'col':
        # Assume undirected (symmetric) adjacency matrix
        deg = adj_matrix.sum(1).A1
        deg_inv = 1. / np.maximum(deg, 1e-12)
        row, col = topk_matrix.nonzero()
        # assert np.all(deg[idx[row]] > 0)
        # assert np.all(deg[col] > 0)
        topk_matrix.data = deg[idx[row]] * topk_matrix.data * deg_inv[col]
    elif normalization == 'row':
        pass
    else:
        raise ValueError(f"Unknown PPR normalization: {normalization}")
    return topk_matrix

I think I went about it the wrong way.
TypingError: invalid setitem with value of array(int32, 1d, C) to element of array(int64, 1d, C)
This code is in the pprgo algorithm model, but when I run it I get this error; it should not be the author's error. The algorithm prototype: https://github.com/TUM-DAML/pprgo_pytorch
Is there a simple way to change the data types so they match? Can we use it without numba acceleration, as long as it works?
A: You are using a dictionary inside of a numba function. As numba only has very limited support for dictionaries, the code crashes. The error message implies that there is another problem with the code as well, but without any input given I am not going to bother trying to find it.
It looks like you have also tried to give a lot of keywords to numba, some of which do not seem to be doing anything, such as the cache keyword. I get the feeling that you just grabbed your (large) function and tried to apply numba to it.
This is not the way to go, as numba has quite a bit of a learning curve to use correctly. I suggest that you try to get a very small function to work first, and then try to expand it until you have your original function.
Also, dictionaries are generally a bad idea if you want to write high-performance code.
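The reported TypingError itself is just a dtype mismatch: the list js was pre-filled with int64 arrays, while np.array(j) produced int32 arrays (on Windows builds of NumPy, arrays of Python ints default to the 32-bit C long). A minimal, self-contained sketch of the fix — pinning the dtypes so they agree (my reading of the cause, not a statement from the answer):

import numpy as np

j = [5, 2, 9]            # indices as returned by _calc_ppr_node
val = [0.1, 0.7, 0.2]

# Force the dtypes so the later js[i] = ... assignment sees matching types.
j_np = np.array(j, dtype=np.int64)
val_np = np.array(val, dtype=np.float32)

topk = 2
idx_topk = np.argsort(val_np)[-topk:]
print(j_np[idx_topk], val_np[idx_topk])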
Failed in nopython mode pipeline (step: nopython frontend) No implementation of function Function
TypingError During: typing of setitem def calc_ppr_topk_parallel(indptr, indices, deg, alpha, epsilon, nodes, topk): <source elided> idx_topk = np.argsort(val_np)[-topk:] js[i] = j_np[idx_topk] ^ import numba import numpy as np import scipy.sparse as sp @numba.njit(cache=True, locals={'_val': numba.float32, 'res': numba.float32, 'res_vnode': numba.float32}) def _calc_ppr_node(inode, indptr, indices, deg, alpha, epsilon): alpha_eps = alpha * epsilon f32_0 = numba.float32(0) p = {inode: f32_0} r = {} r[inode] = alpha q = [inode] while len(q) > 0: unode = q.pop() res = r[unode] if unode in r else f32_0 if unode in p: p[unode] += res else: p[unode] = res r[unode] = f32_0 for vnode in indices[indptr[unode]:indptr[unode + 1]]: _val = (1 - alpha) * res / deg[unode] if vnode in r: r[vnode] += _val else: r[vnode] = _val res_vnode = r[vnode] if vnode in r else f32_0 if res_vnode >= alpha_eps * deg[vnode]: if vnode not in q: q.append(vnode) return list(p.keys()), list(p.values()) @numba.njit(cache=True) def calc_ppr(indptr, indices, deg, alpha, epsilon, nodes): js = [] vals = [] for i, node in enumerate(nodes): j, val = _calc_ppr_node(node, indptr, indices, deg, alpha, epsilon) js.append(j) vals.append(val) return js, vals @numba.njit(cache=True,locals={'j_np': numba.int64,'val_np': numba.int64}) def calc_ppr_topk_parallel(indptr, indices, deg, alpha, epsilon, nodes, topk): js = [np.zeros(0, dtype=np.int64)] * len(nodes) vals = [np.zeros(0, dtype=np.float32)] * len(nodes) for i in numba.prange(len(nodes)): j, val = _calc_ppr_node(nodes[i], indptr, indices, deg, alpha, epsilon) j_np, val_np = np.array(j), np.array(val) idx_topk = np.argsort(val_np)[-topk:] js[i] = j_np[idx_topk] vals[i] = val_np[idx_topk] return js, vals def ppr_topk(adj_matrix, alpha, epsilon, nodes, topk): """Calculate the PPR matrix approximately using Anderson.""" out_degree = np.sum(adj_matrix > 0, axis=1).A1 nnodes = adj_matrix.shape[0] neighbors, weights = calc_ppr_topk_parallel(adj_matrix.indptr, adj_matrix.indices, out_degree, numba.float32(alpha), numba.float32(epsilon), nodes, topk) return construct_sparse(neighbors, weights, (len(nodes), nnodes)) def construct_sparse(neighbors, weights, shape): i = np.repeat(np.arange(len(neighbors)), np.fromiter(map(len, neighbors), dtype=np.int)) j = np.concatenate(neighbors) return sp.coo_matrix((np.concatenate(weights), (i, j)), shape) def topk_ppr_matrix(adj_matrix, alpha, eps, idx, topk, normalization='row'): """Create a sparse matrix where each node has up to the topk PPR neighbors and their weights.""" topk_matrix = ppr_topk(adj_matrix, alpha, eps, idx, topk).tocsr() if normalization == 'sym': # Assume undirected (symmetric) adjacency matrix deg = adj_matrix.sum(1).A1 deg_sqrt = np.sqrt(np.maximum(deg, 1e-12)) deg_inv_sqrt = 1. / deg_sqrt row, col = topk_matrix.nonzero() # assert np.all(deg[idx[row]] > 0) # assert np.all(deg[col] > 0) topk_matrix.data = deg_sqrt[idx[row]] * topk_matrix.data * deg_inv_sqrt[col] elif normalization == 'col': # Assume undirected (symmetric) adjacency matrix deg = adj_matrix.sum(1).A1 deg_inv = 1. / np.maximum(deg, 1e-12) row, col = topk_matrix.nonzero() # assert np.all(deg[idx[row]] > 0) # assert np.all(deg[col] > 0) topk_matrix.data = deg[idx[row]] * topk_matrix.data * deg_inv[col] elif normalization == 'row': pass else: raise ValueError(f"Unknown PPR normalization: {normalization}") return topk_matrix ` I think I went about it the wrong way. 
TypingError: invalid setitem with value of array(int32, 1d, C) to element of array(int64, 1d, C) This code is in the pprgo algorithm model, but when I run this error, it should not be the author's error. The algorithm prototype :https://github.com/TUM-DAML/pprgo_pytorch Is there a simple way to change the data type to the same one?Can we use it without numba acceleration ,as long as it works?
[ "You are using a dictionairy inside of a numba function. As numba only has very limited support for dictionaries, the code crashes. The error message implies that there is another problem with the code aswell, but without any input given I am not going to bother trying to find it.\nIt looks like you have also tried to give a lot of keywords to numba, some of which do not seem to be doing anything, such as the cache keyword. I get the feeling that you just grabbed your (large) function and tried to apply numba to it.\nThis is not the way to go, as numba has quite a bit of a learning curve to use correctly. I suggest that you try to get a very small function to work first, and then try to expand it until you have your original function.\nAlso dictionaries are generally a bad idea if you want to write high perfomance code.\n" ]
[ 0 ]
[]
[]
[ "numba", "python" ]
stackoverflow_0074530680_numba_python.txt
Q: CNN model for timeseries prediction
I want to build a CNN model. I have x_train = 8000000x7 and y_train = 8000000x2, since it is a multivariate time series. How can I feed the input with a window size of 160 and stride = 1? What should be the input for the CNN model? I used TimeseriesGenerator for creating a dataset as follows:

train_gen = tf.keras.preprocessing.sequence.TimeseriesGenerator(X_train, Y_train, length=160, sampling_rate=1, shuffle=False, batch_size=256)
batch_0 = train_gen[0]
data, label = batch_0
print("Shape of the generator data and label:", data.shape, label.shape)
input = data.shape[1], data.shape[2]

For LSTM I have used 'input' as the input shape. What should be the input for the CNN model?
1) Can I use TimeseriesGenerator for a CNN model?
2) Is there any data generator for creating a sliding-window approach?
A: First, TimeseriesGenerator is deprecated and does not take a TensorFlow tensor as input, so I discourage using it. Instead you can use timeseries_dataset_from_array (doc here) from Keras utils. It also generates sliding windows.
For time series prediction, you should use a 1-D CNN. It takes a sequence as input exactly like an LSTM. As far as shape is concerned, in TensorFlow it is still:

input = data.shape[1], data.shape[2]

Assuming that data.shape[0] is the batch size, data.shape[1] the sequence length and data.shape[2] the number of features of each element.
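To make the answer concrete, here is a small runnable sketch of the suggested pipeline (the window length matches the question, but the filter sizes and the random stand-in data are illustrative assumptions):

import numpy as np
import tensorflow as tf

X = np.random.rand(1000, 7).astype("float32")   # stand-in for the 8000000x7 series
Y = np.random.rand(1000, 2).astype("float32")

# Sliding windows of 160 steps with stride 1, each paired with the target at
# the window's start index (shift Y yourself if you need end-of-window targets).
ds = tf.keras.utils.timeseries_dataset_from_array(
    X, Y, sequence_length=160, sequence_stride=1, batch_size=256)

model = tf.keras.Sequential([
    tf.keras.layers.Conv1D(64, 5, activation="relu", input_shape=(160, 7)),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(2),
])
model.compile(optimizer="adam", loss="mse")
model.fit(ds, epochs=1)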
CNN model for timeseries prediction
I want to build a CNN model. I have x_train=8000000x7, y_train=8000000x2. Since it is a multivariant time series. How can feed the input with window size of 160 and stride=1. what should be the input for cnn model? I used timeseriesgenerator for creating a dataset as follows train_gen = tf.keras.preprocessing.sequence.TimeseriesGenerator(X_train, Y_train, length=160, sampling_rate=1,shuffle=False, batch_size=256) batch_0 = train_gen[0] data, label = batch_0 print("Shape of the generator data and label:", data.shape, label.shape) input=data.shape[1],data.shape[2] For LSTM I have used 'input' as the input shape. What should be the input for CNN model. 1)can use timeseriesgeneartor for CNN model? 2) is there any datagenerator for creating a sliding window approach?
[ "First, TimeseriesGenerator is deprecated and do not take tensorflow tensor as input so I discourage to use it. Instead you can use timeseries_dataset_from_array (doc here) from keras utils. It also generate sliding windows.\nFor time serie prediction, you should use 1-D CNN. They take a sequence as input exactly like LSTM. As shape is concerned, in Tensorflow it is still:\ninput = data.shape[1], data.shape[2]\n\nAssuming that data.shape[0] is the batch size, data.shape[1] the sequence length and data.shape[2] the number of features of each elements.\n" ]
[ 1 ]
[]
[]
[ "conv_neural_network", "lstm", "python", "tensorflow" ]
stackoverflow_0074532559_conv_neural_network_lstm_python_tensorflow.txt
Q: In this program I want to extract the date in YYYY-MM-DD format from a MySQL database using Python
CODE:

cur = sqlCon.cursor()
cur.execute("select datedue from library where member=%s ", Member.get())
row = cur.fetchone()
print(datetime.date.today())
for x in row:
    print(row)

But the result is in (datetime.date(2022, 12, 6),) format. What should I do?
A: You may use the strftime() function:

cur.execute("SELECT datedue FROM library WHERE member = %s", Member.get())
row = cur.fetchone()
date_str = row[0].strftime("%Y-%m-%d")  # row is a tuple with the default cursor
print(date_str)

You could also handle this on the MySQL side by using the DATE_FORMAT() function (note the doubled %%: a literal % must be escaped because %s is the driver's parameter placeholder):

sql = "SELECT DATE_FORMAT(datedue, '%%Y-%%m-%%d') AS datedue FROM library WHERE member = %s"
cur.execute(sql, Member.get())
row = cur.fetchone()
date_str = row[0]
print(date_str)

Here DATE_FORMAT() returns a string on the database side, so no conversion is needed in Python.
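A standalone demonstration of the Python-side conversion, independent of the database:

import datetime

d = datetime.date(2022, 12, 6)   # what the cursor hands back
print(d.strftime("%Y-%m-%d"))    # 2022-12-06
print(d.isoformat())             # equivalent shortcut for this exact format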
In this program I want to extract the date in YYYY-MM-DD format from a MySQL database using Python
CODE: cur = sqlCon.cursor() cur.execute("select datedue from library where member=%s ", Member.get()) row = cur.fetchone() print(datetime.date.today()) for x in row: print(row) But the result is in (datetime.date(2022, 12, 6),) fromat What should I do?????
[ "You may use the strftime() function:\ncur.execute(\"SELECT datedue FROM library WHERE member = %s\", Member.get())\nrow = cur.fetchone()\ndate_str = row[\"datedue\"].strftime(\"%Y-%m-%d\")\nprint(date_str)\n\nYou could also handle this on the MySQL side by using the STR_TO_DATE() function:\nsql = \"SELECT STR_TO_DATE(datedue, '%Y-%m-%d') AS datedue FROM library WHERE member = %s\"\ncur.execute(sql, Member.get())\nrow = cur.fetchone()\ndate_str = row[\"datedue\"]\nprint(date_str)\n\nHere STR_TO_DATE() returns a string on the database side, so no conversion would be needed in Python.\n" ]
[ 0 ]
[]
[]
[ "mysql", "python" ]
stackoverflow_0074534660_mysql_python.txt
Q: Local variable 'result' might be referenced before assignment
With a flow like this:

def func():
    try:
        result = calculate()
    finally:
        try:
            cleanup()
        except Exception:
            pass

    return result

There is a warning about Local variable 'result' might be referenced before assignment.
But I can't really see how that's possible. One of these must be true:

calculate() raises an exception --> the return statement will never get reached, so result is not referenced again
calculate() does not raise an exception --> result is successfully assigned, and the return statement returns that value

How would you ever get result referenced before assignment? Is there an implementation of calculate and cleanup which could demonstrate that happening?
A: This is a false positive of PyCharm's warning heuristics. As per the Python specification, the code behaves as you describe and result can only be reached when set.

According to 8.4 in the Python documentation:

If the finally clause executes a return, break or continue statement, the saved exception is discarded:

>>> def f():
...     try:
...         1/0
...     finally:
...         return 42
...
>>> f()
42

The Python interpreter will ignore the exception that was caused by calculate() only if the finally block contains a return, break, or continue statement.
This means that with the implementation you provided, where the finally block has none of the statements listed above, the exception caused by calculate() won't be discarded, so the result variable won't be referenced, meaning that this warning is a false positive.
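A short runnable demonstration of the two cases from the question (the calculate stand-ins are made up for illustration):

def calculate_ok():
    return 42

def calculate_bad():
    raise ValueError("boom")

def func(calc):
    try:
        result = calc()
    finally:
        pass  # cleanup that neither returns nor raises

    return result

print(func(calculate_ok))       # 42 -- result was assigned before the return
try:
    func(calculate_bad)
except ValueError as exc:
    print(exc)                  # the exception propagates; return is never reached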
Local variable 'result' might be referenced before assignment
With a flow like this: def func(): try: result = calculate() finally: try: cleanup() except Exception: pass return result There is a warning about Local variable 'result' might be referenced before assignment: But I can't really see how that's possible. One of these must be true: calculate() raises an exception --> the return statement will never get reached, so result is not referenced again calculate() does not raise an exception --> result is successfully assigned, and the return statement returns that value How would you ever get result referenced before assignment? Is there an implementation of calculate and cleanup which could demonstrate that happening?
[ "This is a false positive of PyCharms warning heuristics. As per the Python specification, the code behaves as you describe and result can only be reached when set.\n\nAccording to 8.4 in the Python documentation:\n\nIf the finally clause executes a return, break or continue statement, the saved exception is discarded:\n\n\n>>> def f():\n... try:\n... 1/0\n... finally:\n... return 42\n...\n>>> f()\n42\n\nThe Python interpreter will ignore the exception that was caused by calculate() if the finally block contains a return, break, or continue statement.\nThis means that with the implementation you provided, where the finally block has neither of the words specified above, the exception caused by calculate won't be discarded, so the result variable won't be referenced, meaning that this warning is useless.\n" ]
[ 1 ]
[]
[]
[ "exception", "function", "pycharm", "python", "scope" ]
stackoverflow_0069845686_exception_function_pycharm_python_scope.txt
Q: Problem installing pygame from cmd and Visual Studio
Basically I'm trying to install pygame on my PC and the installation doesn't work; I already installed pygame on another PC and didn't have any problem. All solutions appreciated.
Output:

C:\Users\Station>pip install pygame
Defaulting to user installation because normal site-packages is not writeable
WARNING: Retrying (Retry(total=4, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))': /simple/pygame/
WARNING: Retrying (Retry(total=3, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))': /simple/pygame/
WARNING: Retrying (Retry(total=2, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))': /simple/pygame/
WARNING: Retrying (Retry(total=1, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))': /simple/pygame/
WARNING: Retrying (Retry(total=0, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))': /simple/pygame/
Could not fetch URL https://pypi.org/simple/pygame/: There was a problem confirming the ssl certificate: HTTPSConnectionPool(host='pypi.org', port=443): Max retries exceeded with url: /simple/pygame/ (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))) - skipping
ERROR: Could not find a version that satisfies the requirement pygame (from versions: none)
ERROR: No matching distribution found for pygame
Could not fetch URL https://pypi.org/simple/pip/: There was a problem confirming the ssl certificate: HTTPSConnectionPool(host='pypi.org', port=443): Max retries exceeded with url: /simple/pip/ (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))) - skipping
WARNING: There was an error checking the latest version of pip.

I already tried reinstalling pip, new versions of Python, and admin privileges, and nothing worked.
A: There are some ways to fix this:
1 - update pip to the newest version
2 - you can put this in Python code to install any library:

import pip
pip.main(["install", "pygame"])

3 - you can install pygame from GitHub by typing in the command prompt:

pip install git+https://github.com/pygame/pygame
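The "self signed certificate in certificate chain" message usually means something on the network (a corporate proxy, VPN, or antivirus doing TLS interception) is rewriting the connection, rather than anything being wrong with pip itself. A common workaround — this is my assumption about the cause, and it skips certificate verification for these hosts, so only use it on networks you trust:

pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org pygame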
Problem installing pygame from cmd and Visual Studio
Basically I'm trying to install pygame on my pc and the installation doesn't work, I already installed pygame on another pc and didn't have any problem. All solutions appreciated Output: C:\Users\Station>pip install pygame Defaulting to user installation because normal site-packages is not writeable WARNING: Retrying (Retry(total=4, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))': /simple/pygame/ WARNING: Retrying (Retry(total=3, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))': /simple/pygame/ WARNING: Retrying (Retry(total=2, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))': /simple/pygame/ WARNING: Retrying (Retry(total=1, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))': /simple/pygame/ WARNING: Retrying (Retry(total=0, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))': /simple/pygame/ Could not fetch URL https://pypi.org/simple/pygame/: There was a problem confirming the ssl certificate: HTTPSConnectionPool(host='pypi.org', port=443): Max retries exceeded with url: /simple/pygame/ (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))) - skipping ERROR: Could not find a version that satisfies the requirement pygame (from versions: none) ERROR: No matching distribution found for pygame Could not fetch URL https://pypi.org/simple/pip/: There was a problem confirming the ssl certificate: HTTPSConnectionPool(host='pypi.org', port=443): Max retries exceeded with url: /simple/pip/ (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: self signed certificate in certificate chain (_ssl.c:992)'))) - skipping WARNING: There was an error checking the latest version of pip. I already tried installing pip, new versions of python, admin privileges and nothing worked
[ "there are some ways to fix this:\n1 - update pip to newest version\n2 - you can type this in python code to install any library\nimport pip \npip.main([\"install\", \"pygame\"])\n\n3 - you can install pygame from github with typing in command prompt:\npip install git+https://github.com/pygame/pygame\n\n" ]
[ 0 ]
[]
[]
[ "pip", "pygame", "python" ]
stackoverflow_0074534416_pip_pygame_python.txt
Q: How can I fix: 'KeyError [x] not found in axis' when filtering dataframe
I am trying to filter my dataframe based on IQR for a few selected features. The code I use is the following:

import pandas as pd
import numpy as np

# Load data
df = pd.read_csv("dataframe.csv")
features = df.loc[:, ['col1', 'col2', 'col3', 'col4', 'col5']]
print("Old Shape: ", df.shape)

def filtering(column_name):
    print(column_name)
    Q1 = np.percentile(df[column_name], 25, interpolation='midpoint')
    Q3 = np.percentile(df[column_name], 75, interpolation='midpoint')
    IQR = Q3 - Q1
    # Upper bound
    upper = np.where(df[column_name] >= (Q3 + 1.5 * IQR))
    # Lower bound
    lower = np.where(df[column_name] <= (Q1 - 1.5 * IQR))
    ''' Removing the Outliers '''
    df.drop(upper[0], inplace=True)
    df.drop(lower[0], inplace=True)
    print("New Shape: ", df.shape)
    print('==== done ====')

for col in features.columns:
    filtering(col)

The error (on line 28, df.drop(lower[0], inplace=True)):

KeyError: '[14] not found in axis'

The KeyError is caused by the fact that an index is already dropped because it is an outlier in one of the features, after which it is detected again. Since it is already dropped, this index cannot be found. I am however unsure how it is detected as an outlier after already being dropped. Therefore I am unaware how to tackle this problem.
A: Without the dataframe and the line in which the error occurs it's not that clear what happens.
But in case you just want your script to run you could wrap it with a try/except block - like so:

try:
    # Your code
except KeyError:
    # Do what you want to do in case a KeyError occurs, e.g. log or print something

A: After rethinking my approach, I have come up with a solution. I'll post my new approach here:

indices = []

def outlier_indices(column_name):
    Q1 = np.percentile(df[column_name], 25, interpolation='midpoint')
    Q3 = np.percentile(df[column_name], 75, interpolation='midpoint')
    IQR = Q3 - Q1

    # Upper bound
    upper = np.where(df[column_name] >= (Q3 + 1.5 * IQR))[0].tolist()
    # Lower bound
    lower = np.where(df[column_name] <= (Q1 - 1.5 * IQR))[0].tolist()
    indices.extend(upper)
    indices.extend(lower)

for col in features.columns:
    outlier_indices(col)

indices = set(indices)
df.drop(indices, inplace=True)

I have tackled the issue of getting duplicate indices by creating a list of indices and using set to remove the duplicate indices, which was then used to drop the outliers.
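A more idiomatic fix is to avoid dropping by index altogether and build one boolean mask across all columns (a sketch with made-up data, since the real CSV isn't available):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(100, 3), columns=["col1", "col2", "col3"])

mask = pd.Series(False, index=df.index)
for col in df.columns:
    q1, q3 = df[col].quantile(0.25), df[col].quantile(0.75)
    iqr = q3 - q1
    # Mark rows that are outliers in *any* column; no row is dropped twice.
    mask |= (df[col] < q1 - 1.5 * iqr) | (df[col] > q3 + 1.5 * iqr)

df_clean = df[~mask]
print("Old shape:", df.shape, "New shape:", df_clean.shape)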
How can I fix: 'KeyError [x] not found in axis' when filtering dataframe
I am trying to filter my dataframe based on IQR for a few selected features. The code I use is the following: import pandas as pd import numpy as np # Load data df = pd.read_csv("dataframe.csv") features = df.loc[:, ('col1, col2, col3, col4, col5')] print("Old Shape: ", df.shape) def filtering(column_name): print(column_name) Q1 = np.percentile(df[column_name], 25, interpolation = 'midpoint') Q3 = np.percentile(df[column_name], 75, interpolation = 'midpoint') IQR = Q3 - Q1 # Upper bound upper = np.where(df[column_name] >= (Q3+1.5*IQR)) # Lower bound lower = np.where(df[column_name] <= (Q1-1.5*IQR)) ''' Removing the Outliers ''' df.drop(upper[0], inplace = True) df.drop(lower[0], inplace = True) print("New Shape: ", df.shape) print('==== done ====') for col in features.columns: filtering(col) The error (on line 28, df.drop(lower[0], inplace=True): KeyError: '[14] not found in axis' The KeyError is caused by the fact that an index is already dropped because it is an outlier in one of the features, after which it is detected again. Since it is already dropped, this index cannot be found. I am however unsure how it is detected as an outlier after already being dropped. Therefore I am unaware how to tackle this problem.
[ "Without the dataframe and line in which the error occurs its not that clear what happens\nBut in case you just want your script to run you could wrap it with a try/except block - like so:\ntry:\n # Your code\nexcept KeyError:\n # Do what you want to do in case a KeyError occurs e.g. log something or print something\n\n\n", "After rethinking my approach, I have come up with a solution. I'll post my new approach here:\nindices = []\ndef outlier_indices(column_name):\n Q1 = np.percentile(df[column_name], 25, interpolation = 'midpoint')\n Q3 = np.percentile(df[column_name], 75, interpolation = 'midpoint')\n IQR = Q3 - Q1\n \n # Upper bound\n upper = np.where(df[column_name].tolist() >= (Q3+1.5*IQR))[0].tolist()\n # Lower bound\n lower = np.where(df[column_name].tolist() <= (Q1-1.5*IQR))[0].tolist()\n indices.extend(upper)\n indices.extend(lower)\n\nfor col in features.columns:\n outlier_indices(col)\n\nindices = set(indices)\ndf.drop(indices, inplace=True)\n\nI have tackled the issue of getting duplicate indices by creating a list of indices and using set to remove the duplicate indices, which was then used to drop the outliers.\n" ]
[ 1, 0 ]
[]
[]
[ "dataframe", "filtering", "python" ]
stackoverflow_0074533385_dataframe_filtering_python.txt
Q: Request Line is too large - Gunicorn
I have been using Flask for over a year; I deploy my Flask app into production using the Gunicorn WSGI server. Recently I encountered a weird error:

<html> <head> <title>Bad Request</title> </head> <body> <h1><p>Bad Request</p></h1> Request Line is too large (4269 &gt; 4094) </body> </html>

I encountered this error for most of my GET requests. While debugging I came to know that I needed to increase the --limit-request-line config in Gunicorn; after I increased it to 8190 in my Dockerfile, I was able to see the responses from the Flask app successfully. The Dockerfile change I made was:

CMD gunicorn app:app -w 2 --threads 50 -b 0.0.0.0:8080 --limit-request-line 8190 --capture-output --log-level info

My question here is: why did the HTTP request line size/length increase suddenly? I hadn't redeployed my Flask app in the past 40 days, so the Gunicorn version wasn't changed. The server was returning responses perfectly for 39 days, then suddenly it gave me this error. Any idea on this?
Edit 1: I found that the issue was due to the large GET request being formed from multiple filter values.
A: Set --limit-request-line to 0. This will allow unlimited request line length. However, add request-size validation inside the code to avoid any security risks.
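For the in-code validation the answer recommends, one option in Flask is a before_request hook (a sketch; the limit value and the 414 response are my choices, not from the answer):

from flask import Flask, request, abort

app = Flask(__name__)
MAX_URL_LEN = 8190  # mirror whatever limit you consider safe

@app.before_request
def reject_oversized_request_lines():
    # full_path is the path plus the query string that made the line so long.
    if len(request.full_path) > MAX_URL_LEN:
        abort(414)  # Request-URI Too Long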
Request Line is too large - Gunicorn
I have been using Flask for over a year, I used deploy my Flask app into production using Gunicorn WSGI server. Recently I encountered a weird error that, <html> <head> <title>Bad Request</title> </head> <body> <h1><p>Bad Request</p></h1> Request Line is too large (4269 &gt; 4094) </body> </html> I encountered this error for most of my GET requests. While debugging I came to know that, I need to increase the --limit-request-line config in Gunicorn after I increased that to 8190 in my Dockerfile, I was able to see the responses from the Flask app successfully. The dockerfile change I made was CMD gunicorn app:app -w 2 --threads 50 -b 0.0.0.0:8080 --limit-request-line 8190 --capture-output --log-level info My question here is, Why did the http request line size/length increase suddenly, because I haven't redeployed my Flask app in past 40 days, so the Gunicorn version wasn't changed. The server was returning responses perfectly for 39 days, then suddenly it gave me this error. Any idea on this? Edit1: I found that the issue was due to the large get request being formed due to multiple filter values.
[ "Set --limit-request-line to 0. This will allow unlimited request length. However, add a request size validation inside code to avoid any security risks.\n" ]
[ 0 ]
[]
[]
[ "backend", "flask", "gunicorn", "python", "rest" ]
stackoverflow_0072129004_backend_flask_gunicorn_python_rest.txt
Q: Python/django : Cannot import GeoIP
I cannot import GeoIP in Django. I searched and tested this error for two days, but still could not find the problem. Surely, I installed GeoDjango. I'm on macOS 10.8.
The following is a log from testing in the Django shell:

from django.contrib.gis import geoip
module 'django.contrib.gis.geoip' from '/Library/Python/2.7/site-packages/django/contrib/gis/geoip/__init__.pyc'

It works. I could even find the GeoIP class at /Library/Python/2.7/site-packages/django/contrib/gis/geoip/base.py.

from django.contrib.gis.geoip import geoip
Traceback (most recent call last):
  File "", line 1, in
ImportError: cannot import name geoip

I also added django.contrib.gis to settings.py. I could even find the GeoIP class via the Eclipse shortcut. Anyway, I tested one more thing in the Django shell:

from django.contrib.gis.geoip.base import GeoIP
Traceback (most recent call last):
  File "", line 1, in
  File "/Library/Python/2.7/site-packages/django/contrib/gis/geoip/base.py", line 6, in
    from django.contrib.gis.geoip.libgeoip import GEOIP_SETTINGS
  File "/Library/Python/2.7/site-packages/django/contrib/gis/geoip/libgeoip.py", line 22, in
    if lib_path is None: raise GeoIPException('Could not find the GeoIP library (tried "%s"). '
NameError: name 'GeoIPException' is not defined

What am I missing? Is there any way to test my error?
A: It appears you need to install a C library in order to use GeoIP.
Here is a snippet from the file that is throwing that error:

# The shared library for the GeoIP C API. May be downloaded
# from http://www.maxmind.com/download/geoip/api/c/
if lib_path:
    lib_name = None
else:
    # TODO: Is this really the library name for Windows?
    lib_name = 'GeoIP'

Once you have it installed somewhere you need to reference it in your settings.py:

GEOIP_LIBRARY_PATH = '/whatever'

The library was trying to tell you this, but it seems that there is a bug that prevents it from raising the correct error. https://github.com/django/django/pull/103
A: Previously I was facing this issue: from django.contrib.gis.geoip import GeoIP ImportError: cannot import name GeoIP — which was solved when I installed this package:

yum install GeoIP-devel -y

A: The error is caused by moving the virtual environment folder.
The solution is to create the environment again and reinstall the pygeoip library.
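Putting the first answer together for the asker's macOS setup, a settings sketch might look like this (the paths are assumptions — e.g. where Homebrew would place the MaxMind C library — so adjust them to wherever libGeoIP actually landed):

# settings.py
GEOIP_LIBRARY_PATH = '/usr/local/lib/libGeoIP.dylib'  # the C library itself
GEOIP_PATH = '/usr/local/share/GeoIP'                 # directory with the .dat data files

# Quick check in the Django shell afterwards:
#   from django.contrib.gis.geoip import GeoIP
#   g = GeoIP()
#   g.country('google.com')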
Python/django : Cannot import GeoIP
I cannot import GeoIP in django. I searched and tested this error two days, but still could not know problem. Surely, I installed GeoDjango. I'm on MacOS 10.8 following is log by tested by django shell from django.contrib.gis import geoip module 'django.contrib.gis.geoip' from '/Library/Python/2.7/site-packages/django/contrib/gis/geoip/__init__.pyc' it works. even I could find geoip class at Library/Python/2.7/site-packages/django/contrib/gis/geoip/base.py from django.contrib.gis.geoip import geoip Traceback (most recent call last): File "", line 1, in ImportError: cannot import name geoip I also add django.contrib.gis to setting.py. Even I could find geoip class at eclipse shortcut. anyway, I tested one more thing in django shell. from django.contrib.gis.geoip.base import GeoIP Traceback (most recent call last): File "", line 1, in File "/Library/Python/2.7/site-packages/django/contrib/gis/geoip/base.py", line 6, in from django.contrib.gis.geoip.libgeoip import GEOIP_SETTINGS File "/Library/Python/2.7/site-packages/django/contrib/gis/geoip/libgeoip.py", line 22, in if lib_path is None: raise GeoIPException('Could not find the GeoIP library (tried "%s"). ' NameError: name 'GeoIPException' is not defined What I missing? Is there any way to test my error?
[ "It appears you need to install a C library in order to use GeoIP. \nHere is a snippet from the file that is throwing that error.\n# The shared library for the GeoIP C API. May be downloaded\n# from http://www.maxmind.com/download/geoip/api/c/\nif lib_path:\n lib_name = None\nelse:\n # TODO: Is this really the library name for Windows?\n lib_name = 'GeoIP'\n\nOnce you have it installed somewhere you need to reference it in your settings.py\nGEOIP_LIBRARY_PATH = '/whatever'\n\nThe library was trying to tell you this, but it seems that there is a bug that prevents it from raising the correct error. https://github.com/django/django/pull/103\n", "Previously i was facing this issue : from django.contrib.gis.geoip import GeoIP ImportError: cannot import name GeoIP\nwhich is solved when i install this package.\nyum install GeoIP-devel -y\n\n", "The error is caused by moving the virtual environment folder.\nThe solution is to create the environment again and reinstall the pygeoip library.\n" ]
[ 1, 1, 0 ]
[]
[]
[ "django", "geodjango", "geoip", "python" ]
stackoverflow_0012761932_django_geodjango_geoip_python.txt
Q: Selenium executable_path has been deprecated
When running my code I get the below error string:

<string>:36: DeprecationWarning: executable_path has been deprecated, please pass in a Service object

What could possibly be the issue? Below is the Selenium setup:

options = webdriver.ChromeOptions()
prefs = {"download.default_directory" : wd}
options.add_experimental_option("prefs", prefs)
options.add_argument("--headless")
path = (chrome)
driver = webdriver.Chrome(executable_path=path, options=options)
driver.get('https://www.1linelogin.williams.com/1Line/xhtml/login.jsf?BUID=80')

A: This error message

DeprecationWarning: executable_path has been deprecated, please pass in a Service object

means that the keyword executable_path will be removed in upcoming releases. Once executable_path is gone, you have to use an instance of the Service() class as follows:

from selenium import webdriver
from selenium.webdriver.chrome.service import Service

path = (chrome)
s = Service(path)
driver = webdriver.Chrome(service=s)

For more details see here.
A: DeprecationWarning: executable_path has been deprecated, please pass in a Service object
For this you have to pass the Service object:

from selenium.webdriver.chrome.service import Service

service = Service("path of execution")
driver = webdriver.Chrome(service=service)

With this it will work.
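Combining the Service object with the question's existing ChromeOptions (the driver path is a placeholder):

from selenium import webdriver
from selenium.webdriver.chrome.service import Service

options = webdriver.ChromeOptions()
options.add_argument("--headless")
service = Service("/path/to/chromedriver")  # substitute your real driver path

driver = webdriver.Chrome(service=service, options=options)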
Selenium executable_path has been deprecated
When running my code I get the below error string, <string>:36: DeprecationWarning: executable_path has been deprecated, please pass in a Service object What could possibly be the issue? Below is the Selenium setup, options = webdriver.ChromeOptions() prefs = {"download.default_directory" : wd} options.add_experimental_option("prefs", prefs) options.add_argument("--headless") path = (chrome) driver = webdriver.Chrome(executable_path=path, options = options) driver.get('https://www.1linelogin.williams.com/1Line/xhtml/login.jsf?BUID=80')
[ "This error message\n\nDeprecationWarning: executable_path has been deprecated, please pass in a Service object\n\nmeans that the key executable_path will be deprecated in the upcoming releases.\nOnce the key executable_path is deprecated you have to use an instance of the Service() class as follows:\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\n\npath = (chrome)\ns = Service(path)\ndriver = webdriver.Chrome(service=s)\n\nFor more details see here\n", "DeprecationWarning: executable_path has been deprecated, please pass in a Service object\nIn this it have to pass the service object.\nfrom selenium.webdriver.chrome.service import Service\n\nservice = Service(\"path of execution\")\ndriver = webdriver.Chrome(service=service)\n\nFrom this it will work.\n" ]
[ 2, 0 ]
[]
[]
[ "python", "selenium", "selenium_chromedriver", "selenium_webdriver", "web_scraping" ]
stackoverflow_0071482512_python_selenium_selenium_chromedriver_selenium_webdriver_web_scraping.txt
Q: Sphinx autodoc : show-inheritance full name
I have a hierarchy of modules with classes of the same name that subclass each other, e.g.

# foo.py
class Box: ...

# bar.py
class Box: ...

# foobar.py
import foo, bar
class Box(foo.Box, bar.Box): ...

My template for classes is set up with objname as title so that links remain short.

{{ objname | escape | underline }}

.. currentmodule:: {{ module }}

.. autoclass:: {{ objname }}
   :show-inheritance:

This results in the stub for foobar.Box showing the inheritance:

Bases: Box, Box

I would like it to use the full name instead, i.e.:

Bases: foo.Box, bar.Box

Is there a way to change the template used by show-inheritance? I can change the title of the class template to fullname, but this makes the documentation very verbose. I tried to implement my own extension with a hook on autodoc-process-bases, but with no success: I get the list of base classes but I cannot control how they get printed.
A: The autodoc-process-bases hook can in fact use strings rather than the classes themselves. I found a solution by defining my own Sphinx extension:

def process_bases(app, name, obj, options, bases):
    ambiguity = getattr(obj, "__ambiguous_inheritance__", ())
    for i, base in enumerate(bases):
        if base in ambiguity:
            bases[i] = ":class:`{}.{}`".format(base.__module__, base.__name__)

Then I can add an attribute __ambiguous_inheritance__ to the classes of my choice and it will expand the ambiguous links to their full name.

# foobar.py
import foo, bar
class Box(foo.Box, bar.Box):
    __ambiguous_inheritance__ = (foo.Box, bar.Box)
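For completeness, the handler still has to be registered with Sphinx; assuming process_bases lives in a local extension module listed in conf.py's extensions, the wiring is the standard setup() hook:

def setup(app):
    # autodoc-process-bases receives (app, name, obj, options, bases).
    app.connect("autodoc-process-bases", process_bases)
    return {"parallel_read_safe": True}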
Sphinx autodoc : show-inheritance full name
I have a hierarchy of modules with classes of the same name that subclass each other, e.g. # foo.py class Box: ... # bar.py class Box: ... # foobar.py import foo, bar class Box(foo.Box, bar.Box): ... My template for classes is setup with objname as title so that links remain short. {{ objname | escape | underline}} .. currentmodule:: {{ module }} .. autoclass:: {{ objname }} :show-inheritance: This results in the stub for foobar.Box to show the inheritance: Bases: Box, Box I would like it to use the full name instead, i.e.: Bases: foo.Box, bar.Box Is there a way to change the template used by show-inheritance? I can change the title of the class template to fullname but this makes the documentation very verbose. I tried to implement my own extension with a hook to autodoc-process-bases but with no success: I get the list of base classes but I cannot control how they get printed.
[ "The autodoc-process-bases hook can in fact use strings rather than classes themselves. I found a solution defining my own sphinx extension:\ndef process_bases(app, name, obj, options, bases):\n ambiguity = getattr(obj, \"__ambiguous_inheritance__\", ())\n for i, base in enumerate(bases):\n if base in ambiguity:\n bases[i] = \":class:`{}.{}`\".format(base.__module__, base.__name__)\n\nThen I can add an attribute __ambiguous_inheritance__ to the classes of my choice and it will expand the ambiguous links to their full name.\n# foobar.py\nimport foo, bar\nclass Box(foo.Box, bar.Box):\n __ambiguous_inheritance__ = (foo.Box, bar.Box)\n\n" ]
[ 0 ]
[]
[]
[ "autodoc", "python", "python_sphinx" ]
stackoverflow_0074534143_autodoc_python_python_sphinx.txt
Q: CreateProcessW failed error:2 ssh_askpass: posix_spawn: No such file or directory Host key verification failed, jupyter notebook on remote server
So I was following a tutorial to connect to my Jupyter notebook which is running on my remote server, so that I can access it on my local Windows machine. These were the steps that I followed.
On my remote server:

jupyter notebook --no-browser --port=8889

Then on my local machine:

ssh -N -f -L localhost:8888:localhost:8889 *******@**********.de.gyan.com

But I am getting an error:

CreateProcessW failed error:2
ssh_askpass: posix_spawn: No such file or directory
Host key verification failed.

How do I resolve this? Or is there any other way to achieve the same?
A: If you need the DISPLAY variable set because you want to use VcXsrv or another X server in Windows 10, the workaround is to add the host you want to connect to your known_hosts file.
This can be done by calling:

ssh-keyscan -t rsa host.example.com | Out-File ~/.ssh/known_hosts -Append -Encoding ASCII;

A: According to the OpenSSH docs, the ssh client, which will usually prompt for a password on the command line, will try to show a GUI dialog for the user to enter his password if the SSH_ASKPASS and DISPLAY environment variables are set.
On Windows however, this is not properly supported yet, as the needed ssh_askpass binary is missing, and also because this still seems pretty X11-oriented.
Git for Windows however seems to have gotten it working properly.
Of course, if you are just interested in entering your password on the command line, make sure the variables are unset. With the information you provided, it is however impossible to answer why the variable was set in the first place.
A: You can show the values of "environment variables" with the POSIX shell command env. Or you simply type echo $DISPLAY or echo $SSH_ASKPASS. May vary from shell to shell.
Another way to solve the issue could be to insert your public RSA key into the .ssh/authorized_keys file on the target system, but this might only work if you enter this system's ID into your local known_hosts file first, as described above.
How to generate an RSA keypair: use ssh-keygen -t rsa ....
Regards, Stefan B.
A: I believe you have an ssh version <= 8.6.0-beta1. It will work by just updating to version 8.6.0-beta1+ in your elevated PowerShell by:

choco upgrade openssh

You can check your version by:

choco list -lo openssh   # -lo: --localonly
openssh 8.6.0-beta1

A: This happened to me when using Cygwin. There was an ssh.exe in the PATH, c:\windows\system32\ssh.exe, ahead of /usr/bin/ssh. Fixing the PATH avoided the problem.
Edited /etc/profile and added a line like this near the end:

PATH=/usr/bin:${PATH}

A: I had the same problem when connecting to a remote Linux server with local Windows 10 VS Code:

CreateProcessW failed error:2
ssh_askpass: posix_spawn: No such file or directory
Host key verification failed.

I solved this by upgrading OpenSSH to OpenSSH_for_Windows_8.9p1, LibreSSL 3.4.3.

Download OpenSSH from https://github.com/PowerShell/Win32-OpenSSH/releases
Decompress to somewhere
Edit the system environment variable 'PATH', and move it above the old OpenSSH.

A: For me the issue was with a missing exe in the config file under the .ssh folder. My config file pointed to the Git tool connect.exe and I didn't have Git for Windows installed; after installing Git, the issue was resolved.
CreateProcessW failed error:2 ssh_askpass: posix_spawn: No such file or directory Host key verification failed, jupyter notebook on remote server
So I was following a tutorial to connect to my jupyter notebook which is running on my remote server so that I can access it on my local windows machine. These were the steps that I followed. On my remote server : jupyter notebook --no-browser --port=8889 Then on my local machine ssh -N -f -L localhost:8888:localhost:8889 *******@**********.de.gyan.com But I am getting an error CreateProcessW failed error:2 ssh_askpass: posix_spawn: No such file or directory Host key verification failed. How do I resolve this? Or is there is any other way to achieve the same?
[ "If you need the DISPLAY variable set because you want to use VcXsrc or another X-Server in Windows 10 the workaround is to add the host you want to connect to your known_hosts file.\nThis can be done by calling\nssh-keyscan -t rsa host.example.com | Out-File ~/.ssh/known_hosts -Append -Encoding ASCII;\n\n", "According to the openssh docs, the ssh client, which will usually prompt for a password on the command line, will try to show a GUI dialog for the user to enter his password, if SSH_ASKPASS and DISPLAY environment variables are set.\nOn windows however, this is not properly supported yet, as the needed ssh_askpass binary is missing, and also because this seems still pretty X11 oriented.\nGit for windows however seemed to get it working properly.\nOf course, if you are just interested in entering your password on the command line, make sure the variables are unset. With the information you provided, it is however impossible to answer why the variable was set in the first place.\n", "You can show the values of \"environment variables\" with the POSIX shell command \"env\".\nOr you simply type echo $DISPLAY or echo $SSH_ASKPASS. May vary from shell to shell.\nAnother way to solve the issue could be to insert your public RSA key into the .ssh/authorized_keys file on the target system, but this might only work if you enter this systems's ID into your local known_hosts file first, as described above.\nHow to generate a RSA keypair: Use ssh-keygen -t rsa ....\nRegards,\nStefan B.\n", "I believe you have a ssh version <= 8.6.0-beta1. It will work by just updating to version 8.6.0-beta1+ in your elevated powershell by\nchoco upgrade openssh\n\nYou can check your version by\nchoco list -lo openssh # -lo: --localonly\nopenssh 8.6.0-beta1\n\n", "This happened to me when using cygwin. There was an ssh.exe in the PATH c:\\windows\\system32\\ssh.exe ahead of /usr/bin/ssh. Fixing the PATH avoided the problem.\nEdited /etc/profile and added a line like this near the end:\nPATH=/usr/bin:${PATH}\n", "I have the same probelm when I connect remote Linux server with local Windows10 VScode.\nCreateProcessW failed error:2\nssh_askpass: posix_spawn: No such file or directory\nHost key verification failed.\nI solved this by upgrade OpenSSH to OpenSSH_for_Windows_8.9p1, LibreSSL 3.4.3.\n\nDownload OpenSSH from https://github.com/PowerShell/Win32-OpenSSH/releases\nDecompress to somewhere\nEdit system environment variable 'PATH', and move it above old Openssh.\n\n", "For me the issue was with a missing exe in the config file under .ssh folder. My config file pointing to GIT tool connect.exe and I didn't have GIT for windows installed and after installing GIT, the issue is resolved\n" ]
[ 6, 4, 0, 0, 0, 0, 0 ]
[]
[]
[ "jupyter", "jupyter_notebook", "localhost", "python", "remote_server" ]
stackoverflow_0060107347_jupyter_jupyter_notebook_localhost_python_remote_server.txt
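A quick, hedged check related to the askpass answers above: inspect and unset the two environment variables that trigger the GUI askpass path (standard shell commands; adjust for your shell):

# PowerShell
$env:SSH_ASKPASS; $env:DISPLAY                                # show current values
Remove-Item Env:SSH_ASKPASS -ErrorAction SilentlyContinue     # unset
Remove-Item Env:DISPLAY -ErrorAction SilentlyContinue

# POSIX shell (e.g. Cygwin or Git Bash)
echo "$SSH_ASKPASS" "$DISPLAY"
unset SSH_ASKPASS DISPLAY

With both unset, ssh should fall back to prompting for the password on the command line, as the second answer describes.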
Q: How to get Non-contextual Word Embeddings in BERT? I have already installed BERT, but I don't know how to get Non-contextual word embeddings. For example: input: 'Apple' output: [1,2,23,2,13,...] #embedding of 'Apple' How can I get these word embeddings? Thank you. I searched for a method, but no blog explains how. A: BERT uses static subword embeddings in its first layer, where they get summed with learned position embeddings. You can get the embeddings layer by calling model.embeddings.word_embeddings. You should be able to pass the indices that you get from a BertTokenizer to this layer and get the subword embeddings. There are, however, several caveats with static embeddings: these are not word embeddings but subwords that BERT internally uses (less frequent words get segmented into smaller units). The embeddings are of much worse quality than standard word embeddings (Word2Vec, FastText) because they are trained to get combined with position embeddings and serve in the later layers, not as standalone embeddings. There are also methods for getting high-quality word embeddings from BERT (and similar models). Those require training data and some computation. AFAIK the best methods are: Interpreting Pretrained Contextualized Representations via Reductions to Static Embeddings (Bommasani et al., ACL 2020). Obtaining Better Static Word Embeddings Using Contextual Embedding Models (Gupta & Jaggi, ACL 2021) with code on Github. A: Solved. import torch from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") model = AutoModel.from_pretrained("bert-base-uncased") # get the word embedding from BERT def get_word_embedding(word:str): input_ids = torch.tensor(tokenizer.encode(word)).unsqueeze(0) # Batch size 1 # print(input_ids) outputs = model(input_ids) last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple # output[0] is token vector # output[1] is the mean pooling of all hidden states return last_hidden_states[0][1]
How to get Non-contextual Word Embeddings in BERT?
I have already installed BERT, but I don't know how to get Non-contextual word embeddings. For example: input: 'Apple' output: [1,2,23,2,13,...] #embedding of 'Apple' How can I get these word embeddings? Thank you. I searched for a method, but no blog explains how.
[ "BERT uses static subword embeddings in its first layer, where they get summed with learned position embeddings. You can get the embeddings layer by calling model.embeddings.word_embeddings. You should be able to pass the indices that you get from a BertTokenizer to this layer and get the subword embeddings.\nThere are, however, several caveats with static embeddings: these are not word embeddings but subwords that BERT internally uses (less frequent words get segmented into smaller units). The embeddings are of much worse quality than standard word embeddings (Word2Vec, FastText) because they are trained to get combined with position embeddings and serve in the later layers, not as standalone embeddings.\nThere are also methods for getting high-quality word embeddings from BERT (and similar models). Those require training data and some computation. AFAIK the best methods are:\n\nInterpreting Pretrained Contextualized Representations via Reductions to Static Embeddings (Bommasani et al., ACL 2020).\n\nObtaining Better Static Word Embeddings Using Contextual Embedding Models (Gupta & Jaggi, ACL 2021) with code on Github.\n\n\n", "Sloved.\nimport torch\nfrom transformers import AutoTokenizer, AutoModel\ntokenizer = AutoTokenizer.from_pretrained(\"bert-base-uncased\")\n\nmodel = AutoModel.from_pretrained(\"bert-base-uncased\")\n\n# get the word embedding from BERT\ndef get_word_embedding(word:str):\n input_ids = torch.tensor(tokenizer.encode(word)).unsqueeze(0) # Batch size 1\n # print(input_ids)\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n # output[0] is token vector\n # output[1] is the mean pooling of all hidden states\n return last_hidden_states[0][1]\n\n\n\n" ]
[ 1, 0 ]
[]
[]
[ "bert_language_model", "nlp", "python", "pytorch" ]
stackoverflow_0074527928_bert_language_model_nlp_python_pytorch.txt
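To complement the first answer, a minimal sketch of reading the static (non-contextual) subword vectors straight from the embedding layer it mentions. This assumes the Hugging Face transformers package and the bert-base-uncased checkpoint; note that a single word may map to several subword vectors:

import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased")

def get_static_embeddings(word: str) -> torch.Tensor:
    # encode without [CLS]/[SEP] so only the word's own subwords remain
    ids = torch.tensor([tokenizer.encode(word, add_special_tokens=False)])
    with torch.no_grad():
        # the static lookup table, before any attention layers run
        return model.embeddings.word_embeddings(ids)[0]  # shape: (n_subwords, 768)

print(get_static_embeddings("apple").shape)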
Q: Generate list of days for each employee EDIT: This solution worked for me I have the following dataframe in Python: Name days Start Date End Date EMP1 15 8/8/22 8/26/22 EMP2 3 6/9/22 6/13/22 EMP3 5 8/22/22 8/26/22 EMP3 5 8/1/22 8/5/22 EMP3 6 6/17/22 6/24/22 EMP4 4.5 7/18/22 7/22/22 EMP5 5 7/18/22 7/22/22 EMP6 5 8/15/22 8/19/22 EMP7 9 8/22/22 9/2/22 I need to generate the list of dates between the start and end date for each row whilst keeping the employee name . Is this simple to do in python? Really struggling to get it to do it by row . desired output (list each day between start and end date and employees name): Name date EMP1 8/8/22 EMP1 8/9/22 EMP1 8/10/22 EMP1 8/11/22 EMP1 8/12/22 EMP1 8/13/22 EMP1 8/14/22 EMP1 8/15/22 A: Well, convert the Start Date and End Date columns to a date range, then explode on that column: df['Date'] = df.apply(lambda x: pd.date_range(start=x['Start Date'], end=x['End Date']), axis=1) output = df.explode('Date').drop(columns = ['days','Start Date','End Date']) output: >> Name Date 0 EMP1 2022-08-08 0 EMP1 2022-08-09 0 EMP1 2022-08-10 0 EMP1 2022-08-11 0 EMP1 2022-08-12 .. ... ... 8 EMP7 2022-08-29 8 EMP7 2022-08-30 8 EMP7 2022-08-31 8 EMP7 2022-09-01 8 EMP7 2022-09-02 [69 rows x 2 columns]
Generate list of days for each employee
EDIT: This solution worked for me I have the following dataframe in Python: Name days Start Date End Date EMP1 15 8/8/22 8/26/22 EMP2 3 6/9/22 6/13/22 EMP3 5 8/22/22 8/26/22 EMP3 5 8/1/22 8/5/22 EMP3 6 6/17/22 6/24/22 EMP4 4.5 7/18/22 7/22/22 EMP5 5 7/18/22 7/22/22 EMP6 5 8/15/22 8/19/22 EMP7 9 8/22/22 9/2/22 I need to generate the list of dates between the start and end date for each row whilst keeping the employee name . Is this simple to do in python? Really struggling to get it to do it by row . desired output (list each day between start and end date and employees name): Name date EMP1 8/8/22 EMP1 8/9/22 EMP1 8/10/22 EMP1 8/11/22 EMP1 8/12/22 EMP1 8/13/22 EMP1 8/14/22 EMP1 8/15/22
[ "well, convert the start date ,enddate to date range then explode using that\ncolumns :\ndf['Date'] = df.apply(lambda x: pd.date_range(start=x['Start Date'], end=x['End Date']), axis=1)\noutput = df.explode('Date').drop(columns = ['days','Start Date','End Date'])\n\noutput :\n>>\n Name Date\n0 EMP1 2022-08-08\n0 EMP1 2022-08-09\n0 EMP1 2022-08-10\n0 EMP1 2022-08-11\n0 EMP1 2022-08-12\n.. ... ...\n8 EMP7 2022-08-29\n8 EMP7 2022-08-30\n8 EMP7 2022-08-31\n8 EMP7 2022-09-01\n8 EMP7 2022-09-02\n\n[69 rows x 2 columns]\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074534590_pandas_python.txt
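One hedged caveat to the approach above: the Start Date and End Date columns in the question are strings like 8/8/22, so it is safer to parse them explicitly before building ranges. A sketch assuming US-style month/day/year dates:

import pandas as pd

df['Start Date'] = pd.to_datetime(df['Start Date'], format='%m/%d/%y')
df['End Date'] = pd.to_datetime(df['End Date'], format='%m/%d/%y')
df['Date'] = df.apply(lambda r: pd.date_range(r['Start Date'], r['End Date']), axis=1)
out = df.explode('Date')[['Name', 'Date']]  # one row per employee per day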
Q: average of the sum outputted in while loop with python I don't know why I'm having so much trouble with this. I need to get the average of the sum that is outputted from def main():. I have tried to put the average within the def main and tried to use a separate def. both ways do not come out as expected. Below is where I am at currently. def main(): totalMiles = 0 dayTotal = 0 mileageGoal = eval( input("How many miles would you like to run this week? ")) while totalMiles != mileageGoal: dailyTotal = eval( input(f"How many miles did you run on day {dayTotal + 1}? ")) totalMiles = totalMiles + dailyTotal dayTotal = dayTotal + 1 if totalMiles >= mileageGoal: print("You hit your goal! Keep going!") print(f"You ran {totalMiles} miles!") print(f"You completed your goal in {dayTotal} days! Congratulations!") break main() def average(): average = totalMiles / dayTotal return average print('the average miles you ran was:', average) A: After I posted this question I tried a different path which worked. ''' def main(): totalMiles = 0 dayTotal = 0 mileageGoal = eval( input("How many miles would you like to run this week? ")) while totalMiles != mileageGoal: dailyTotal = eval( input(f"How many miles did you run on day {dayTotal + 1}? ")) totalMiles = totalMiles + dailyTotal dayTotal = dayTotal + 1 if totalMiles >= mileageGoal: print("You hit your goal! Keep going!") print(f"You ran {totalMiles} miles!") print( f"You completed your goal in {dayTotal} days! Congratulations!") break average = totalMiles / dayTotal print('the average miles you ran was:', average) main() ''' Although, if anyone has an easier way of going about this, please post it. Anything helps! A: This is because of what is called "scope". If you create a variable in a function, it will (normally) be "scoped" only to that function, so you can't access it from outside the function. A simple fix would be to declare the variables as "global" that you need to use outside of the function: global totalMiles. But if you can avoid that, it is best to do so. Your own fix to it is a good way to do it, just combining them into one function. But you could also have a function that returns a value, so you could return the total miles and day total from the function, assign it to a variable when you call it and then use that variable elsewhere in the code.
average of the sum outputted in while loop with python
I don't know why I'm having so much trouble with this. I need to get the average of the sum that is outputted from def main():. I have tried to put the average within the def main and tried to use a separate def. both ways do not come out as expected. Below is where I am at currently. def main(): totalMiles = 0 dayTotal = 0 mileageGoal = eval( input("How many miles would you like to run this week? ")) while totalMiles != mileageGoal: dailyTotal = eval( input(f"How many miles did you run on day {dayTotal + 1}? ")) totalMiles = totalMiles + dailyTotal dayTotal = dayTotal + 1 if totalMiles >= mileageGoal: print("You hit your goal! Keep going!") print(f"You ran {totalMiles} miles!") print(f"You completed your goal in {dayTotal} days! Congratulations!") break main() def average(): average = totalMiles / dayTotal return average print('the average miles you ran was:', average)
[ "after I posted this question I tried a different path which work.\n'''\ndef main():\n totalMiles = 0\n dayTotal = 0\n mileageGoal = eval(\n input(\"How many miles would you like to run this week? \"))\n while totalMiles != mileageGoal:\n dailyTotal = eval(\n input(f\"How many miles did you run on day {dayTotal + 1}? \"))\n totalMiles = totalMiles + dailyTotal\n dayTotal = dayTotal + 1\n if totalMiles >= mileageGoal:\n print(\"You hit your goal! Keep going!\")\n print(f\"You ran {totalMiles} miles!\")\n print(\n f\"You completed your goal in {dayTotal} days! Congratulations!\")\n break\n average = totalMiles / dayTotal\n print('the average miles you ran was:', average)\n\n\nmain()\n\n'''\nalthough, if anyone has an easier way of going about this please post it. Anything helps!\n", "This is because of what is called \"scope\". If you create a variable in a function, it will (normally) be \"scoped\" only to that function, so you can't access it from outside the function.\nA simple fix would be to declare the variables as \"global\" that you need to use outside of the function: global totalMiles. But if you can avoid that, it is best to do so.\nYour own fix to it is a good way to do it, just combining them into one function. But you could also have a function that returns a value, so you could return the total miles and day total from the function, assign it to a variable when you call it and then use that variable elsewhere in the code.\n" ]
[ 0, 0 ]
[]
[]
[ "python", "while_loop" ]
stackoverflow_0074534869_python_while_loop.txt
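To make the second answer's return-values suggestion concrete, a minimal hypothetical refactor (not the asker's exact program; float() replaces eval(), and < replaces != so overshooting the goal cannot loop forever):

def run_week(mileage_goal):
    total_miles = day_total = 0
    while total_miles < mileage_goal:
        total_miles += float(input(f"How many miles did you run on day {day_total + 1}? "))
        day_total += 1
    return total_miles, day_total  # hand the results back to the caller

miles, days = run_week(float(input("How many miles would you like to run this week? ")))
print('the average miles you ran was:', miles / days)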
Q: Python subprocess can't find Pythonpath module I am trying to use subprocess.run(['python3.9', "scripts/example.py"], check=True). example.py uses a module that I have added to the PYTHONPATH. However, whenever I run the above line, the module is not found. The confusing part for me is that when printing sys.path inside of example.py I do see the path to my module. But when I am running os.system("which python") or os.system("echo $PYTHONPATH") inside example.py, it returns/prints nothing. A: Looks like you need to check the doc for the env parameter of subprocess.run and set it appropriately. Side note: typically you would want to use the exact same Python interpreter for the sub-process call, so you would write: subprocess.run([sys.executable, 'scripts/example.py'], ...), unless of course you really do want 'python3.9' explicitly and nothing else (which would be surprising).
Python subprocess can't find Pythonpath module
I am trying to use subprocess.run(['python3.9', "scripts/example.py"], check=True). example.py uses a module that I have added to the PYTHONPATH. However, whenever I run the above line, the module is not found. The confusing part for me is that when printing sys.path inside of example.py I do see the path to my module. But when I am running os.system("which python") or os.system("echo $PYTHONPATH") inside example.py, it returns/prints nothing.
[ "Looks like you need to check the doc for the env parameter of subprocess.run and set it appropriately.\nSide note: typically you would want to use the exact same Python interpreter for the sub-process call, so you would write: subprocess.run([sys.executable, 'scripts/example.py'], ...), unless of course you really do want 'python3.9' explicitly and nothing else (which would be surprising).\n" ]
[ 1 ]
[]
[]
[ "python", "pythonpath", "subprocess" ]
stackoverflow_0074532093_python_pythonpath_subprocess.txt
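A sketch of the env approach the answer points to, assuming the extra module lives in /path/to/libs (replace with your real path). The key pitfall is that passing env= replaces the child's whole environment, so start from a copy of os.environ:

import os, sys, subprocess

env = os.environ.copy()
env["PYTHONPATH"] = "/path/to/libs" + os.pathsep + env.get("PYTHONPATH", "")
subprocess.run([sys.executable, "scripts/example.py"], check=True, env=env)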
Q: Check when a column's value changes by a large amount in pandas I am looking to write some code to check when a column's value changes by more than a specific amount, for example more than 20% eg: # | A | --+------+ 1 | 20 | 2 | 21 | 3 | 20 | 4 | 22 | 5 | 35 | 6 | 25 | it would flag row 5 A: you can use something like this: df=pd.DataFrame(data={'id':[1,2,3,4,5,6],'A':[20,21,20,22,35,25]}) ''' id A 0 1 20 1 2 21 2 3 20 3 4 22 4 5 35 5 6 25 ''' df['percent'] = (df['A'] / df['A'].shift(1) - 1).fillna(0) * 100 #calculate percentage print(df) ''' id A percent 0 1 20 0.000000 1 2 21 5.000000 2 3 20 -4.761905 3 4 22 10.000000 4 5 35 59.090909 5 6 25 -28.571429 ''' df.loc[df['percent']>= 20,"flag"]="Y" #create a new column #for negative values #df.loc[df['percent'].abs() >= 20,"flag"]="Y"
Check when a column's value changes by a large amount in pandas
I am looking to write some code to check when a column's value changes by more than a specific amount, for example more than 20% eg: # | A | --+------+ 1 | 20 | 2 | 21 | 3 | 20 | 4 | 22 | 5 | 35 | 6 | 25 | it would flag row 5
[ "you can use something like this:\ndf=pd.DataFrame(data={'id':[1,2,3,4,5,6],'A':[20,21,20,22,35,25]})\n'''\n id A\n0 1 20\n1 2 21\n2 3 20\n3 4 22\n4 5 35\n5 6 25\n'''\ndf['percent'] = (df['A'] / df['A'].shift(1) - 1).fillna(0) * 100 #calculate percentage\n\nprint(df)\n'''\n id A percent\n0 1 20 0.000000\n1 2 21 5.000000\n2 3 20 -4.761905\n3 4 22 10.000000\n4 5 35 59.090909\n5 6 25 -28.571429\n'''\ndf.loc[df['percent']>= 20,\"flag\"]=\"Y\" #create a new column\n\n#for negative values\n#df.loc[df['percent'].abs() >= 20,\"flag\"]=\"Y\"\n\n" ]
[ 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074534797_dataframe_pandas_python.txt
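As a variant of the answer, pandas has a built-in pct_change that performs the same shift-and-divide; a sketch flagging moves of 20% or more in either direction:

df["flag"] = (df["A"].pct_change().abs() >= 0.20).map({True: "Y", False: ""})
print(df[df["flag"] == "Y"])  # flags the jump to 35 and the drop back to 25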
Q: Python Class private attribute created inside an exec function in __init__ method becomes public attribute instead of private attribute I am trying to create a class Customer which creates its attributes from a sqlalchemy query object. data = {'Name':'John Doe','Age':67} #in the real code, data is not a dictionary but an object. class Customer: def __init__(self,data) -> None: assert type(data) == Customers for key in data.keys(): exec(f"self.__{key[1:] if key.startswith('_') else key} = data['{key}']",{'self':self,'data':data}) @property def name(self): return self.__Name data['bank'] = green person = Customer(data) I was able to access the Customer attributes as a public attribute: print(person.__Name) it prints out John Doe but when I try to access the attribute through the name method, like this : print(person.name) it raises an error: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\Users\HP\PyProject\FlaskProject\green\bank\modelx.py", line 66, in name return self.__Name AttributeError: 'Customer' object has no attribute '_Customer__Name' How can I make the class attributes created in the exec function act as private attributes of the class and not public attributes. A: You don't need exec here. Use setattr. for key in data: setattr(self, key[1:] if key.startswith('_') else key, data[key]) Also, use isinstance, not type comparison. assert isinstance(data, Customers) though in your example, data is not an instance of Customers; it's an ordinary dict passed to Customer.__init__.
Python Class private attribute created inside an exec function in __init__ method becomes public attribute instead of private attribute
I am trying to create a class Customer which creates its attributes from a sqlalchemy query object. data = {'Name':'John Doe','Age':67} #in the real code, data is not a dictionary but an object. class Customer: def __init__(self,data) -> None: assert type(data) == Customers for key in data.keys(): exec(f"self.__{key[1:] if key.startswith('_') else key} = data['{key}']",{'self':self,'data':data}) @property def name(self): return self.__Name data['bank'] = green person = Customer(data) I was able to access the Customer attributes as a public attribute: print(person.__Name) it prints out John Doe but when I try to access the attribute through the name method, like this : print(person.name) it raises an error: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\Users\HP\PyProject\FlaskProject\green\bank\modelx.py", line 66, in name return self.__Name AttributeError: 'Customer' object has no attribute '_Customer__Name' How can I make the class attributes created in the exec function act as private attributes of the class and not public attributes.
[ "You don' need exec here. Use setattr.\nfor key in data:\n setattr(self, key[1:] if key.startswith('_') else key, data[key])\n\nAlso, use isinstance, not type comparison.\nassert isinstance(data, Customers)\n\nthough in your example, data is not an instance of Customers; it's an ordinary dict passed to Customer.__init__.\n" ]
[ 0 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074535070_python_python_3.x.txt
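One hedged caveat to the answer: the property in the question reads self.__Name, which Python name-mangles to _Customer__Name, so a plain setattr(self, 'Name', ...) will not be found by it. A sketch that reproduces the mangled name explicitly (mangling happens at compile time, so setattr must spell it out):

class Customer:
    def __init__(self, data):
        for key, value in data.items():
            # emulate double-underscore name mangling by hand
            setattr(self, f"_{type(self).__name__}__{key.lstrip('_')}", value)

    @property
    def name(self):
        return self.__Name  # compiled as self._Customer__Name

person = Customer({'Name': 'John Doe', 'Age': 67})
print(person.name)  # John Doe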
Q: 502 Bad Gateway nginx/1.18.0 (Ubuntu) Django Digital ocean I want to deploy my django project with Ubuntu and Digital Ocean. It's not the first time I've done it, but now I keep getting this error and I don't know what's causing it. I used this video as a guide for the process: https://www.youtube.com/watch?v=US9BkvzuIxw. It's really annoying because the only message that I get is "502 Bad Gateway nginx/1.18.0 (Ubuntu)" and what I found on the internet to solve it doesn't work. All nginx tests I run say it works correctly. This is the code where I think the error must be: /etc/nginx/sites-available/locallibrary server { server_name vvmwp.nl; location = /favicon.ico { access_log off; log_not_found off; } location /static/ { root /home/sammy/locallibrary; } location / { include proxy_params; proxy_pass http://unix:/run/gunicorn.sock; } } /etc/systemd/system/gunicorn.service [Unit] Description=gunicorn daemon Requires=gunicorn.socket After=network.target [Service] User=sammy Group=sammy EnvironmentFile=/home/sammy/locallibrary/env WorkingDirectory=/home/sammy/locallibrary ExecStart=/home/sammy/env/bin/gunicorn \ --access-logfile - \ --workers 3 \ --bind unix:/run/gunicorn.sock \ locallibrary.wsgi:application [Install] WantedBy=multi-user.target /etc/systemd/system/gunicorn.socket [Unit] Description=gunicorn socket [Socket] ListenStream=/run/gunicorn.sock [Install] WantedBy=sockets.target Thanks in advance A: I faced the same issue and nothing worked, but then I killed the process on the previous port (in my case 8080) and installed nginx and pm2 again, and everything worked fine.
502 Bad Gateway nginx/1.18.0 (Ubuntu) Django Digital ocean
I want to deploy my django project with Ubuntu and Digital Ocean. It's not the first time I've done it, but now I keep getting this error and I don't know what's causing it. I used this video as a guide for the process: https://www.youtube.com/watch?v=US9BkvzuIxw. It's really annoying because the only message that I get is "502 Bad Gateway nginx/1.18.0 (Ubuntu)" and what I found on the internet to solve it doesn't work. All nginx tests I run say it works correctly. This is the code where I think the error must be: /etc/nginx/sites-available/locallibrary server { server_name vvmwp.nl; location = /favicon.ico { access_log off; log_not_found off; } location /static/ { root /home/sammy/locallibrary; } location / { include proxy_params; proxy_pass http://unix:/run/gunicorn.sock; } } /etc/systemd/system/gunicorn.service [Unit] Description=gunicorn daemon Requires=gunicorn.socket After=network.target [Service] User=sammy Group=sammy EnvironmentFile=/home/sammy/locallibrary/env WorkingDirectory=/home/sammy/locallibrary ExecStart=/home/sammy/env/bin/gunicorn \ --access-logfile - \ --workers 3 \ --bind unix:/run/gunicorn.sock \ locallibrary.wsgi:application [Install] WantedBy=multi-user.target /etc/systemd/system/gunicorn.socket [Unit] Description=gunicorn socket [Socket] ListenStream=/run/gunicorn.sock [Install] WantedBy=sockets.target Thanks in advance
[ "I faced the same issue and nothing worked but then I killed the previous port in my case was 8080 and installed nginx and pm2 again and everything worked fine.\n" ]
[ 0 ]
[]
[]
[ "bad_gateway", "django", "nginx", "python" ]
stackoverflow_0071609299_bad_gateway_django_nginx_python.txt
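When debugging a setup like this one, a few hedged diagnostics (standard systemd and nginx tooling, using the unit names from the question) usually show whether gunicorn or nginx is the failing side:

sudo systemctl status gunicorn.socket gunicorn.service   # is the socket/service running?
sudo journalctl -u gunicorn --since "10 min ago"         # gunicorn tracebacks, bad EnvironmentFile, etc.
sudo nginx -t                                            # nginx config syntax check
sudo tail -F /var/log/nginx/error.log                    # the actual 502 cause, e.g. permission denied on the socket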
Q: tkinter tab width incorrect When creating text on a canvas using the create_text method the width of a tab is not what it should be, as indicated by font.measure. import tkinter as tk from tkinter.font import Font root = tk.Tk() canvas = tk.Canvas(root, width=300, height=300) canvas.pack() font = Font(family='Arial', size=12) s1 = "a\tb" s2 = "a c" print("Width:", s1, font.measure(s1)) # Width: a b 30 print("Width:", s2, font.measure(s2)) # Width: a c 33 canvas.create_text(10, 10, text=s1, font=font, anchor="nw") canvas.create_text(10, 50, text=s2, font=font, anchor="nw") root.mainloop() The results of font.measure suggest that the line with spaces should be a little longer, but what it displays is: Showing that the width of the tab is significantly larger than the spaces. Using different fonts will result in differently sized tabs, but still inaccurate measurements. The measured width of the text without tabs is correct. How can I get the correct tab width? Is this a bug? A: The problem you've highlighted is due to the Canvas text object not having a tabs attribute. Maybe someone knows how to work around that but usually when displaying tabulation a tk.Text object is used which has font and tabs attributes. So the easiest solution is to make a tk.Text object T, define font and tabs, insert your messages into T then insert T into a Canvas window W. Something like this. import tkinter as tk from tkinter import font app = tk.Tk() fixed = 1 # test Arial & Courier New FONT = font.Font(family = ["arial", "courier new"][fixed], size = 12) wide = 4 * FONT.measure(0) high = FONT.metrics("linespace") dent = (wide, tk.LEFT) canvas = tk.Canvas(app) canvas.grid(sticky =tk.NSEW) s1, s2 = "a\tb\n", "a c" T = tk.Text( canvas, background = "SystemButtonFace", relief = tk.FLAT, font = FONT, tabs = dent, width = 20, height = 3, highlightthickness = 0, borderwidth = 0) W = canvas.create_window(wide, high, window = T, anchor = tk.NW) T.insert("1.0", s1) T.insert("insert", s2) app.mainloop() A: Will this help? In line 12, I just added \t s2 = "a\tc" Result:
tkinter tab width incorrect
When creating text on a canvas using the create_text method the width of a tab is not what it should be, as indicated by font.measure. import tkinter as tk from tkinter.font import Font root = tk.Tk() canvas = tk.Canvas(root, width=300, height=300) canvas.pack() font = Font(family='Arial', size=12) s1 = "a\tb" s2 = "a c" print("Width:", s1, font.measure(s1)) # Width: a b 30 print("Width:", s2, font.measure(s2)) # Width: a c 33 canvas.create_text(10, 10, text=s1, font=font, anchor="nw") canvas.create_text(10, 50, text=s2, font=font, anchor="nw") root.mainloop() The results of font.measure suggest that the line with spaces should be a little longer, but what it displays is: Showing that the width of the tab is significantly larger than the spaces. Using different fonts will result in differently sized tabs, but still inaccurate measurements. The measured width of the text without tabs is correct. How can I get the correct tab width? Is this a bug?
[ "The problem you've highlighted is due to the Canvas text object not having a tabs attribute.\nMaybe someone knows how to work around that but usually when displaying tabulation a tk.Text object is used which has font and tabs attributes.\nSo the easiest solution is to make a tk.Text object T, define font and tabs,\ninsert your messages into T then insert T into a Canvas window W.\nSomething like this.\nimport tkinter as tk\nfrom tkinter import font\n\napp = tk.Tk()\n\nfixed = 1 # test Arial & Courier New\nFONT = font.Font(family = [\"arial\", \"courier new\"][fixed], size = 12)\nwide = 4 * FONT.measure(0)\nhigh = FONT.metrics(\"linespace\")\ndent = (wide, tk.LEFT)\n\ncanvas = tk.Canvas(app)\ncanvas.grid(sticky =tk.NSEW)\n\ns1, s2 = \"a\\tb\\n\", \"a c\"\n\nT = tk.Text(\n canvas,\n background = \"SystemButtonFace\",\n relief = tk.FLAT,\n font = FONT,\n tabs = dent,\n width = 20,\n height = 3,\n highlightthickness = 0,\n borderwidth = 0)\n\nW = canvas.create_window(wide, high, window = T, anchor = tk.NW)\n\nT.insert(\"1.0\", s1)\nT.insert(\"insert\", s2)\n\napp.mainloop()\n\n", "Does this will help? In line 12, I just added \\t\ns2 = \"a\\tc\"\n\nResult:\n\n" ]
[ 1, 0 ]
[]
[]
[ "canvas", "fonts", "python", "tkinter" ]
stackoverflow_0072148360_canvas_fonts_python_tkinter.txt
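A lighter hedged workaround when exact tab stops are not required: expand tabs to spaces before drawing, so create_text never sees a tab at all. str.expandtabs counts characters, so this is exact for monospaced fonts but only approximate for proportional ones like Arial. Reusing the question's canvas and font:

canvas.create_text(10, 90, text="a\tb".expandtabs(4), font=font, anchor="nw")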
Q: how to stop the automatic tab closing in selenium? from selenium import webdriver driver=webdriver.Chrome(executable_path="C:\\Users\\DELL\\PycharmProjects\\drivers\\chromedriver.exe") driver.get("http://www.letskodeit.com") I had written the code like this and it executed without any errors, but the concern is that the website it opens automatically via the get command closes automatically right after it is opened. Please help me in sorting out this issue. A: If you want the driver to stay open, you have to use the detach option when creating the driver instance. As follows: from selenium import webdriver from selenium.webdriver.chrome.options import Options chrome_options = Options() chrome_options.add_experimental_option("detach", True) driver = webdriver.Chrome(options=chrome_options, executable_path="C:\\Users\\DELL\\PycharmProjects\\drivers\\chromedriver.exe") driver.get("http://www.letskodeit.com")
how to stop the automatic tab closing in selenium?
from selenium import webdriver driver=webdriver.Chrome(executable_path="C:\\Users\\DELL\\PycharmProjects\\drivers\\chromedriver.exe") driver.get("http://www.letskodeit.com") I had written the code like this and it executed without any errors, but the concern is that the website it opens automatically via the get command closes automatically right after it is opened. Please help me in sorting out this issue.
[ "If you want to keep the driver to stay open, you have to use the detach option when creating the driver instance.\nAs following:\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nchrome_options = Options()\nchrome_options.add_experimental_option(\"detach\", True)\ndriver = webdriver.Chrome(options=chrome_options, executable_path=\"C:\\\\Users\\\\DELL\\\\PycharmProjects\\\\drivers\\\\chromedriver.exe\")\ndriver.get(\"http://www.letskodeit.com\")\n\n" ]
[ 0 ]
[]
[]
[ "python", "selenium", "selenium_chromedriver", "selenium_webdriver" ]
stackoverflow_0074535111_python_selenium_selenium_chromedriver_selenium_webdriver.txt
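A hedged side note: in Selenium 4 the executable_path argument is deprecated in favor of a Service object, so the detach option from the answer would be wired up like this:

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_experimental_option("detach", True)  # keep the browser open after the script ends
service = Service("C:\\Users\\DELL\\PycharmProjects\\drivers\\chromedriver.exe")
driver = webdriver.Chrome(service=service, options=options)
driver.get("http://www.letskodeit.com")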
Q: How to write to an abstract property in Python 3.4+ In Python 3.6, Let's say I have an abstract class MyAbstractClass from abc import ABC, abstractmethod class MyAbstractClass(ABC): @property @abstractmethod def myProperty(self): pass and a class MyInstantiatableClass inherit from it. So how do I write to the property myProperty on instantiation of an object from this class? I'd like to be able to both set and get myProperty. Below doesn't work. from MyAbstractClass import MyAbstractClass class MyInstantiatableClass(MyAbstractClass): def __init__(self, desiredValueOfMyProperty): ???? @myProperty.setter def myProperty(self, desiredValueOfMyProperty): # value coming from __init__ self._myProperty = desiredValueOfMyProperty And a main function, say, from MyInstantiatableClass import MyInstantiatableClass def main(): MyInstantiatableClass(3) # 3 is the desiredValueOfMyProperty for this instantiation MyInstantiatableClass(5) # 5 is the desiredValueOfMyProperty for this instantiation A: It seems there's a discrepancy here; using @property along with @abstractmethod doesn't seem to enforce classes that inherit from your abc to need to define both setter and getter. Using this: @property @abstractmethod def myProperty(self): pass @myProperty.setter @abstractmethod def myProperty(self): pass and then providing an implementation only for the getter in the class works and allows for instantiation: @property def myProperty(self): return self._myProperty This is due to the fact that only one name (myProperty) appears in the namespace of the ABC, when you override in the base class, you only need to define this one name. There's a way around that enforces it. You can create separate abstract methods and pass them on to property directly: class MyAbstractClass(ABC): @abstractmethod def getProperty(self): pass @abstractmethod def setProperty(self, val): pass myAbstractProperty = property(getProperty, setProperty) Providing an implementation for this abc now requires both getter and setter to have an implementation (both names that have been listed as abstractmethods in MyAbstractClass namespace need to have an implementation): class MyInstantiatableClass(MyAbstractClass): def getProperty(self): return self._Property def setProperty(self, val): self._Property = val myAbstractProperty = property(getProperty, setProperty) Implementing them is exactly the same as any old property. There's no difference there. A: For example, you can define the abstract getter, setter and deleter in Person abstract class, override them in Student class which extends Person abstract class as shown below. *@abstractmethod must be the innermost decorator otherwise error occurs: from abc import ABC, abstractmethod class Person(ABC): @property @abstractmethod # The innermost decorator def name(self): # Abstract getter pass @name.setter @abstractmethod # The innermost decorator def name(self, name): # Abstract setter pass @name.deleter @abstractmethod # The innermost decorator def name(self): # Abstract deleter pass class Student(Person): def __init__(self, name): self._name = name @property def name(self): # Overrides abstract getter return self._name @name.setter def name(self, name): # Overrides abstract setter self._name = name @name.deleter def name(self): # Overrides abstract deleter del self._name Then, you can instantiate Student class and call the getter, setter and deleter as shown below: obj = Student("John") # Instantiates "Student" class print(obj.name) # Getter obj.name = "Tom" # Setter print(obj.name) # Getter del obj.name # Deleter print(hasattr(obj, "name")) Output: John Tom False You can see my answer which explains more about abstract property.
How to write to an abstract property in Python 3.4+
In Python 3.6, Let's say I have an abstract class MyAbstractClass from abc import ABC, abstractmethod class MyAbstractClass(ABC): @property @abstractmethod def myProperty(self): pass and a class MyInstantiatableClass inherit from it. So how do I write to the property myProperty on instantiation of an object from this class? I'd like to be able to both set and get myProperty. Below doesn't work. from MyAbstractClass import MyAbstractClass class MyInstantiatableClass(MyAbstractClass): def __init__(self, desiredValueOfMyProperty): ???? @myProperty.setter def myProperty(self, desiredValueOfMyProperty): # value coming from __init__ self._myProperty = desiredValueOfMyProperty And a main function, say, from MyInstantiatableClass import MyInstantiatableClass def main(): MyInstantiatableClass(3) # 3 is the desiredValueOfMyProperty for this instantiation MyInstantiatableClass(5) # 5 is the desiredValueOfMyProperty for this instantiation
[ "It seems there's a discrepancy here; using @property along with @abstractmethod doesn't seem to enforce classes that inherit from your abc to need to define both setter and getter. Using this:\n@property\n@abstractmethod\ndef myProperty(self):\n pass\n\n@myProperty.setter\n@abstractmethod\ndef myProperty(self):\n pass\n\nand then providing an implementation only for the getter in the class works and allows for instantiation:\n@property\ndef myProperty(self):\n return self._myProperty\n\nThis is due to the fact that only one name (myProperty) appears in the namespace of the ABC, when you override in the base class, you only need to define this one name.\nThere's a way around that enforces it. You can create separate abstract methods and pass them on to property directly:\nclass MyAbstractClass(ABC):\n\n @abstractmethod\n def getProperty(self):\n pass\n\n @abstractmethod\n def setProperty(self, val):\n pass\n\n myAbstractProperty = property(getProperty, setProperty)\n\nProviding an implementation for this abc now requires both getter and setter to have an implementation (both names that have been listed as abstractmethods in MyAbstractClass namespace need to have an implementation):\nclass MyInstantiatableClass(MyAbstractClass):\n\n def getProperty(self):\n return self._Property\n\n def setProperty(self, val):\n self._Property = val\n myAbstractProperty = property(getProperty, setProperty)\n\nImplementing them is exactly the same as any old property. There's no difference there.\n", "For example, you can define the abstract getter, setter and deleter in Person abstract class, override them in Student class which extends Person abstract class as shown below. *@abstractmethod must be the innermost decorator otherwise error occurs:\nfrom abc import ABC, abstractmethod\n\nclass Person(ABC):\n @property\n @abstractmethod # The innermost decorator\n def name(self): # Abstract getter\n pass\n\n @name.setter\n @abstractmethod # The innermost decorator\n def name(self, name): # Abstract setter\n pass\n\n @name.deleter\n @abstractmethod # The innermost decorator\n def name(self): # Abstract deleter\n pass\n\nclass Student(Person):\n def __init__(self, name):\n self._name = name\n \n @property\n def name(self): # Overrides abstract getter\n return self._name\n \n @name.setter\n def name(self, name): # Overrides abstract setter\n self._name = name\n \n @name.deleter\n def name(self): # Overrides abstract deleter \n del self._name\n\nThen, you can instantiate Student class and call the getter, setter and deleter as shown below:\nobj = Student(\"John\") # Instantiates \"Student\" class\nprint(obj.name) # Getter\nobj.name = \"Tom\" # Setter\nprint(obj.name) # Getter\ndel obj.name # Deleter\nprint(hasattr(obj, \"name\"))\n\nOutput:\nJohn\nTom\nFalse\n\nYou can see my answer which explains more about abstract property.\n" ]
[ 6, 0 ]
[]
[]
[ "abstract_class", "inheritance", "python", "python_3.x" ]
stackoverflow_0044376851_abstract_class_inheritance_python_python_3.x.txt
Q: Discord.py not executing This is my code in Python. Whenever I click the start button it automatically stops. By the way, I am using the Replit IDE. import discord class MyClient(discord.Client): async def on_ready(self): print(f'Logged on as {self.user}!') async def on_message(self, message): print(f'Message from {message.author}: {message.content}') intents = discord.Intents.default() intents.message_content = True client = MyClient(intents=intents) class MyClient(discord.Client): async def on_ready(self): print(f'Logged on as {self.user}!') async def on_message(self, message): print(f'Message from {message.author}: {message.content}') intents = discord.Intents.default() intents.message_content = True client = MyClient(intents=intents) import asyncio async def client_start(): await client.start('XYZ') I really don't know what to try. I expected the application to start automatically. I am not a Python expert; my friend asked me this, and now I am asking you guys. Kindly help. A: You need to start a server, otherwise, it won't keep running. Try this: https://github.com/sdrrv/Fate-Wielding-Bot/blob/master/keep_alive.py A: You're never starting it. You created a client_start() function, but you're not using it so all this script does is create some variables & then exit. Next, your code seems to have the same things twice, barring different indentation levels. That doesn't seem right. Is this a mistake in your post or does it actually look like that? Also, stop abusing replit to run bots on. It's not what the platform was designed for, and it brings loads of issues along with it that you can't do anything about (like getting ratelimited because of other people). Get an actual VPS or host it yourself.
Discord.py not executing
This is my code in Python. Whenever I click the start button it automatically stops. By the way, I am using the Replit IDE. import discord class MyClient(discord.Client): async def on_ready(self): print(f'Logged on as {self.user}!') async def on_message(self, message): print(f'Message from {message.author}: {message.content}') intents = discord.Intents.default() intents.message_content = True client = MyClient(intents=intents) class MyClient(discord.Client): async def on_ready(self): print(f'Logged on as {self.user}!') async def on_message(self, message): print(f'Message from {message.author}: {message.content}') intents = discord.Intents.default() intents.message_content = True client = MyClient(intents=intents) import asyncio async def client_start(): await client.start('XYZ') I really don't know what to try. I expected the application to start automatically. I am not a Python expert; my friend asked me this, and now I am asking you guys. Kindly help.
[ "You need to start a server, otherwise, it won't keep running.\nTry this:\nhttps://github.com/sdrrv/Fate-Wielding-Bot/blob/master/keep_alive.py\n", "You're never starting it. You created a client_start() function, but you're not using it so all this script does is create some variables & then exit.\nNext, your code seems to have the same things twice, barring different indentation levels. That doesn't seem right. Is this a mistake in your post or does it actually look like that?\nAlso, stop abusing replit to run bots on. It's not what the platform was designed for, and it brings loads of issues along with it that you can't do anything about (like getting ratelimited because of other people). Get an actual VPS or host it yourself.\n" ]
[ 1, 0 ]
[]
[]
[ "discord", "discord.py", "python", "replit" ]
stackoverflow_0074534560_discord_discord.py_python_replit.txt
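To make the second answer's point concrete, a minimal runnable sketch with a single class definition and the client actually started (the token string is a placeholder):

import discord

class MyClient(discord.Client):
    async def on_ready(self):
        print(f'Logged on as {self.user}!')

    async def on_message(self, message):
        print(f'Message from {message.author}: {message.content}')

intents = discord.Intents.default()
intents.message_content = True
client = MyClient(intents=intents)
client.run('XYZ')  # blocking call; this line was missing from the question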
Q: Is it safe to create a Python class which is simultaneously sync and async iterator? Is it safe/bad practice to make a class both iterator and async iterator? Example: import asyncio class Iter: def __init__(self): self.i = 0 self.elems = list(range(10)) def __iter__(self): return self def __aiter__(self): return self def __next__(self): if self.i >= len(self.elems): raise StopIteration self.i += 1 return self.elems[self.i - 1] async def __anext__(self): if self.i >= len(self.elems): raise StopAsyncIteration self.i += 1 return self.elems[self.i - 1] async def main(): print("async usage:") async for elem in Iter(): print(elem) print("sync usage:") for elem in Iter(): print(elem) try: asyncio.run(main()) except RuntimeError: await main() I surfed the net and didn't find anybody asking a similar question or discussing the problem. A: The problem there is not the sync iterator: unless used in multi-threaded code it should be ok (but not otherwise). Your async code, however, keeps the state in a single instance, and if it is ever used in more than one task at once, the states will mix up. (Also, if you have a single task using it, but nest iterations - async or otherwise - in the same instance). It is easily resolved by returning an auxiliary object that will contain your i counter, for each of __iter__ and __aiter__ - or simply return a generator with the counter in a closure - that is way easier than implementing __next__ and __anext__: class Iter: def __init__(self): self.elems = list(range(10)) def __iter__(self): return iter(self.elems) def __aiter__(self): async def _iter(): for elem in self.elems: yield elem return _iter() Now if you need some custom logic to actually go into __next__, other than just sequentially yield the values of a list, the associated iterator class logic can be of help: ... class _InnerItter: def __init__(self, parent): self.parent = parent self.i = 0 def __next__(self): if self.i >= len(self.parent.elems): raise StopIteration self.i += 1 return self.parent.elems[self.i - 1] async def __anext__(self): if self.i >= len(self.parent.elems): raise StopAsyncIteration self.i += 1 return self.parent.elems[self.i - 1] class Iter: def __init__(self): self.elems = list(range(10)) def __iter__(self): return _InnerItter(self) def __aiter__(self): return _InnerItter(self) ...
Is it safe to create a Python class which is simultaneously sync and async iterator?
Is it safe/bad practice to make a class both iterator and async iterator? Example: import asyncio class Iter: def __init__(self): self.i = 0 self.elems = list(range(10)) def __iter__(self): return self def __aiter__(self): return self def __next__(self): if self.i >= len(self.elems): raise StopIteration self.i += 1 return self.elems[self.i - 1] async def __anext__(self): if self.i >= len(self.elems): raise StopAsyncIteration self.i += 1 return self.elems[self.i - 1] async def main(): print("async usage:") async for elem in Iter(): print(elem) print("sync usage:") for elem in Iter(): print(elem) try: asyncio.run(main()) except RuntimeError: await main() I surfed the net and didn't find anybody asking a similar question or discussing the problem.
[ "The problem there is not the sync iterator: unless used in multi-threaded code it should be ok (but not otherwise).\nYour async code, however, keeps the state in a single instance, and if it is ever used in more than a task at once, the states will mix up.\n(Also, if you have a single task using it, but nests iterations - async or otherwise - in the same instance).\nIt is easily resolved by returning an auxiliar object that will contain your i counter, for each of __iter__ and __aiter__ - or simply return a generator with the counter in a closure - that is way easier than implementing __next__ and __anext__:\nclass Iter:\n def __init__(self):\n self.elems = list(range(10))\n \n def __iter__(self):\n return iter(self.elems)\n \n def __aiter__(self):\n async def _iter():\n yield from iter(self.elems)\n return _iter\n \n\n\nNow if you need some custom logic to Actually go into __next__, other than just sequentially yeld the values of a list, the associated iterator class logic can be of help:\n...\n\nclass _InnerItter:\n def __init__(self, parent):\n self.parent = parent\n self.i = 0\n \n def __next__(self):\n if self.i >= len(self.parent.elems):\n raise StopIteration\n self.i += 1\n return self.parent.elems[self.i - 1]\n \n async def __anext__(self):\n if self.i >= len(self.parent.elems):\n raise StopAsyncIteration\n self.i += 1\n return self.parent.elems[self.i - 1]\n\n\nclass Iter:\n def __init__(self):\n self.elems = list(range(10))\n \n def __iter__(self):\n return _InnerItter(self)\n \n def __aiter__(self):\n return _InnerItter(self)\n...\n\n\n" ]
[ 0 ]
[]
[]
[ "async_iterator", "iterator", "python", "python_3.x", "python_asyncio" ]
stackoverflow_0074510953_async_iterator_iterator_python_python_3.x_python_asyncio.txt
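A short hypothetical demo of why the per-call iterator matters, assuming the closure-based Iter from the answer: two tasks can iterate the same instance concurrently because each async for gets its own generator (and its own counter):

import asyncio

async def consume(it, label):
    async for elem in it:
        print(label, elem)

async def main():
    shared = Iter()
    # neither task disturbs the other's position
    await asyncio.gather(consume(shared, 'task-1'), consume(shared, 'task-2'))

asyncio.run(main())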
Q: invert binary tree in python with recursion I looked into the code for inverting a binary tree on the internet, but I couldn't understand what it is doing. It's written in Python. I am a Python programmer myself but couldn't understand it. The snippet is as follows: def invertTree(root): if root: root.left, root.right = invertTree(root.right), invertTree(root.left) return root I don't understand this root.left and root.right. Root is the main node in the graph, it will be an integer or a single character. But what does root.left represent in Python? I honestly do not get it. Update: My understanding is the node is accessed like below: class Node: def __init__(self, data): self.left = None self.right = None self.data = data def PrintTree(self): print(self.data) root = Node(10) root.PrintTree() A: First understand the problem with a diagram..! Q:- Given a binary tree, you have to convert the binary tree into its inverted binary tree. Diagram Class TreeNode: {Initialization of the binary tree} The key insight here is to realize that in order to invert a binary tree we only need to recursively swap the children. To avoid using a tmp variable, we can do this pythonically by taking advantage of Python's tuple packing and unpacking and do a direct swap: > class TreeNode: > def __init__(self, val=0, left=None, right=None): > self.val = val > self.left = left > self.right = right > class Solution: > def invertTree(self, root): > if root: > root.left, root.right = self.invertTree(root.right), self.invertTree(root.left) > return root Note that if one of the children is null, the condition if root won't be triggered and the swap in the upper level will still be performed (e.g. node has right child but no left child, assignment will be root.left, root.right = root.right, null). what is root.left doing? Traversing the left subtree of the binary tree what is root.right doing? Traversing the right subtree of the binary tree. Note You can access values of root by root.val as you can see the class TreeNode. I have stored the value in val* Also to learn more about the binary trees basic questions you should do. (1)Preorder Traversal (2)Inorder Traversal (3)Postorder Traversal Updated Section.! Q-: How can the root be accessed from the other class.? A-: Just call it in the main function..! Main Function > if __name__=="__main__": > s=input() > root=TreeNode(s) > Solution().invertTree(root) > inorderTraversal(root) # You can print in any other traversal also as the testcases requirements For more in detail see the driver code (starting from the 18th line {Below Link}): https://practice.geeksforgeeks.org/problems/mirror-tree/1 A: A binary tree can be used to store elements sorted by a key. (Look for the balanced binary tree for more advanced topics.) If the elements are sorted in ascending order, for any subtree T with the root node R, any element in the left branch of R should be smaller than any element in the right branch of R. This is a heavily modified version of the example code at Invert a Binary Tree by Shivali Bhadaniya. ''' 5 5 / \ / \ / \ / \ 3 7 =======> 7 3 / \ / \ / \ 1 4 6 6 4 1 ''' class Node: def __init__(self, data): self.left = self.right = None self.data = data def __repr__(self): return repr(self.data) def invert(root): left, right = root.left, root.right print(['visiting', root, 'swapping', left, right]) root.left, root.right = right, left if left: invert(left) if right: invert(right) root = Node(5) root.left = node3 = Node(3) root.right = node7 = Node(7) node3.left = Node(1) node3.right = Node(4) node7.left = Node(6) invert(root) This will output the following. ['visiting', 5, 'swapping', 3, 7] ['visiting', 3, 'swapping', 1, 4] ['visiting', 1, 'swapping', None, None] ['visiting', 4, 'swapping', None, None] ['visiting', 7, 'swapping', 6, None] ['visiting', 6, 'swapping', None, None] In the above example, the tree before calling invert() was sorted in ascending order. After the call, it will be sorted in descending order. That's why the operation is called 'inversion'. You can understand why the simple recursion of swapping the left child and the right child results in the inversion, by simulating the above example code with a pencil and paper manually. Compare logs with your calculation.
invert binary tree in python with recursion
I looked into the code for inverting a binary tree on the internet, but I couldn't understand what it is doing. It's written in Python. I am a Python programmer myself but couldn't understand it. The snippet is as follows: def invertTree(root): if root: root.left, root.right = invertTree(root.right), invertTree(root.left) return root I don't understand this root.left and root.right. Root is the main node in the graph, it will be an integer or a single character. But what does root.left represent in Python? I honestly do not get it. Update: My understanding is the node is accessed like below: class Node: def __init__(self, data): self.left = None self.right = None self.data = data def PrintTree(self): print(self.data) root = Node(10) root.PrintTree()
[ "First understand the problem with a diagram..!\nQ:- Given binary tree you have to convert binary tree into invert binary tree.\nDiagram\nClass TreeNode: {Initialization of the binary tree}\nThe key insight here is to realize that in order to invert a binary tree we only need to recursively swap the children. To avoid using a tmp variable, we can do this pythonically by taking advantage of Python's tuple packing and unpacking and do a direct swap:\n> class TreeNode:\n> def __init__(self, val=0, left=None, right=None):\n> self.val = val\n> self.left = left\n> self.right = right\n\n> class Solution:\n> def invertTree(self, root):\n> if root:\n> root.left, root.right = self.invertTree(root.right), self.invertTree(root.left)\n> return root\n\nNote that if one of the children is null, the condition if root won't be triggered and the swap in the upper level will still be performed (e.g. node has right child but no left child, assigment will be root.left, root.right = root.right, null).\nwhat is root.left doing? Traversing the left subtree of the binary tree\nwhat is root.right doing? Traversing the right subtree of the binary tree.\nNote You can access values of root by root.val as you can see the class TreeNode. I have stored the value in val*\nAlso to learn more about the binary trees basic questions you should do.\n\n(1)Preorder Traversal\n\n\n(2)Inorder Traversal\n\n\n(3)Postorder Traversal\n\nUpdated Section.!\nQ-: How can the root be accessed from the other class.?\nA-: Just call it in the main function..!\nMain Function\n> if __name__=\"__main__\":\n> s=input()\n> root=TreeNode(s) \n> Solution().invertTree(root) \n> inorderTraversal(root) # You can print in any other traversal also as the testcases requirements\n\nFor more in detail see the driver code (starting from the 18th line {Below Link}):\nhttps://practice.geeksforgeeks.org/problems/mirror-tree/1\n", "A binary tree can be used to store elements sorted by a key.(Look for the balanced binary tree for more advanced topic.) If the elements are sorted in ascending order, for any subtree T with the root node R, any element in the left branch of R should be smaller than any element in the right branch of R.\nThis is a heavily modified version of the example code at Invert a Binary Tree by Shivali Bhadaniya.\n'''\n 5 5\n / \\ / \\\n / \\ / \\ \n 3 7 =======> 7 3 \n / \\ / \\ / \\\n 1 4 6 6 4 1 \n'''\nclass Node:\n def __init__(self, data):\n self.left = self.right = None\n self.data = data\n def __repr__(self):\n return repr(self.data)\n\ndef invert(root):\n left, right = root.left, root.right\n print(['visiting', root, 'swapping', left, right])\n root.left, root.right = right, left\n if left: invert(left)\n if right: invert(right)\n\nroot = Node(5)\nroot.left = node3 = Node(3)\nroot.right = node7 = Node(7)\nnode3.left = Node(1)\nnode3.right = Node(4)\nnode7.left = Node(6)\n\ninvert(root)\n\nThis will output the following.\n['visiting', 5, 'swapping', 3, 7]\n['visiting', 3, 'swapping', 1, 4]\n['visiting', 1, 'swapping', None, None]\n['visiting', 4, 'swapping', None, None]\n['visiting', 7, 'swapping', 6, None]\n['visiting', 6, 'swapping', None, None]\n\nIn the above example, the tree before calling invert() was sorted in ascending order. After the call, it will be sorted in descending order. That's why the operation is called 'inversion'.\nYou can understand why the simple recursion of swaping the left child and the right child results in the inversion, by simulating the above example code with a pencil and paper manually. 
Compare logs with your calculation.\n" ]
[ 1, 1 ]
[]
[]
[ "binary_tree", "data_structures", "python", "recursion" ]
stackoverflow_0074514686_binary_tree_data_structures_python_recursion.txt
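For contrast with the recursive answers above, a hedged iterative sketch using a queue (same Node class as the second answer); this avoids deep recursion, which can matter for very unbalanced trees:

from collections import deque

def invert_iterative(root):
    queue = deque([root])
    while queue:
        node = queue.popleft()
        if node is None:
            continue
        node.left, node.right = node.right, node.left  # same swap, no recursion
        queue.append(node.left)
        queue.append(node.right)
    return root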
Q: Traverse list of tuples to compare and report min, max My previous question was not understood, so I am rephrasing and posting this one. I have a list of tuples of (class, n_class_examples) like this: my_list = [ (0, 126), (1, 192), (2, 330), (3, 952) ] So I am interested in generating a function that takes in such a list, compares each tuple against all others, and in each case reports which class has the smaller number of samples (min_class), and which has the larger number of samples (max_class). def get_min_max_class(current_list): for tn, tn+1: # tn -> 1-tuple, tn+1 any other tuple not tn if tn[1] < tn+1[1] smaller_class = tn[0] larger_class = tn+1[0] smaller_class = tn+1[0] larger_class = tn[0] return # smaller, larger of the 2 compared in each case So that: get_min_max_class(my_list) # would perform the comparison like so: (0, 126) v (1, 192) -> min_class = 0, max_class = 1 # in this case (0, 126) v (2, 330) -> min_class = 0, max_class = 2 # and in this case (0, 126) v (3, 952) -> min_class = 0, max_class = 3 # and here .. (1, 192) v (2, 330) -> min_class = 1, max_class = 2 # ... (1, 192) v (3, 952) -> min_class = 1, max_class = 3 (2, 330) v (3, 952) -> min_class = 2, max_class = 3 Forgive my definition of the function, but I want the function to iteratively compare those items and, each time, report which is larger and which is smaller. A: Iterate over the list of pairs generated by itertools.combinations, then process each pair individually using min and max. from itertools import combinations from operator import itemgetter first = itemgetter(0) second = itemgetter(1) def get_min_max_class(current_list): for pair in combinations(current_list, 2): p0, p1 = pair min_class = first(min(pair, key=second)) max_class = first(max(pair, key=second)) print(f'{p0} v {p1} -> min_class = {min_class}, max_class = {max_class}') get_min_max_class(my_list) If you want to return a list of results, rather than simply printing a report, you'll have to define what exactly you want to return. A: Python's sorted(), min(), and max() functions take as second argument a 'key' that lets you specify how to calculate the sorting for different objects, using lambda functions. In this case, you want to sort the tuples based on the value of the second element and return the corresponding first value. So, if I wanted the 'max' in your case I would do: max_class = max(my_list, key=lambda x: x[1])[0] The lambda expression, if it's new to you, is saying "sort the item x in the list based on whatever you find at x[1]". Then, take the first element of that tuple to get the class with the most samples, or whatever it was exactly. I hope that helps!
Traverse list of tuples to compare and report min, max
My previous question was not understood, so I am rephrasing and posting this one. I have a list of tuples of (class, n_class_examples) like this: my_list = [ (0, 126), (1, 192), (2, 330), (3, 952) ] So I am interested in generating a function that takes in such a list, compares each tuple against all others, and in each case reports which class has the smaller number of samples (min_class), and which has the larger number of samples (max_class). def get_min_max_class(current_list): for tn, tn+1: # tn -> 1-tuple, tn+1 any other tuple not tn if tn[1] < tn+1[1] smaller_class = tn[0] larger_class = tn+1[0] smaller_class = tn+1[0] larger_class = tn[0] return # smaller, larger of the 2 compared in each case So that: get_min_max_class(my_list) # would perform the comparison like so: (0, 126) v (1, 192) -> min_class = 0, max_class = 1 # in this case (0, 126) v (2, 330) -> min_class = 0, max_class = 2 # and in this case (0, 126) v (3, 952) -> min_class = 0, max_class = 3 # and here .. (1, 192) v (2, 330) -> min_class = 1, max_class = 2 # ... (1, 192) v (3, 952) -> min_class = 1, max_class = 3 (2, 330) v (3, 952) -> min_class = 2, max_class = 3 Forgive my definition of the function, but I want the function to iteratively compare those items and, each time, report which is larger and which is smaller.
[ "Iterate over the list of pairs generated by itertools.combintions, the process each pair individually using min and max.\nfrom itertools import combinations\nfrom operator import itemgetter\n\nfirst = itemgetter(0)\nsecond = itemgetter(1)\n\ndef get_min_max_class(current_list):\n for pair in combinations(current_list, 2):\n p0, p1 = pair\n min_class = first(min(pair, key=second))\n max_class = first(max(pair, key=second))\n print(f'{p0} v {p1} -> min_class = {min_class}, max_class = {max_class}')\n\nget_min_max_class(my_list)\n\nIf you want to return a list of results, rather than simply printing a report, you'll have to define what exactly you want to return.\n", "Python's sorted(), min(), and max() functions take as second argument a 'key' that let's you specify how to calculate the sorting for different objects, using lambda functions. In this case, you want to sort the tuples based on the value of the second element and return the corresponding first value.\nSo, if I wanted the 'max' in your case I would do:\nmax_class = max(my_list, key=lambda x: x[1])[-1]\n\nThe lambda expression, if it's new to you, is saying \"sort the item x in the list based on whatever you find at x[1]\". Then, take the final element of the sorted list to get the class with the most samples, or whatever it was exactly.\nI hope that helps!\n" ]
[ 1, 0 ]
[]
[]
[ "list", "python", "python_3.x" ]
stackoverflow_0074534955_list_python_python_3.x.txt
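A compact, runnable sketch that combines the two answers above and collects the comparisons instead of printing them (the dict keys min_class/max_class are illustrative choices, not part of the original question):

from itertools import combinations

my_list = [(0, 126), (1, 192), (2, 330), (3, 952)]

def get_min_max_class(current_list):
    # compare every tuple against every other tuple exactly once
    results = []
    for (c1, n1), (c2, n2) in combinations(current_list, 2):
        smaller, larger = (c1, c2) if n1 < n2 else (c2, c1)
        results.append({"min_class": smaller, "max_class": larger})
    return results

for row in get_min_max_class(my_list):
    print(row)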
Q: Keras backend switch combined with tf.where not working as intended I have a custom loss function where I want to change values from a one-hot based encoding to values in a certain range to calculate an IOU. Part of this code is to look at where I have a one in a tensor that has zeros otherwise. For this I am using tf.where, which returns me the location. I have a vector of shape [batch_size,S1,S2,12] where I only care about the last dimension; that's why I take [...,2] of tf.where. Now it often happens that my prediction is all zeros because I have background events without any values in them, and also my network will predict an all-zero vector every now and then. This means tf.where will return an empty tensor. That's why I want to use K.switch to check if the tensor is empty, because if it is I would like to have zeros returned. The problem now is that K.switch expects the then/else options to have the same shape, but I need my output to have shape [batch_size,S1,S2,1]. I have tried different things but I can't get this to work. I need to get zeros of shape [batch_size,S1,S2,1] or I need where_box1 to have [batch_size,S1,S2,1] with floats. The way it's implemented now, K.switch returns an empty vector of zeros when where_box1_temp is empty, which is not what I want. When I use tf.zeros([batch_size,S1,S2,1]) instead, it will complain that the conditions are of different shape when where_box1_temp is empty.... where_box1_temp = tf.where(y_pred[...,C+1:C+13])[...,2] where_box1 = K.switch(tf.equal(tf.size(where_box1_temp),0) , tf.zeros_like(where_box1_temp) , where_box1_temp) A: So I found a workaround; maybe this is helpful for someone else: where_box1_temp = tf.where(y_pred[...,C+1:C+13],[1,2,3,4,5,6,7,8,9,10,11,12],0) where_box1 = tf.reshape(K.sum(where_box1_temp,axis=3),[batch_size,5,5]) This allows me to have a tensor of my desired shape where all background/zero prediction values are 0, without having to use K.switch and having trouble with any empty dimensions or something like that.
Keras backend switch combined with tf.where not working as intended
I have a custom loss function where I want to change values from a one-hot based encoding to values in a certain range to calculate an IOU. Part of this code is to look at where I have a one in a tensor that has zeros otherwise. For this I am using tf.where which returns me the location. I have a vector of shape [batch_size,S1,S2,12] where I only care for the last dimension, thats why I take [...,2] of tf.where. Now it often happens that my prediction is all zeros because I have background events without any values in them and also my network will predict an all zero vector every now and then. This means tf.where will return an empty tensor. Thats why I want to use K.switch to check if the tensor is empty, because if it is I would like to have zeros returned. The problem is now that K.switch expects the shape of the then else options to have the same shape but I need my output to have shape [batch_size,S1,S2,1]. I have tried different things but I cant get this to work. I need to get zeros of shape [batch_size,S1,S2,1] or I need where_box1 to have [batch_size,S1,S2,1] with floats. The way its implemented now, K.switch returns an empty vector of zeros when where_box1_temp is empty, which is not what I want. When I use tf.zeros([batch_size,S1,S2,1]) instead it will complain that the conditions are of different shape when where_box1_temp is empty.... where_box1_temp = tf.where(y_pred[...,C+1:C+13])[...,2] where_box1 = K.switch(tf.equal(tf.size(where_box1_temp),0) , tf.zeros_like(where_box1_temp) , where_box1_temp)
[ "So I found a workaround, maybe this is helpful for someone else:\nwhere_box1_temp = tf.where(y_pred[...,C+1:C+13],[1,2,3,4,5,6,7,8,9,10,11,12],0)\n\nwhere_box1 = tf.reshape(K.sum(where_box1_temp,axis=3),[batch_size,5,5])\n\nThis allows me to have a tensor of my desired shape where all background/zero prediction values are 0 without having to use k.switch and having trouble with any empty dimensions or something like that.\n" ]
[ 0 ]
[]
[]
[ "keras", "python", "tensorflow" ]
stackoverflow_0074530610_keras_python_tensorflow.txt
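The workaround in the answer above hinges on turning a (possibly all-zero) one-hot slice into index values without K.switch. A minimal standalone sketch of that trick, using a toy tensor instead of the original y_pred (the shapes and values here are made-up assumptions):

import tensorflow as tf

# toy batch of one-hot rows; the last row is all zeros, like a background prediction
one_hot = tf.constant([[0., 1., 0.],
                       [1., 0., 0.],
                       [0., 0., 0.]])

# weight each position with a 1-based index and sum: all-zero rows map to 0,
# every other row maps to the position of its single 1
positions = tf.range(1, one_hot.shape[-1] + 1, dtype=one_hot.dtype)
indices = tf.reduce_sum(one_hot * positions, axis=-1)
print(indices)  # prints [2. 1. 0.]; fixed shape, no empty tensors involved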
Q: How to check if the parentheses and brackets are balanced? I need to write a function that, given a string with parentheses and/or square brackets, evaluates whether they appear in the correct order. For example, in this string '([b])(aa)' you can see that every time a parenthesis or square bracket is opened, it is closed in the correct position. However, a string like '[(a])' does not close the parentheses or square brackets in the correct order, as it should be '[(a)]'. The function should return True or False depending on this correct positioning of both elements. I have tried the following code, but this logic seems to be infinite and it is not working if I have more than two parentheses or square brackets opened. def parenthesis(string): for a in range(len(string)): if string[a] == "(": for b in range(a,len(string)): if string[b] == "[": for c in range(b,len(string)): if string[c] == "]": for d in range(c,len(string)): if string[d] == ")": return True elif string[b] == ")": return True else: return False If I run the function over the string "([b])(aa)" it returns False as output. parenthesis("([b])(aa)") How can I rewrite this function so it evaluates all the parenthesis and square bracket combinations properly? A: This is one of the stack implementations I know (the empty-stack check is applied only when a closing character is seen, so other characters are simply ignored): def is_balanced(s): stack = [] for char in s: if char == "(" or char == "{" or char == "[": stack.append(char) elif char == ")": if not stack or stack.pop() != "(": return False elif char == "]": if not stack or stack.pop() != "[": return False elif char == "}": if not stack or stack.pop() != "{": return False return len(stack) == 0 A: This version is more DRY than the prior answer: def is_balanced(parens: str) -> bool: # Link: https://stackoverflow.com/a/73341167/ parens_map ={'(':')','{':'}','[':']'} stack = [] for paren in parens: if paren in parens_map: # is open stack.append(paren) elif paren in parens_map.values(): # is close if (not stack) or (paren != parens_map[stack.pop()]): return False return not stack A: If a right parenthesis appears before a matching left one, the count goes to -1 and we return False. (Note that this counting approach checks parentheses only; it does not handle square brackets or their interleaving.) def is_balanced(string): cnt = 0 for char in string: if char == '(': cnt += 1 if char == ')': cnt -= 1 if cnt < 0: return False return True if cnt == 0 else False
How to check if the parentheses and brackets are balanced?
I need to write a function that given a string with parenthesis and/or square brackets it is able to evaluate if they appear in the correct order. For example, in this string '([b])(aa)' you can see that every time a parenthesis or square bracket is open, it is closed in the correct position. However, a string like '[(a])' it is not closing the parenthesis or square brackets in the correct order as it should be '[(a)]'. The function should return True or False depending on this correct position of both elements. I have tried the following code, but this logic seems to be infinite and it is not working if I have more than two parenthesis or square brackets opened. def parenthesis(string): for a in range(len(string)): if string[a] == "(": for b in range(a,len(string)): if string[b] == "[": for c in range(b,len(string)): if string[c] == "]": for d in range(c,len(string)): if string[d] == ")": return True elif string[b] == ")": return True else: return False If I run the function over the string "([b])(aa)" it is returning false as output. parenthesis("([b])(aa)") How can I rewrite this function so it evaluates all the parenthesis and square brackets combinations properly?
[ "This is one of the stack implementations I know:\ndef is_balanced(s):\n stack = []\n for char in s:\n if char == \"(\" or char == \"{\" or char == \"[\":\n stack.append(char) \n elif len(stack) <= 0:\n return False\n elif char == \")\" and stack.pop() != \"(\":\n return False\n elif char == \"]\" and stack.pop() != \"[\":\n return False\n elif char == \"}\" and stack.pop() != \"{\":\n return False\n if len(stack) == 0:\n return True\n return False\n\n", "This version is more DRY than the prior answer:\ndef is_balanced(parens: str) -> bool:\n # Link: https://stackoverflow.com/a/73341167/\n parens_map ={'(':')','{':'}','[':']'}\n stack = []\n for paren in parens:\n if paren in parens_map: # is open\n stack.append(paren)\n elif paren in parens_map.values(): # is close\n if (not stack) or (paren != parens_map[stack.pop()]):\n return False\n return not stack\n\n", "If a right parenthesis is open before a left, you got -1 and return False\ndef is_balanced(string):\n cnt = 0\n for char in string:\n if char == '(': cnt += 1\n if char == ')': cnt -= 1\n if cnt < 0: return False\n return True if cnt == 0 else False\n\n" ]
[ 0, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0072250748_python.txt
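A few quick checks for the stack-based implementations above (assuming one of the is_balanced definitions is in scope; the chosen test strings are illustrative):

cases = {
    "([b])(aa)": True,   # the example from the question
    "[(a])": False,      # wrong interleaving
    "([)]": False,
    "": True,            # nothing to balance
    "(": False,          # left open
}
for text, expected in cases.items():
    assert is_balanced(text) == expected, text
print("all checks passed")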
Q: Move surface with mouse motion in pygame I'm trying to move a surface represented by an image on disk with mouse motion in pygame, here's my code : import sys import pygame from pygame.locals import * WINDOW_SIZE = (600, 400) FPS = 60 class System: def __init__(self, screen, surface): self.screen = screen self.surface = pygame.transform.scale(surface, (WINDOW_SIZE[0] * 2, WINDOW_SIZE[1] * 2)) self.clock = pygame.time.Clock() def run(self): running = True moving = False offset_x = 0 offset_y = 0 while running: for event in pygame.event.get(): if event.type == QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: pygame.quit() sys.exit() if event.key == pygame.K_SPACE: mouse_pos = pygame.mouse.get_pos() if self.surface.get_rect().collidepoint(mouse_pos): moving = True elif event.type == pygame.KEYUP: moving = False elif event.type == MOUSEMOTION and moving: offset_x = event.rel[0] offset_y = event.rel[1] elif event.type == MOUSEWHEEL: offset_y -= event.y * 50 self.screen.fill((105, 212, 229)) self.screen.blit(self.surface, (offset_x, offset_y)) pygame.display.update() self.clock.tick(FPS) def run_system(screen, surface): system = System(screen, surface) system.run() pygame.init() screen = pygame.display.set_mode(WINDOW_SIZE, 0, 32) surface = pygame.image.load('image.png') if __name__ == '__main__': run_system(screen, surface) The surface doesn't move properly. It shakes rapidly when I move mouse. I'm using event.rel to get relative movement. Maybe it is not the right way to do it. Have you another method ? P.S. : I also press space key to activate movement (moving variable) but it is not the problem, this works fine. A: I found my mistake : Replace : offset_x = event.rel[0] offset_y = event.rel[1] With : offset_x += event.rel[0] offset_y += event.rel[1]
Move surface with mouse motion in pygame
I'm trying to move a surface represented by an image on disk with mouse motion in pygame, here's my code : import sys import pygame from pygame.locals import * WINDOW_SIZE = (600, 400) FPS = 60 class System: def __init__(self, screen, surface): self.screen = screen self.surface = pygame.transform.scale(surface, (WINDOW_SIZE[0] * 2, WINDOW_SIZE[1] * 2)) self.clock = pygame.time.Clock() def run(self): running = True moving = False offset_x = 0 offset_y = 0 while running: for event in pygame.event.get(): if event.type == QUIT: pygame.quit() sys.exit() elif event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: pygame.quit() sys.exit() if event.key == pygame.K_SPACE: mouse_pos = pygame.mouse.get_pos() if self.surface.get_rect().collidepoint(mouse_pos): moving = True elif event.type == pygame.KEYUP: moving = False elif event.type == MOUSEMOTION and moving: offset_x = event.rel[0] offset_y = event.rel[1] elif event.type == MOUSEWHEEL: offset_y -= event.y * 50 self.screen.fill((105, 212, 229)) self.screen.blit(self.surface, (offset_x, offset_y)) pygame.display.update() self.clock.tick(FPS) def run_system(screen, surface): system = System(screen, surface) system.run() pygame.init() screen = pygame.display.set_mode(WINDOW_SIZE, 0, 32) surface = pygame.image.load('image.png') if __name__ == '__main__': run_system(screen, surface) The surface doesn't move properly. It shakes rapidly when I move mouse. I'm using event.rel to get relative movement. Maybe it is not the right way to do it. Have you another method ? P.S. : I also press space key to activate movement (moving variable) but it is not the problem, this works fine.
[ "I found my mistake :\nReplace :\noffset_x = event.rel[0]\noffset_y = event.rel[1]\n\nWith :\noffset_x += event.rel[0]\noffset_y += event.rel[1]\n\n" ]
[ 0 ]
[]
[]
[ "motion", "mouse", "pygame", "python" ]
stackoverflow_0074534425_motion_mouse_pygame_python.txt
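A stripped-down, runnable sketch of the accumulate-the-relative-motion pattern from the answer above; the window size, colors, and mouse-button drag trigger are placeholder choices, not part of the original code:

import pygame

pygame.init()
screen = pygame.display.set_mode((600, 400))
surface = pygame.Surface((300, 200))
surface.fill((200, 80, 80))  # stand-in for the image loaded from disk
offset = [0, 0]
dragging = False
clock = pygame.time.Clock()

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.MOUSEBUTTONDOWN:
            dragging = True
        elif event.type == pygame.MOUSEBUTTONUP:
            dragging = False
        elif event.type == pygame.MOUSEMOTION and dragging:
            offset[0] += event.rel[0]  # accumulate the relative motion,
            offset[1] += event.rel[1]  # do not overwrite it
    screen.fill((105, 212, 229))
    screen.blit(surface, offset)
    pygame.display.update()
    clock.tick(60)
pygame.quit()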
Q: How to use MINOS or SNOPT with GEKKO I am working on an optimization problem and I want to use the MINOS or SNOPT solvers to find a solution to it. On the GEKKO website https://gekko.readthedocs.io/en/latest/overview.html it is mentioned that MINOS and SNOPT are available but with a commercial license. How could I get this license? Right now I am using APOPT, but I have to use SNOPT. A: Licenses for SNOPT and MINOS are available from Stanford Business Software, Inc. If you share benchmark information with your manager or professor, you may not need SNOPT. Testing on 494 benchmark problems shows that APOPT (MINLP) and IPOPT (NLP) beat the performance of SNOPT and MINOS. APOPT and IPOPT are freely available in Gekko. Hedengren, J.D., Mojica, J.L., Cole, W., Edgar, T.F., APOPT: MINLP Solver for Differential Algebraic Systems with Benchmark Testing, INFORMS Annual Meeting, Phoenix, AZ, Oct 2012. There is a free version of SNOPT available through the NEOS server but it requires AMPL format.
How to use MINOS or SNOPT with GEKKO
I am working on an optimization problem and I want to use MINOS or SNOPT solvers to find a solution to it. In the GEKKO website https://gekko.readthedocs.io/en/latest/overview.html , they mentioned that MINOS and SNOPT are available but with a commercial License how could I get this License? Now, I am using APOPT, but I have to use SNOPT
[ "Licenses for SNOPT and MINOS are available from Stanford Business Software, Inc. If you share benchmark information with your manager or professor, you may not need SNOPT. Testing on 494 benchmark problem shows that APOPT (MINLP) and IPOPT (NLP) beat the performance of SNOPT and MINOS. APOPT and IPOPT are freely available in Gekko.\n\n\nHedengren, J.D., Mojica, J.L., Cole, W., Edgar, T.F., APOPT: MINLP Solver for Differential Algebraic Systems with Benchmark Testing, INFORMS Annual Meeting, Phoenix, AZ, Oct 2012.\n\nThere is a free version of SNOPT available through the NEOS server but it requires AMPL format.\n" ]
[ 0 ]
[]
[]
[ "gekko", "python" ]
stackoverflow_0074526311_gekko_python.txt
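For reference, switching between the freely included solvers in Gekko is a one-line option; the tiny model below is only a placeholder to make the snippet runnable:

from gekko import GEKKO

m = GEKKO(remote=False)
x = m.Var(value=1, lb=0)
m.Minimize((x - 3)**2)
m.options.SOLVER = 1  # 1 = APOPT (MINLP); 3 = IPOPT (NLP)
m.solve(disp=False)
print(x.value[0])  # ~3.0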
Q: How do I extract this object from its background in OpenCV? For my internship, I am trying to extract this type of aluminum wires from the acquired vision camera footage. The purpose is to extract those connections and classify them with machine learning. My idea is to extract those connections to remove all the noise (background) and analyze the gray value density of the bond; a fall in a gray value plot will indicate a broken wire. I am very new to vision and I tried to really dive into edge detection and segmentation. My problem is that I cannot totally remove the noise, which results in the following edge detection with the Canny operator. The Sobel operator results in too much noise. This is the best I have achieved in the last days; hopefully you can help me with preprocessing this image before the Canny operator, but tips on capturing the object can also help. Because of limitations in the space and process where this vision is taken, physical additions to the vision camera are difficult, but suggestions are appreciated, so still comment on what I can do to improve the acquisition. My code: import numpy as np from PIL import Image import cv2 blur = 3 canny_low = 15 canny_high = 230 min_area = 0.005 max_area = 0.025 dilate_iter = 10 erode_iter = 10 mask_color = (0.0,0.0,0.0) image1 = cv2.imread(r"C:/Users/User/Pictures/vlcsnap-2022-11-21-15h59m05s146.png") image2 = cv2.imread(r"C:/Users/User/Pictures/template2.png") # function for object extraction from background def bgRemoval_seg(source, template): global blur, canny_low, canny_high, min_area, max_area, dilate_iter, erode_iter, mask_color # convert source image and template to grayscale source = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY) template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY) # add gaussian filter for smooth blur source = cv2.GaussianBlur(source, (blur,blur), 0) template = cv2.GaussianBlur(template, (blur,blur), 0) # add bilateral filter to remove A LOT of noise (I tried various values with this filter) source = cv2.bilateralFilter(source,7,100,100) template = cv2.bilateralFilter(template,7,100,100) # add adaptive contrast that increases the amount of contours around the object (also increases noise) clahe = cv2.createCLAHE(clipLimit=3.7, tileGridSize=(4,4)) source = clahe.apply(source) template = clahe.apply(template) cv2.imshow("contrast", source) cv2.imshow("contrast2", template) # apply Canny operator for edge detection edges1 = cv2.Canny(source, canny_low, canny_high) edges2 = cv2.Canny(template, canny_low, canny_high) # dilate and erode the image to remove more noise edges1 = cv2.dilate(edges1, None) edges2 = cv2.dilate(edges2, None) edges1 = cv2.erode(edges1, None) edges2 = cv2.erode(edges2, None) edges1 = np.array(edges1) edges2 = np.array(edges2) cv2.imshow("edges1", edges1) cv2.imshow("edges2", edges2) # get the contours and their areas contour_info_1 = [(c, cv2.contourArea(c),) for c in cv2.findContours(edges1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]] contour_info_2 = [(c, cv2.contourArea(c),) for c in cv2.findContours(edges2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]] # Get the area of the image as a comparison image_area = source.shape[0] * source.shape[1] # calculate max and min areas in terms of pixels max_area = max_area * image_area min_area = min_area * image_area # Set up mask with a matrix of 0's mask1 = np.zeros(edges1.shape, dtype = np.uint8) # Go through and find relevant contours and apply to mask for i in range(0,len(contour_info_1)): # Instead of worrying about all the smaller contours, if the area is smaller than the min, the loop will break contour1 = contour_info_1[i] if contour1[1] > min_area and contour1[1] < max_area: # Add contour to mask mask1 = cv2.fillConvexPoly(mask1, contour1[0], (255)) # use dilate, erode, and blur to smooth out the mask mask = mask1 mask = cv2.dilate(mask, None, iterations=dilate_iter) mask = cv2.erode(mask, None, iterations=erode_iter) mask = cv2.GaussianBlur(mask, (blur,blur), 0) mask = np.array(mask) # Ensures data types match up mask_color = np.array(mask_color) mask_color = np.reshape(mask_color,[1,3]) mask = mask.astype('float32') / 255.0 source= source.astype('float32') / 255.0 # Blend the image and the mask masked = (mask * source) masked = (masked * 255).astype('uint8') return masked while(True): # Get Region of interest x,y,w,h = cv2.selectROI(image1) # Recommended values for the crop # X: 145 , Y: 292 , W: 1035 , H: 445 # Crop image and use same crop for template imageCrop1 = image1[int(y):int(y+h), int(x):int(x+w)] print(x,y,w,h) imageCrop2 = image2[int(y):int(y+h), int(x):int(x+w)] # Display the resulting frame cv2.imshow("Foreground Canny ",bgRemoval_seg(imageCrop1, imageCrop2)) if cv2.waitKey(1) & 0xFF == ord('q'): break # When everything done, release the capture cv2.destroyAllWindows() Original image: original image After filters (first is the original, second is the template, how it needs to look): Original: bilateral filters and adaptive contrast filter Template: template bilateral filters and adaptive contrast filter Canny operation: Original: a lot of noise caused by shadows but also background noise Template (the ultimate goal): template canny operation Object extraction from background: end result I hope you can help me! I already tried different filters. Furthermore, I tried the GrabCut algorithm. I also did some thresholding but stopped early and didn't dive into that. I also tried division with the Gaussian filters, but the result stayed the same. A: As noted in the comments, this is a tricky problem, because the image has lots of edges that you don't care about, and it's hard to filter by color either. However, there is one feature which I think could be helpful, which is the blur. Specifically, the wire is in focus, and the rest of the shot is not. You could exploit this fact using a Laplacian filter. A Laplacian filter is usually used to detect edges by looking at where the filter crosses zero. However, it can also be used to detect blur by finding regions where the values of the filter are small across a wide area. To get the entire wire, I use a Gaussian smoothing filter after the Laplacian filter, which smears the high values across the width of the wire. Then, the value is thresholded. import cv2 import matplotlib.pyplot as plt import numpy as np import scipy.ndimage image = cv2.imread('test192_img.png') laplacian_spread_distance = 15 # distance to spread laplacian in pixels wire_threshold = 110 # Out of 255. Higher values mean less of the image is kept. gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) fm = np.abs(cv2.Laplacian(gray, cv2.CV_64F)) fm = scipy.ndimage.gaussian_filter(fm, sigma=laplacian_spread_distance) fm /= fm.max() / 255 fm = fm.astype('uint8') ret2, thresholded = cv2.threshold(fm, wire_threshold, 1, cv2.THRESH_BINARY) extracted = thresholded.reshape(image.shape[:2] + (1,)) * image Output from this filter: This method assumes that the wire is in focus. That assumption might not be justified if you have an auto-focus camera, or if the distance between the camera and wire is changing.
How do I extract this object from its background in OpenCV?
For my internship, I am trying to extract this type of aluminum wires of the acquired vision camera footage. The purpose is to extract those connections and classify them with machine learning. My idea is to extract those connections to remove all the noise (background) and analyze the gray value density of the bond, a fall in a gray value plot will indicate a broken wire. I am very new with vision and I tried to really dive in edge detection and segmentation. My problem is that I cannot totally remove the noise what results in the following edge detection with the Canny operator. The Sobel operator result in too much noise. This is the best what I achieved in the last days, hopefully you can help me with preprocessing this image before the Canny Operator but also tips in capturing the object can help. Because of limitations in the space and process where this vision is taken, physical additions to the vision camera is difficult but is appreciated, so still comment under what I can do to improve the acquisition. My code: import numpy as np from PIL import Image import cv2 blur = 3 canny_low = 15 canny_high = 230 min_area = 0.005 max_area = 0.025 dilate_iter = 10 erode_iter = 10 mask_color = (0.0,0.0,0.0) image1 = cv2.imread(r"C:/Users/User/Pictures/vlcsnap-2022-11-21-15h59m05s146.png") image2 = cv2.imread(r"C:/Users/User/Pictures/template2.png") # function for object extraction from background def bgRemoval_seg(source, template): global blur, canny_low, canny_high, min_area, max_area, dilate_iter, erode_iter, mask_color # change source image and template in gray c source = cv2.cvtColor(source, cv2.COLOR_BGR2GRAY) template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY) # add gaussian filter for smooth blur source = cv2.GaussianBlur(source, (blur,blur), 0) template = cv2.GaussianBlur(template, (blur,blur), 0) # add bilateral Filter ro remove A LOT of noise ( I tried various values with this filter) source = cv2.bilateralFilter(source,7,100,100) template = cv2.bilateralFilter(template,7,100,100) # add adaptive contrast that increases the amount of contours around the object (also increases noise) clahe = cv2.createCLAHE(clipLimit=3.7, tileGridSize=(4,4)) source = clahe.apply(source) template = clahe.apply(template) cv2.imshow("contrast", source) cv2.imshow("contrast2", template) # apply Canny Operator for edge detection edges1 = cv2.Canny(source, canny_low, canny_high) edges2 = cv2.Canny(template, canny_low, canny_high) #dilate and erode the image to remove more noise edges1 = cv2.dilate(edges1, None) edges2 = cv2.dilate(edges2, None) edges1 = cv2.erode(edges1, None) edges2 = cv2.erode(edges2, None) edges1 = np.array(edges1) edges2 = np.array(edges2) cv2.imshow("edges1", edges1) cv2.imshow("edges2", edges2) # get the contours and their areas contour_info_1 = [(c, cv2.contourArea(c),) for c in cv2.findContours(edges1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]] contour_info_2 = [(c, cv2.contourArea(c),) for c in cv2.findContours(edges2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]] # Get the area of the image as a comparison image_area = source.shape[0] * source.shape[1] # calculate max and min areas in terms of pixels max_area = max_area * image_area min_area = min_area * image_area # Set up mask with a matrix of 0's mask1 = np.zeros(edges1.shape, dtype = np.uint8) # Go through and find relevant contours and apply to mask for i in range(0,len(contour_info_1)): # Instead of worrying about all the smaller contours, if the area is smaller than the min, the loop will break contour1 = 
contour_info_1[i] if contour1[1] > min_area and contour1[1] < max_area: # Add contour to mask mask1 = cv2.fillConvexPoly(mask1, contour1[0], (255)) # use dilate, erode, and blur to smooth out the mask mask = mask1 mask = cv2.dilate(mask, None, iterations=dilate_iter) mask = cv2.erode(mask, None, iterations=erode_iter) mask = cv2.GaussianBlur(mask, (blur,blur), 0) mask = np.array(mask) # Ensures data types match up mask_color = np.array(mask_color) mask_color = np.reshape(mask_color,[1,3]) mask = mask.astype('float32') / 255.0 source= source.astype('float32') / 255.0 # Blend the image and the mask masked = (mask * source) masked = (masked * 255).astype('uint8') return masked while(True): # Get Region of interest x,y,w,h = cv2.selectROI(image1) # Recommended values for the crop # X: 145 , Y: 292 , W: 1035 , H: 445 # Crop image and use same crop for template imageCrop1 = image1[int(y):int(y+h), int(x):int(x+w)] print(x,y,w,h) imageCrop2 = image2[int(y):int(y+h), int(x):int(x+w)] # Display the resulting frame cv2.imshow("Foreground Canny ",bgRemoval_seg(imageCrop1, imageCrop2)) if cv2.waitKey(1) & 0xFF == ord('q'): break # When everything done, release the capture cv2.destroyAllWindows() Original image:original image After filters (First is original, second is template,how it needs to look): Original: bilateral filters and adaptive contrast filter Template: template bilateral filters and adaptive contrast filter Canny Operation: Original: a lot of noise caused by shadows but also background noise Template (the ultimate goal):template canny operation Object extraction from background: end result I hope you can help me! I already tried different filters. Furthermore, I tried Grabcut algorithm. I also did some thresholding but stopped early, not dived in to that. I also tried division with the Gaussian filters, but the result maintained the same.
[ "As noted in the comments, this is a tricky problem, because the image has lots of edges that you don't care about, and it's hard to filter by color, either. However, there is one feature which I think could be helpful, which is the blur. Specifically, the wire is focus, and the rest of the shot is not.\nYou could exploit this fact using a Laplacian filter. A Laplacian filter is usually used to detect edges by looking at where the filter crosses zero. However, it also can be used to detect blur by finding regions where the values of the filter are small across a wide area. To get the entire wire, I use a Gaussian smoothing filter after the Laplacian filter, which smears the high values across the width of the wire. Then, the value is thresholded.\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.ndimage\n\nimage = cv2.imread('test192_img.png')\n\nlaplacian_spread_distance = 15 # distance to spread laplacian in pixels\nwire_threshold = 110 # Out of 255. Higher values mean less of the image is kept.\n\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nfm = np.abs(cv2.Laplacian(gray, cv2.CV_64F))\nfm = scipy.ndimage.gaussian_filter(fm, sigma=lp_spread_distance)\nfm /= fm.max() / 255\nfm = fm.astype('uint8')\nret2, thresholded = cv2.threshold(fm, wire_threshold, 1, cv2.THRESH_BINARY)\nextracted = thresholded.reshape(image.shape[:2] + (1,)) * image\n\nOutput from this filter:\n\nThis method assumes that the wire is in focus. That assumption might not be justified if you have an auto-focus camera, or if the distance between the camera and wire is changing.\n" ]
[ 1 ]
[]
[]
[ "canny_operator", "computer_vision", "extract", "opencv", "python" ]
stackoverflow_0074531551_canny_operator_computer_vision_extract_opencv_python.txt
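To connect this back to the stated goal (spotting a broken wire as a dip in gray values), a rough sketch of profiling the extracted wire column by column; extracted is assumed to be the masked image produced by the answer above, and taking a single BGR channel as a gray proxy is a simplifying assumption:

import numpy as np

gray_wire = extracted[..., 0].astype(float)  # one channel of the masked image
mask = gray_wire > 0                         # pixels that survived the threshold
col_counts = mask.sum(axis=0)
# mean gray value per image column, computed only over wire pixels; NaN where no wire
profile = np.where(col_counts > 0,
                   gray_wire.sum(axis=0) / np.maximum(col_counts, 1),
                   np.nan)
# a sharp local drop in `profile` along the wire would hint at a broken bond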
Q: Guys, can someone give a hand with changing this to a for loop, please? MyList = [tuple(i for i in j if type(i) != str ) for j in MyList] The result is a tuple inside a list, for example: [(X,Y), (X2,Y2)] A: What is relevant here is understanding list comprehension, which you want to reverse. This sounds to me like some form of course homework to make sure you understand what's going on in that line ;-) MyList = [(1,2,3,(1,2), "hello world"),("hello"),(3,4,1),"world"] comp = [tuple(i for i in j if type(i) != str ) for j in MyList] print(comp) newList = [] for j in MyList: newTup = [] for i in j: if type(i) is not str: newTup.append(i) newList.append(tuple(newTup)) print(newList) fun = list(map(lambda j: tuple(filter(lambda i: type(i) is not str,j)) , MyList)) print(fun)
Guys, can someone give a hand with changing this to a for loop, please?
MyList = [tuple(i for i in j if type(i) != str ) for j in MyList] result is a tuple inside list, for example: [(X,Y), (X2,Y2)]
[ "What is relevant here is understanding list comprehension, which you want to reverse.\nThis sounds to me like some form of course homework to make sure you understand what's going on in that line ;-)\nMyList = [(1,2,3,(1,2), \"hello world\"),(\"hello\"),(3,4,1),\"world\"]\n\ncomp = [tuple(i for i in j if type(i) != str ) for j in MyList] \nprint(comp)\n\nnewList = []\nfor j in MyList:\n newTup = []\n for i in j:\n if type(i) is not str:\n newTup.append(i)\n newList.append(tuple(newTup))\nprint(newList)\n\nfun = list(map(lambda j: tuple(filter(lambda i: type(i) is not str,j)) , MyList))\nprint(fun)\n\n" ]
[ 0 ]
[]
[]
[ "for_loop", "list", "list_comprehension", "python", "tuples" ]
stackoverflow_0074535056_for_loop_list_list_comprehension_python_tuples.txt
Q: How to create a unittest that will test if a method was called for a specific class with a specific argument? Here is my code module_a.py class Parent(object): def __init__(self) -> None: pass def send(self): print('We send some message here') # send self.message class Child(Parent): def __init__(self, message): self.message = message super(Child, self).__init__() module_b.py from module_a import Child def some_function(): # do something Child('Some Message Here').send() # do something Is there any way to test that .send() was called for Child, not for Parent, and that self.message inside .send() equals some value? Thanks UPD: I am asking about writing a unit test and my main problem is how to patch/mock that in the correct way. A: Sure. You can check the type of self (your object), e.g.: class Parent(object): def __init__(self) -> None: pass def send(self): print(type(self)) # send self.message class Child(Parent): def __init__(self, message): self.message = message super(Child, self).__init__() Child('Some Message Here').send() Parent().send() results in: <class '__main__.Child'> <class '__main__.Parent'> If you want to check whether the calling object is a child, use isinstance, which will give you a boolean. Watch out though: this only works for child checks, as an instance of a Child class is also an instance of its Parent class! isinstance(self, Child) A: Mock your test case using patch. The mocked class can be checked for the call_count or even for the arguments the function of the class is called with, i.e. assert_called_with. In your case, Child.send. Since you are mocking the Child class, it confirms that the attribute send comes from it and not from the Parent class. from module_b import some_function import unittest from unittest.mock import patch class TestSomeFunc(unittest.TestCase): @patch("module_b.Child.send") def test_some_func(self, mock_send): some_function() self.assertEqual(mock_send.call_count, 1) In case you want to check the mocked class itself, it can be adapted and mocked as follows: from module_b import some_function import unittest from unittest.mock import patch class TestSomeFunc(unittest.TestCase): @patch("module_b.Child") def test_some_func(self, mock_child): some_function() mock_child.assert_called_with("Some Message Here") A: Changes to the code of your file module_a.py I have completed your method send() and I think that the modifications are in compliance with your comment ('send self.message'): in the method send() I have introduced the instruction isinstance(). I show below my code for the file module_a.py: class Parent(object): def __init__(self) -> None: pass def send(self): print(type(self)) if isinstance(self, Child): # send self.message print("send Child: " + str(self.message)) else: # for Parent, self.message doesn't exist print("send Parent!") class Child(Parent): def __init__(self, message): self.message = message super(Child, self).__init__() Test file module_b.py In my opinion it is not necessary to use patch for your test. The basis of this consideration is that the method send() is defined only in the Parent, so it is available for instances of both Child and Parent. But this is only my opinion.. In the same folder as module_a.py I write 2 tests in the test file module_b.py: in the first test sut_child, an instance of class Child, is created and its method send() is invoked; after that I check the value of self.message and that the object sut_child is of type Child in the second test sut_parent, an instance of class Parent, is created and its method send() is invoked; after that I check that the object sut_parent is of type Parent Below is the code of the test file module_b.py: from module_a import Child, Parent import unittest from unittest.mock import patch class TestSomeFunc(unittest.TestCase): def test_send_func_child(self): sut_child = Child('Some Message Here') sut_child.send() self.assertEqual('Some Message Here', sut_child.message) self.assertTrue(isinstance(sut_child, Child)) def test_send_func_parent(self): sut_parent = Parent() sut_parent.send() self.assertFalse(isinstance(sut_parent, Child)) if __name__ == '__main__': unittest.main() Output of execution of module_b.py If you execute module_b.py the output is: # output of test_send_func_child: <class 'module_a.Child'> send Child: Some Message Here # output of test_send_func_parent <class 'module_a.Parent'> send Parent! # output for the execution of the 2 tests (the asserts are correct) Ran 2 tests in 0.000s OK
How to create a unittest that will test if a method was called for a specific class with a specific argument?
Here is my code module_a.py class Parent(object): def __init__(self) -> None: pass def send(self): print('We send some message here') # send self.message class Child(Parent): def __init__(self, message): self.message = message super(Child, self).__init__() module_b.py from module_a import Child def some_function(): # do something Child('Some Message Here').send() # do something Is there any way to test that .send() was called for Child not for parent and self.message inside .send() equals to some value. Thanks UPD: I am asking about writing a unit test and my main problem is how to patch /mock that in the correct way.
[ "Sure. You can check the Type of self (your Object)\neg:\n class Parent(object):\n def __init__(self) -> None:\n pass\n\n def send(self):\n print(type(self))\n # send self.message\n\nclass Child(Parent):\n def __init__(self, message):\n self.message = message\n super(Child, self).__init__()\n\n\n\nChild('Some Message Here').send()\n\nParent().send()\n\nresults in:\n<class '__main__.Child'>\n<class '__main__.Parent'>\n\nif you want to Check if the calling Object is a child use isinstance which will give you a boolean. Although watch out, this only works for Child Checks, as a Child Class is also an instance of a Parent Class!\nisinstance(self, Child)\n\n", "Mocking your test case using patch. The mocked class can be checked for the call_count or even by the arguments the function of the class is called with i.e. assert_called_with. In your case Child.send.\nSince you are mocking the Child class, it confirms that the attribute send comes from it and not from the Parent class.\nfrom module_b import some_function\nimport unittest\nfrom unittest.mock import patch\n\n\nclass TestSomeFunc(unittest.TestCase):\n @patch(\"module_b.Child.send\")\n def test_some_func(self, mock_send):\n some_function()\n self.assertEqual(mock_send.call_count, 1)\n\nIn case to check for the mocked class, it can be adapted and mocked as follows:\nfrom module_b import some_function\nimport unittest\nfrom unittest.mock import patch\n\n\nclass TestSomeFunc(unittest.TestCase):\n @patch(\"module_b.Child\")\n def test_some_func(self, mock_child):\n some_function()\n mock_child.assert_called_with(\"Some Message Here\")\n\n", "Changes to the code of your file module_a.py\nI have completed your method send() and I think that the modifications are in compliance with your comment ('send self.message'): in the method send() I have introduced the instruction instanceof().\nI show below my code for the file module_a.py:\nclass Parent(object):\n def __init__(self) -> None:\n pass\n\n def send(self):\n print(type(self))\n if isinstance(self, Child):\n # send self.message\n print(\"send Child: \" + str(self.message))\n else:\n # for Parent doesn't exist self.message\n print(\"send Parent!\")\n\nclass Child(Parent):\n def __init__(self, message):\n self.message = message\n super(Child, self).__init__()\n\n\nTest file module_b.py\nIn my opinion is not necessary use patch for your test. At base of this consideration is that the method send() is defined only in the Parent, so it is available for both instance of Child and Parent. 
But this is only my opinion..\nIn the same folder of module_a.py I write 2 tests in test file module_b.py:\n\nin the first test it is created sut_child, an istance of class Child, and invoked its method send(); after that I check the value of self.message and that the object sut_child is of type Child\n\nin the second test it is create sut_parent, an istance of class Parent, and invoked its method send(); after that I check that the object sut_parent is of type Parent\n\n\nBelow the code of test file module_b.py:\nfrom module_a import Child, Parent\nimport unittest\nfrom unittest.mock import patch\n\nclass TestSomeFunc(unittest.TestCase):\n\n def test_send_func_child(self):\n sut_child = Child('Some Message Here')\n sut_child.send()\n self.assertEqual('Some Message Here', sut_child.message)\n self.assertTrue(isinstance(sut_child, Child))\n\n def test_send_func_parent(self):\n sut_parent = Parent()\n sut_parent.send()\n self.assertFalse(isinstance(sut_parent, Child))\n\nif __name__ == '__main__':\n unittest.main()\n\nOutput of execution of module_b.py\nIf you execute module_b.py the output is:\n# output of test_send_func_child:\n<class 'module_a.Child'>\nsend Child: Some Message Here\n\n# output of test_send_func_parent\n<class 'module_a.Parent'>\nsend Parent!\n\n# output for the execution of 2 tests (the assert are correct)\nRan 2 tests in 0.000s\n\nOK\n\n" ]
[ 1, 1, 0 ]
[]
[]
[ "mocking", "python", "python_unittest" ]
stackoverflow_0074530758_mocking_python_python_unittest.txt
Q: How do I change directory back to my original working directory with Python? I have a function that resembles the one below. I'm not sure how to use the os module to get back to my original working directory at the conclusion of the jar's execution. def run(): owd = os.getcwd() #first change dir to build_dir path os.chdir(testDir) #run jar from test directory os.system(cmd) #change dir back to original working directory (owd) note: I think my code formatting is off - not sure why. My apologies in advance A: A context manager is a very appropriate tool for this job: from contextlib import contextmanager @contextmanager def cwd(path): oldpwd = os.getcwd() os.chdir(path) try: yield finally: os.chdir(oldpwd) ...used as: os.chdir('/tmp') # for testing purposes, be in a known directory print(f'before context manager: {os.getcwd()}') with cwd('/'): # code inside this block, and only inside this block, is in the new directory print(f'inside context manager: {os.getcwd()}') print(f'after context manager: {os.getcwd()}') ...which will yield something like: before context manager: /tmp inside context manager: / after context manager: /tmp This is actually superior to the cd - shell builtin, inasmuch as it also takes care of changing directories back when a block is exited due to an exception being thrown. For your specific use case, this would instead be: with cwd(testDir): os.system(cmd) Another option to consider is using subprocess.call() instead of os.system(), which will let you specify a working directory for the command to run: # note: better to modify this to not need shell=True if possible subprocess.call(cmd, cwd=testDir, shell=True) ... which would prevent you from needing to change the interpreter's directory at all. Note that now it is recommended to use subprocess.run (instead of call) but the same arguments are available, and in particular cwd: https://docs.python.org/3/library/subprocess.html#using-the-subprocess-module. A: You simply need to add the line: os.chdir(owd) Just a note this was also answered in your other question. A: The advice to use os.chdir(owd) is good. It would be wise to put the code which needs the changed directory in a try:finally block (or in python 2.6 and later, a with: block.) That reduces the risk that you will accidentally put a return in the code before the change back to the original directory. def run(): owd = os.getcwd() try: #first change dir to build_dir path os.chdir(testDir) #run jar from test directory os.system(cmd) finally: #change dir back to original working directory (owd) os.chdir(owd) A: A context-manager is overkill for this situation (executing a system command). The best solution is to use the subprocess module instead (Python 2.4 onwards) and the run or popen methods with the cwd argument. So, your code can be replaced with: def run(): #run jar from test directory subprocess.run(cmd, cwd=testDir) See https://bugs.python.org/issue25625 and https://docs.python.org/3/library/subprocess.html#subprocess-replacements. A: os.chdir(owd) should do the trick (like you've done when changing to testDir) A: Python is case sensitive so when typing the path make sure it's the same as the directory you want to set. 
import os os.getcwd() os.chdir('C:\\') A: I looked around the answers on StackOverflow and eventually decided to write my own decorator for this purpose: import os from collections.abc import Callable from functools import wraps from typing import ParamSpec, TypeVar T = TypeVar('T') P = ParamSpec('P') def enter_subdir(subdir: str) -> Callable[[Callable[P, T]], Callable[P, T]]: """During the execution of a function, temporarily enter a subdirectory.""" def decorator(function: Callable[P, T]) -> Callable[P, T]: @wraps(function) def wrapper(*args, **kwargs) -> T: os.makedirs(subdir, exist_ok=True) os.chdir(subdir) result = function(*args, **kwargs) os.chdir("..") return result return wrapper return decorator A: Python 3.11 update: You can now simply use contextlib.chdir from the stdlib. It changes the directory when entering the block, then restores the old directory on exit: from contextlib import chdir from os import getcwd print(f"Before: {getcwd()}") with chdir("/"): print(f"inside: {getcwd()}") print(f"after: {getcwd()}")
How do I change directory back to my original working directory with Python?
I have a function that resembles the one below. I'm not sure how to use the os module to get back to my original working directory at the conclusion of the jar's execution. def run(): owd = os.getcwd() #first change dir to build_dir path os.chdir(testDir) #run jar from test directory os.system(cmd) #change dir back to original working directory (owd) note: I think my code formatting is off - not sure why. My apologies in advance
[ "A context manager is a very appropriate tool for this job:\nfrom contextlib import contextmanager\n\n@contextmanager\ndef cwd(path):\n oldpwd = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(oldpwd)\n\n...used as:\nos.chdir('/tmp') # for testing purposes, be in a known directory\nprint(f'before context manager: {os.getcwd()}')\nwith cwd('/'):\n # code inside this block, and only inside this block, is in the new directory\n print(f'inside context manager: {os.getcwd()}')\nprint(f'after context manager: {os.getcwd()}')\n\n...which will yield something like:\nbefore context manager: /tmp\ninside context manager: /\nafter context manager: /tmp\n\nThis is actually superior to the cd - shell builtin, inasmuch as it also takes care of changing directories back when a block is exited due to an exception being thrown.\n\nFor your specific use case, this would instead be:\nwith cwd(testDir):\n os.system(cmd)\n\n\nAnother option to consider is using subprocess.call() instead of os.system(), which will let you specify a working directory for the command to run:\n# note: better to modify this to not need shell=True if possible\nsubprocess.call(cmd, cwd=testDir, shell=True)\n\n... which would prevent you from needing to change the interpreter's directory at all.\nNote that now it is recommended to use subprocess.run (instead of call) but the same arguments are available, and in particular cwd: https://docs.python.org/3/library/subprocess.html#using-the-subprocess-module.\n", "You simply need to add the line:\nos.chdir(owd)\n\nJust a note this was also answered in your other question.\n", "The advice to use os.chdir(owd) is good. It would be wise to put the code which needs the changed directory in a try:finally block (or in python 2.6 and later, a with: block.) That reduces the risk that you will accidentally put a return in the code before the change back to the original directory.\ndef run(): \n owd = os.getcwd()\n try:\n #first change dir to build_dir path\n os.chdir(testDir)\n #run jar from test directory\n os.system(cmd)\n finally:\n #change dir back to original working directory (owd)\n os.chdir(owd)\n\n", "A context-manager is overkill for this situation (executing a system command). 
The best solution is to use the subprocess module instead (Python 2.4 onwards) and the run or popen methods with the cwd argument.\nSo, your code can be replaced with:\ndef run(): \n    #run jar from test directory\n    subprocess.run(cmd, cwd=testDir)\n\nSee https://bugs.python.org/issue25625 and https://docs.python.org/3/library/subprocess.html#subprocess-replacements.\n", "os.chdir(owd) should do the trick (like you've done when changing to testDir)\n", "Python is case sensitive so when typing the path make sure it's the same as the directory\nyou want to set.\nimport os\n\nos.getcwd()\n\nos.chdir('C:\\\\')\n\n", "I looked around the answers on StackOverflow and eventually decided to write my own decorator for this purpose:\nimport os\nfrom collections.abc import Callable\nfrom functools import wraps\nfrom typing import ParamSpec, TypeVar\n\n\nT = TypeVar('T')\nP = ParamSpec('P')\n \n \ndef enter_subdir(subdir: str) -> Callable[[Callable[P, T]], Callable[P, T]]:\n    \"\"\"During the execution of a function, temporarily enter a subdirectory.\"\"\"\n\n    def decorator(function: Callable[P, T]) -> Callable[P, T]:\n        @wraps(function)\n        def wrapper(*args, **kwargs) -> T:\n            os.makedirs(subdir, exist_ok=True)\n            os.chdir(subdir)\n            result = function(*args, **kwargs)\n            os.chdir(\"..\")\n            return result\n\n        return wrapper\n\n    return decorator\n\n", "Python 3.11 update:\nYou can now simply use contextlib.chdir from the stdlib. It changes the directory when entering the block, then restores the old directory on exit:\nfrom contextlib import chdir\nfrom os import getcwd\n\nprint(f\"Before: {getcwd()}\")\nwith chdir(\"/\"):\n    print(f\"inside: {getcwd()}\")\nprint(f\"after: {getcwd()}\")\n\n" ]
[ 64, 34, 16, 3, 2, 2, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0000299446_python.txt
Q: TypeError: __init__() got an unexpected keyword argument 'model_list' Getting an error like TypeError: __init__() got an unexpected keyword argument 'model_list' when I am running the following script: from autots import AutoTS model_list = ['LastValueNaive','GLS','ETS','AverageValueNaive',] model = AutoTS( forecast_length=49, frequency='infer', prediction_interval=0.95, ensemble=['simple', 'horizontal-min'], max_generations=5, num_validations=2, validation_method='seasonal 168', model_list=model_list, transformer_list='all', models_to_validate=0.2, drop_most_recent=1, n_jobs='auto', ) I have taken the script from the AutoTS documentation only. I haven't changed anything but I am getting this error. A: Try without making strings in model_list, like so: model_list = [LastValueNaive,GLS,ETS,AverageValueNaive] This approach worked for me when using neuralforecast and mlforecast from Nixtla. Maybe it works for you too.
TypeError: __init__() got an unexpected keyword argument 'model_list'
Getting an error like TypeError: __init__() got an unexpected keyword argument 'model_list' when I am running the following script: from autots import AutoTS model_list = ['LastValueNaive','GLS','ETS','AverageValueNaive',] model = AutoTS( forecast_length=49, frequency='infer', prediction_interval=0.95, ensemble=['simple', 'horizontal-min'], max_generations=5, num_validations=2, validation_method='seasonal 168', model_list=model_list, transformer_list='all', models_to_validate=0.2, drop_most_recent=1, n_jobs='auto', ) I have taken the script from the AutoTS documentation only. I haven't changed anything but I am getting this error.
[ "Try without making strings in model_list like so:\nmodel_list = [LastValueNaive,GLS,ETS,AverageValueNaive]\n\nThis approach worked for me when using neuralforecast, mlforecast from Nixtla. Maybe it works for you too.\n" ]
[ 0 ]
[]
[]
[ "forecasting", "python" ]
stackoverflow_0071672448_forecasting_python.txt
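Since model_list is a documented AutoTS constructor argument, this TypeError usually points at an outdated autots install rather than at the strings in the list; a quick sanity check before changing any code (the exact minimum version is not pinned down here):

import autots
from inspect import signature

print(autots.__version__)
print('model_list' in signature(autots.AutoTS).parameters)  # True on a current install
# if it prints False, upgrade first:
#   pip install --upgrade autots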
Q: Separate text in cells and decompose into different columns depending on the content How can I take the date from the rec event and from the visit event and put them in different columns? As you can see, there can be more than 2 events (not only rec and visit). Moreover, they can be interchanged. I have this DF: df = pd.DataFrame({'event': ['rec - 2022-11-13 21:07:51, visit - 2022-11-16 10:01:01', 'visit - 2022-11-14 15:34:28, rec - 2022-11-12 09:03:58', 'rec - 2022-11-13 15:13:16, visit - 2022-11-14 12:15:01', 'rec - 2022-11-17 15:13:17, call - 2022-11-20 12:11:01 visit - 2022-11-21 12:15:01'], 'id':[17, 34, 36, 11]}) I need this DF: df1 = pd.DataFrame({'rec': ['2022-11-13', '2022-11-12', '2022-11-13', '2022-11-17'], 'visit': ['2022-11-16', '2022-11-14', '2022-11-14', '2022-11-21'], 'id': [17, 34, 36, 11]}) I can't just use split(', ')[0] and split(', ')[-1]. A: Here is what worked for me; I needed to apply several splits given the input data: import pandas as pd df = pd.DataFrame({'event': ['rec - 2022-11-13 21:07:51, visit - 2022-11-16 10:01:01', 'visit - 2022-11-14 15:34:28, rec - 2022-11-12 09:03:58', 'rec - 2022-11-13 15:13:16, visit - 2022-11-14 12:15:01', 'rec - 2022-11-17 15:13:17, call - 2022-11-20 12:11:01 visit - 2022-11-21 12:15:01'], 'id':[17, 34, 36, 11]}) liste = df['event'].str.split(',', 1).tolist() rec = [] visit = [] for lst in liste: rec.append([i.strip() for i in lst if i.strip().startswith('rec')]) visit.append([i.strip() for i in lst if not i.strip().startswith('rec')]) rec = [i[0].split('rec - ')[1].split(' ')[0] for i in rec] visit = [i[0].split('visit - ')[1].split(' ')[0] for i in visit] df['rec'] = rec df['visit'] = visit
Separate text in cells and decompose into different columns depending on the content
how can i take date from rec event and from visit event and put it in different columns? as you can see, there can be more than 2 events (not only rec and visit). Moreover, they can be interchanged i have DF df = pd.DataFrame({'event': ['rec - 2022-11-13 21:07:51, visit - 2022-11-16 10:01:01', 'visit - 2022-11-14 15:34:28, rec - 2022-11-12 09:03:58', 'rec - 2022-11-13 15:13:16, visit - 2022-11-14 12:15:01', 'rec - 2022-11-17 15:13:17, call - 2022-11-20 12:11:01 visit - 2022-11-21 12:15:01'], 'id':[17, 34, 36, 11]}) i need DF df1 = pd.DataFrame({'rec': ['2022-11-13', '2022-11-12', '2022-11-13', '2022-11-17'], 'visit': ['2022-11-16', '2022-11-14', '2022-11-14', '2022-11-21'], 'id': [17, 34, 36, 11]}) i can't just split(', ')[0] and split(', ')[-1]
[ "Here is what worked for me, I needed to apply several splits given the input data:\nimport pandas as pd\n\n\ndf = pd.DataFrame({'event': ['rec - 2022-11-13 21:07:51, visit - 2022-11-16 10:01:01',\n 'visit - 2022-11-14 15:34:28, rec - 2022-11-12 09:03:58',\n 'rec - 2022-11-13 15:13:16, visit - 2022-11-14 12:15:01',\n 'rec - 2022-11-17 15:13:17, call - 2022-11-20 12:11:01 visit - 2022-11-21 12:15:01'],\n 'id':[17, 34, 36, 11]})\n\nliste = df['event'].str.split(',', 1).tolist()\n\nrec = []\nvisit = []\nfor lst in liste:\n rec.append([i.strip() for i in lst if i.strip().startswith('rec')])\n visit.append([i.strip() for i in lst if not i.strip().startswith('rec')])\n\nrec = [i[0].split('rec - ')[1].split(' ')[0] for i in rec]\nvisit = [i[0].split('visit - ')[1].split(' ')[0] for i in visit]\ndf['rec'] = rec\ndf['visit'] = visit\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "python", "split" ]
stackoverflow_0074535072_pandas_python_split.txt
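A more declarative alternative under the same assumptions (every event is written as 'name - timestamp' and each id has at most one event of each kind):

import pandas as pd

df = pd.DataFrame({'event': ['rec - 2022-11-13 21:07:51, visit - 2022-11-16 10:01:01',
                             'visit - 2022-11-14 15:34:28, rec - 2022-11-12 09:03:58'],
                   'id': [17, 34]})

# one row per (event kind, date) occurrence, regardless of order or count
m = df['event'].str.extractall(r'(?P<kind>\w+) - (?P<date>\d{4}-\d{2}-\d{2})')
m = m.droplevel('match').join(df['id'])
out = m.pivot(index='id', columns='kind', values='date').reset_index()
print(out)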
Q: Multiprocessing messes up logging to file Issue Multiprocessing messes up logging to file: Lines already written may be removed New lines may not be written Order of lines may be incorrect Logging works fine if I don't use multiprocessing. I read that I can use a QueueHandler, but I want to understand why writing logging to some handler still messes up another handler. Code to reproduce Set USE_MP=False or uncomment exit() to verify that logging works fine as long as multiprocessing lines are not executed. import logging import multiprocessing import time from multiprocessing import Pool from typing import Dict, List USE_MP = True logger = logging.getLogger() logger.addHandler(logging.FileHandler(filename="test.log", mode="w")) logger.setLevel(logging.DEBUG) mplogger = multiprocessing.log_to_stderr() mplogger.addHandler(logging.StreamHandler()) mplogger.setLevel(logging.DEBUG) def time_consuming_function(file_name): logger.info(f"Running time_consuming_function with {file_name}") time.sleep(1) return file_name, file_name def mp(file_names: List[str]) -> Dict[str, str]: logger.info(f"Running mp...") with Pool() as p: return { file_name: file_name for file_name, file_name in p.imap_unordered( time_consuming_function, file_names ) } def non_mp(file_names: List[str]) -> Dict[str, str]: logger.info(f"Running non-mp...") return { file_name: file_name for file_name, file_name in map(time_consuming_function, file_names) } def main(): logger.info("Start run...") file_names = list("ABCDE") # exit() if USE_MP: mp(file_names) else: non_mp(file_names) logger.info("End run.") if __name__ == "__main__": main() Python version: 3.7 A: I want to understand why writing logging to some handler still messes up another handler. It's explained here in the Python documentation, assuming that you're talking about file-related handlers. You can use QueueHandler with a QueueListener, or you can use a SocketHandler with a suitable listener. The documentation contains info on running a logging socket listener in production and even has a link to a Gist with files you can download and experiment with / adapt to your needs.
Multiprocessing messes up logging to file
Issue Multiprocessing messes up logging to file: Lines already written may be removed New lines may not be written Order of lines may be incorrect Logging works fine if I don't use multiprocessing. I read that I can use a QueueHandler, but I want to understand why writing logging to some handler still messes up another handler. Code to reproduce Set USE_MP=False or uncomment exit() to verify that logging works fine as long as multiprocessing lines are not executed. import logging import multiprocessing import time from multiprocessing import Pool from typing import Dict, List USE_MP = True logger = logging.getLogger() logger.addHandler(logging.FileHandler(filename="test.log", mode="w")) logger.setLevel(logging.DEBUG) mplogger = multiprocessing.log_to_stderr() mplogger.addHandler(logging.StreamHandler()) mplogger.setLevel(logging.DEBUG) def time_consuming_function(file_name): logger.info(f"Running time_consuming_function with {file_name}") time.sleep(1) return file_name, file_name def mp(file_names: List[str]) -> Dict[str, str]: logger.info(f"Running mp...") with Pool() as p: return { file_name: file_name for file_name, file_name in p.imap_unordered( time_consuming_function, file_names ) } def non_mp(file_names: List[str]) -> Dict[str, str]: logger.info(f"Running non-mp...") return { file_name: file_name for file_name, file_name in map(time_consuming_function, file_names) } def main(): logger.info("Start run...") file_names = list("ABCDE") # exit() if USE_MP: mp(file_names) else: non_mp(file_names) logger.info("End run.") if __name__ == "__main__": main() Python version: 3.7
[ "\nI want to understand why writing logging to some handler still messes up another handler.\n\nIt's explained here in the Python documentation, assuming that you're talking about file-related handlers. You can use QueueHandler with a QueueListener, or you can use a SocketHandler with a suitable listener. The documentation contains info on running a logging socket listener in production and even has a link to a Gist with files you can download and experiment with / adapt to your needs.\n" ]
[ 0 ]
[]
[]
[ "logging", "multiprocessing", "python", "python_3.x" ]
stackoverflow_0074530205_logging_multiprocessing_python_python_3.x.txt
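Since the answer above only links to the QueueHandler/QueueListener pattern, here is a minimal sketch of it; the file name and the worker function are placeholders, not from the original post:

import logging
import logging.handlers
import multiprocessing

def worker(queue, n):
    # Each worker process logs only to the shared queue.
    logger = logging.getLogger()
    logger.addHandler(logging.handlers.QueueHandler(queue))
    logger.setLevel(logging.DEBUG)
    logger.info("message %s", n)

if __name__ == "__main__":
    queue = multiprocessing.Queue()
    # The listener runs in the main process and owns the single FileHandler,
    # so no two processes ever write to the file at the same time.
    listener = logging.handlers.QueueListener(
        queue, logging.FileHandler("test.log", mode="w"))
    listener.start()
    procs = [multiprocessing.Process(target=worker, args=(queue, n)) for n in range(5)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    listener.stop()

Exactly one process writes to test.log here, which avoids the lost and reordered lines described in the question.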
Q: merge rows with reversed columns I have a dataframe, and I would like to merge the rows that have the same values in reversed columns. An example is below: Column1 Column2 A B B A C D D C E F Expected results: Column1 Column2 A B C D E F As the file has fewer than 50 lines (though I have 1000 files), I tried some code using iterrows, as follows: for index, row in df.iterrows(): output = [] row_rev = df[(df['Column1'] == row['Column2']) & (df['Column2'] == row['Column1'])] row_rev_index = df[(df['Column1'] == row['Column2']) & (df['Column2'] == row['Column1'])].index() if row_rev.any(): print(df[min([index, row_rev_index])]) output.append(df[min([index, row_rev_index])]) # always print out the first line of the reciprocal lines but it complains that row_rev_index = df[(df['Column1'] == row['Column2']) & (df['Column2'] == row['Column1'])].index() TypeError: 'Int64Index' object is not callable A: Change row_rev_index = df[(df['Column1'] == row['Column2']) & (df['Column2'] == row['Column1'])].index() to row_rev_index = df[(df['Column1'] == row['Column2']) & (df['Column2'] == row['Column1'])].index or even shorter row_rev_index = row_rev.index A: This may be what you are looking for: df = df.groupby(df.apply(lambda x: tuple(set(x)),axis=1)).first()
merge rows with reversed columns
I have a dataframe, and I would like to merge the rows that have the same values in reversed columns. An example is below: Column1 Column2 A B B A C D D C E F Expected results: Column1 Column2 A B C D E F As the file has fewer than 50 lines (though I have 1000 files), I tried some code using iterrows, as follows: for index, row in df.iterrows(): output = [] row_rev = df[(df['Column1'] == row['Column2']) & (df['Column2'] == row['Column1'])] row_rev_index = df[(df['Column1'] == row['Column2']) & (df['Column2'] == row['Column1'])].index() if row_rev.any(): print(df[min([index, row_rev_index])]) output.append(df[min([index, row_rev_index])]) # always print out the first line of the reciprocal lines but it complains that row_rev_index = df[(df['Column1'] == row['Column2']) & (df['Column2'] == row['Column1'])].index() TypeError: 'Int64Index' object is not callable
[ "Change\nrow_rev_index = df[(df['Column1'] == row['Column2']) & (df['Column2'] == row['Column1'])].index()\n\nto\nrow_rev_index = df[(df['Column1'] == row['Column2']) & (df['Column2'] == row['Column1'])].index\n\nor even shorter\nrow_rev_index = row_rev.index\n\n", "This may be what you are looking for:\ndf = df.groupby(df.apply(lambda x: tuple(set(x)),axis=1)).first()\n\n" ]
[ 0, 0 ]
[]
[]
[ "dataframe", "python" ]
stackoverflow_0074535305_dataframe_python.txt
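A variant of the groupby answer above, assuming no row ever holds the same value in both columns: building a frozenset key per row makes 'A,B' and 'B,A' compare equal, and duplicated() then keeps the first occurrence with its original index intact:

import pandas as pd

df = pd.DataFrame({'Column1': list('ABCDE'), 'Column2': list('BADCF')})

key = df.apply(frozenset, axis=1)  # order-insensitive row key
result = df[~key.duplicated()]
print(result)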
Q: cannot import name 'pad_sequences' from 'keras.preprocessing.sequence' I'm trying to import these: from numpy import array from keras.preprocessing.text import one_hot from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers.core import Activation, Dropout, Dense from keras.layers import Flatten, LSTM from keras.layers import GlobalMaxPooling1D from keras.models import Model But I'm getting this error: cannot import name 'pad_sequences' from 'keras.preprocessing.sequence' Can anyone help me here please? A: Replace: from keras.preprocessing.sequence import pad_sequences With: from keras_preprocessing.sequence import pad_sequences A: You can use this. It worked for me. from tensorflow.keras.preprocessing.sequence import pad_sequences A: According to the TensorFlow v2.10.0 doc, the correct path to pad_sequences is tf.keras.utils.pad_sequences. So in your script one should write: from keras.utils import pad_sequences It has resolved the problem for me. A: Most likely you are using tf version 2.9 - go back to 2.8 and the same path works. Alternatively, import it from keras.utils.data_utils import pad_sequences. TF is not so stable with paths - the best way is to check their git source corresponding to the version you succeeded in installing! In the case of TF2.9 you can see how it is imported here A: The correct path to import is keras.io.preprocessing.sequence.pad_sequences. Your path lacks the io. from keras.io.preprocessing.sequence import pad_sequences A: I came across the same problem just now but still don't know what is going on (still waiting for an answer). I gave up importing pad_sequences, wrote it in full, and it works: keras.preprocessing.sequence.pad_sequences() A: In their last update, Keras 2.11.0, they made a few changes and improvements to their packages. Considering your issue you should: replace this: from keras.preprocessing.sequence import pad_sequences with this: from keras_preprocessing.sequence import pad_sequences
cannot import name 'pad_sequences' from 'keras.preprocessing.sequence'
I'm trying to import these: from numpy import array from keras.preprocessing.text import one_hot from keras.preprocessing.sequence import pad_sequences from keras.models import Sequential from keras.layers.core import Activation, Dropout, Dense from keras.layers import Flatten, LSTM from keras.layers import GlobalMaxPooling1D from keras.models import Model But I'm getting this error: cannot import name 'pad_sequences' from 'keras.preprocessing.sequence' Can anyone help me here please?
[ "Replace:\nfrom keras.preprocessing.sequence import pad_sequences\n\nWith:\nfrom keras_preprocessing.sequence import pad_sequences\n\n", "you can use this. It is worked for me.\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\n", "According to the TensorFlow v2.10.0 doc, the correct path to pad_sequences is tf.keras.utils.pad_sequences. So in your script one should write:\nfrom keras.utils import pad_sequences\n\nIt has resolved the problem for me.\n", "most likely you are using tf version 2.9 - go back to 2.8 and the same path works\nalternatively import it from keras.utils.data_utils import pad_sequences\nTF is not so stable with paths - the best way is check their git source corresponding to the version you succeeded to install !! in the case of TF2.9 you can see how it is importedhere\n", "The correct path to import is keras.io.preprocessing.sequence.pad_sequences. Your path lacks the io.\nfrom keras.io.preprocessing.sequence import pad_sequences\n\n", "I came across the same problem just now but still don't know what is going on(still waiting for an answer).\nI gave up importing pad_sequences and write it in full and it works\nkeras.preprocessing.sequence.pad_sequences()\n\n", "In their last update Kiras 2.11.0 they made few changes and improvements to their packages.\nConsidering your issue you should:\n\nreplace this:\n\nfrom keras.preprocessing.sequence import pad_sequences\n\nwith this:\n\nfrom keras_preprocessing.sequence import pad_sequences\n\n" ]
[ 34, 5, 4, 2, 1, 1, 0 ]
[]
[]
[ "keras", "python", "python_import" ]
stackoverflow_0072326025_keras_python_python_import.txt
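For context, a minimal usage sketch (assuming TensorFlow 2.x) of what pad_sequences does once the import resolves:

from tensorflow.keras.preprocessing.sequence import pad_sequences

sequences = [[1, 2, 3], [4, 5], [6]]
# Pads every sequence with zeros up to the longest one (here, length 3).
padded = pad_sequences(sequences, padding='post')
print(padded)
# [[1 2 3]
#  [4 5 0]
#  [6 0 0]]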
Q: Using plt.savefig over a for loop of iterated plots returns blank image Ok, so I apologize if this has been asked before, but I am running into some issues with trying to execute plt.savefig on some plots I want to save to a certain directory on my computer. I currently have 481 plots that I generated through the following code: ID=np.array(table['ID']) My ID array comes from an isolated column from an astropy table which I made into an array to match the format of my other variables. path=str('/Volumes/Desktop/Folder') filename=str('/Filename_ID_') outfilename=path+filename outfilename yra = [0, 3] def PLOTS(M,P,Match,Mean,P25,P50,P75): ax=plt.subplot(); return (plt.figure(), ax.plot(M,P,'b',lw=2), ax.plot((Match*np.ones(2)), yra, 'red',lw=3, label='match'), ax.plot((P25*np.ones(2)), yra, 'b--', label='P25'), ax.plot(P50*np.ones(2), yra, 'c--', label='P50',linewidth=4), ax.plot(P75*np.ones(2), yra, 'r--', label='P75'), ax.plot(Mean*np.ones(2), yra, 'darkgoldenrod',label='mean'), ax.set_xlabel('M',fontsize=14), ax.set_ylabel('P',fontsize=14), ax.legend(fontsize=10,loc='upper right'), plt.savefig(outfilename+str(ID)+'.pdf'))); for i in range(len(M)): print(PLOTS(M, P[i],Matches[i],Mean[i],P25[i],P50[i],P75[i])) plt.savefig(outfilename+str(ID[i])+'.pdf') This outputs my plots but when I go check my Folder on my desktop the image is blank. I know that if you execute plt.show() before your plt.savefig() then that is the reason why you get a blank image, but I am not executing plt.show() at all. I have tried to add the plt.show() command after the plt.savefig commands on both my function and my for loop but I still come up with a blank image. I have also checked out other posts similar to mine such as How do I save a new graph as png with every iteration of a loop but this still did not help with my issue. I really appreciate any help you guys may provide and again apologize if this question has been asked. A: You should try to add plt.close() on the line after plt.savefig(outfilename+str(ID[i])+'.pdf')
Using plt.savefig over a for loop of iterated plots returns blank image
Ok, so I apologize if this has been asked before, but I am running into some issues with trying to execute plt.savefig on some plots I want to save to a certain directory on my computer. I currently have 481 plots that I generated through the following code: ID=np.array(table['ID']) My ID array comes from an isolated column from an astropy table which I made into an array to match the format of my other variables. path=str('/Volumes/Desktop/Folder') filename=str('/Filename_ID_') outfilename=path+filename outfilename yra = [0, 3] def PLOTS(M,P,Match,Mean,P25,P50,P75): ax=plt.subplot(); return (plt.figure(), ax.plot(M,P,'b',lw=2), ax.plot((Match*np.ones(2)), yra, 'red',lw=3, label='match'), ax.plot((P25*np.ones(2)), yra, 'b--', label='P25'), ax.plot(P50*np.ones(2), yra, 'c--', label='P50',linewidth=4), ax.plot(P75*np.ones(2), yra, 'r--', label='P75'), ax.plot(Mean*np.ones(2), yra, 'darkgoldenrod',label='mean'), ax.set_xlabel('M',fontsize=14), ax.set_ylabel('P',fontsize=14), ax.legend(fontsize=10,loc='upper right'), plt.savefig(outfilename+str(ID)+'.pdf'))); for i in range(len(M)): print(PLOTS(M, P[i],Matches[i],Mean[i],P25[i],P50[i],P75[i])) plt.savefig(outfilename+str(ID[i])+'.pdf') This outputs my plots but when I go check my Folder on my desktop the image is blank. I know that if you execute plt.show() before your plt.savefig() then that is the reason why you get a blank image, but I am not executing plt.show() at all. I have tried to add the plt.show() command after the plt.savefig commands on both my function and my for loop but I still come up with a blank image. I have also checked out other posts similar to mine such as How do I save a new graph as png with every iteration of a loop but this still did not help with my issue. I really appreciate any help you guys may provide and again apologize if this question has been asked.
[ "You shoud try to add plt.close() the line after plt.savefig(outfilename+str(ID[i])+'.pdf')\n" ]
[ 0 ]
[]
[]
[ "matplotlib", "python" ]
stackoverflow_0062037161_matplotlib_python.txt
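A sketch of the figure-per-iteration pattern the answer implies, with placeholder data: create a fresh figure, save it, then close it inside the loop so nothing blank or stale carries over to the next file:

import numpy as np
import matplotlib.pyplot as plt

ids = ['a', 'b', 'c']
for name in ids:
    fig, ax = plt.subplots()
    ax.plot(np.random.random(10), label=name)
    ax.legend()
    fig.savefig(f'plot_{name}.pdf')  # save before any plt.show()
    plt.close(fig)                   # free the figure for the next iteration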
Q: why is iterating over a Numpy array faster than direct operations I wanted to find out if it is significantly slower to iterate over the first two dimensions of an array in comparison to doing the operations columnwise. To my surprise I found out that it's actually faster to do the operations elementwise. Can someone explain? Here is the code: def row_by_row(arr, cop): for i in range(arr.shape[0]): for ii in range(arr.shape[1]): arr[i, ii] = cop[i, ii].copy() return arr def all(arr, cop): for i in range(arr.shape[1]): arr[:,i] = cop[:, i].copy() return arr print(timeit.timeit("row_by_row(arr, cop)", setup="arr=np.ones((26, 15, 5000)); cop = np.random.random((26, 15,5000))",number=50, globals=globals())) print(timeit.timeit("all(arr, cop)",setup="arr=np.ones((26, 15, 5000)); cop=np.random.random((26, 15,5000))", number=50, globals=globals())) this was the time: 0.12496590000000007 0.4989047 A: Short Answer: Memory Allocation Long Answer: As the commenters in the question point out, the measure results seem very unreliable. Increasing the number of operations for the measurement to 2000 gives more steady results Row: 3.519135099995765 All: 5.321293300003163 One thing which certainly impacts the performance is how arrays are stored in the memory and how many cache hits / misses we have. def matrix(arr, cop): for i in range(arr.shape[0]): arr[i] = cop[i].copy() return arr This is a bit better in performance than copying "columns" Matrix: 4.6333566999965115 It is still slower though than going through it row by row. Why? For this, let's take one step back from the loop def just_copy(arr, cop): return cop.copy() Copy: 5.482903500000248 In just copying the whole thing, we're slower again! I would assume the cause for it being faster to loop through the arrays is mostly memory allocation. There may also be some additional overhead of copying NumPy structures. A: Because all is really cache-inefficient by iterating columns instead of rows. Data in numpy arrays are stored by dimensions - rows, then columns, 3rd dim, etc. If we read a row, it will be a sequential segment of memory that can be efficiently cached. If we read by column, it is a few bytes here, skip a few KB, then read a few more bytes, etc - which causes a lot of cache misses. The problem gets more pronounced if we increase the 3rd dimension, e.g. to 50K. Read by rows, as opposed to columns, eliminates the difference: def all_by_rows(arr, cop): for row in range(arr.shape[0]): arr[row, :] = cop[row, :].copy() return arr timeit with 50k third dimension: 1.249532633984927 # row_by_row - which is actually by third dimension 2.0826793879969046 # all 1.3391598959860858 # all_by_rows Without unnecessary .copy(), as pointed out by Marco: 1.0241080590058118 0.9834478280099574 0.6739323509973474
why is iterating over a Numpy array faster than direct operations
I wanted to find out if it is significantly slower to iterate over the first two dimensions of an array in comparison to doing the operations columnwise. To my surprise I found out that it's actually faster to do the operations elementwise. Can someone explain? Here is the code: def row_by_row(arr, cop): for i in range(arr.shape[0]): for ii in range(arr.shape[1]): arr[i, ii] = cop[i, ii].copy() return arr def all(arr, cop): for i in range(arr.shape[1]): arr[:,i] = cop[:, i].copy() return arr print(timeit.timeit("row_by_row(arr, cop)", setup="arr=np.ones((26, 15, 5000)); cop = np.random.random((26, 15,5000))",number=50, globals=globals())) print(timeit.timeit("all(arr, cop)",setup="arr=np.ones((26, 15, 5000)); cop=np.random.random((26, 15,5000))", number=50, globals=globals())) this was the time: 0.12496590000000007 0.4989047
[ "Short Answer:\nMemory Allocation\nLong Answer:\nAs the commenters in the question point out, the measure results seem very unreliable. Increasing the number of operations for the measurement to 2000 gives more steady results\n\nRow: 3.519135099995765\n\n\nAll: 5.321293300003163\n\nOne thing which certainly impacts the performance is how arrays are stored in the memory and how many cache hits / misses we have.\ndef matrix(arr, cop):\n\n for i in range(arr.shape[0]):\n arr[i] = cop[i].copy()\n\n return arr\n\nThis is a bit better in performance than copying \"columns\"\n\nMatrix: 4.6333566999965115\n\nIt is still slower though than going through it row by row. Why?\nFor this, let's take one step back from the loop\ndef just_copy(arr, cop):\n return cop.copy()\n\n\nCopy: 5.482903500000248\n\nIn just copying the whole thing, we're slower again!\nI would assume, the cause for it being faster to loop through the arrays is mostly memory allocation. There may also be some additional overhead of copying NumPy structures.\n", "Because all is really cache-inefficient by iterating columns instead of rows.\nData in numpy arrays are stored by dimensions - rows, then columns, 3rd dim, etc. If we read a row, it will be a sequential segment of memory that can be efficiently cached. If we read by column, it is a few bytes here, skip a few KB, than read a few more bytes, etc - which causes a lot of cache misses. The problem gets more pronounced if we increase 3rd dimension, e.g. to 50K.\nRead by rows, as opposed to columns, eliminates the difference:\ndef all_by_rows(arr, cop):\n for row in range(arr.shape[0]):\n arr[row, :] = cop[row, :].copy()\n return arr\n\ntimeit with 50k third dimension:\n1.249532633984927 # row_by_row - which is actually by third dimension\n2.0826793879969046 # all\n1.3391598959860858 # all_by_rows\n\nWithout unnecessary .copy(), as pointed out by Marco:\n1.0241080590058118\n0.9834478280099574\n0.6739323509973474\n\n" ]
[ 2, 2 ]
[]
[]
[ "algorithm", "arrays", "numpy", "python" ]
stackoverflow_0074534076_algorithm_arrays_numpy_python.txt
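A self-contained sketch of the memory-layout point made in the second answer; the shape is taken from the question with the larger third dimension, and absolute timings will of course vary by machine:

import timeit
import numpy as np

arr = np.ones((26, 15, 50000))
cop = np.random.random((26, 15, 50000))

def by_columns():
    for i in range(arr.shape[1]):
        arr[:, i] = cop[:, i]  # strided reads, many cache misses

def by_rows():
    for i in range(arr.shape[0]):
        arr[i] = cop[i]        # contiguous reads, cache friendly

print(timeit.timeit(by_columns, number=20))
print(timeit.timeit(by_rows, number=20))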
Q: `staticmethod` and `abc.abstractmethod`: Will it blend? In my Python app I want to make a method that is both a staticmethod and an abc.abstractmethod. How do I do this? I tried applying both decorators, but it doesn't work. If I do this: import abc class C(object): __metaclass__ = abc.ABCMeta @abc.abstractmethod @staticmethod def my_function(): pass I get an exception*, and if I do this: class C(object): __metaclass__ = abc.ABCMeta @staticmethod @abc.abstractmethod def my_function(): pass The abstract method is not enforced. How can I make an abstract static method? *The exception: File "c:\Python26\Lib\abc.py", line 29, in abstractmethod funcobj.__isabstractmethod__ = True AttributeError: 'staticmethod' object has no attribute '__isabstractmethod__' A: Starting with Python 3.3, it is possible to combine @staticmethod and @abstractmethod, so none of the other suggestions are necessary anymore: @staticmethod @abstractmethod def my_abstract_staticmethod(...): Further @abstractstatic is deprecated since version 3.3. A: class abstractstatic(staticmethod): __slots__ = () def __init__(self, function): super(abstractstatic, self).__init__(function) function.__isabstractmethod__ = True __isabstractmethod__ = True class A(object): __metaclass__ = abc.ABCMeta @abstractstatic def test(): print 5 A: This will do it: >>> import abc >>> abstractstaticmethod = abc.abstractmethod >>> >>> class A(object): ... __metaclass__ = abc.ABCMeta ... @abstractstaticmethod ... def themethod(): ... pass ... >>> a = A() >>> Traceback (most recent call last): File "asm.py", line 16, in <module> a = A() TypeError: Can't instantiate abstract class A with abstract methods test You go "Eh? It just renames @abstractmethod", and this is completely correct. Because any subclass of the above will have to include the @staticmethod decorator anyway. You have no need of it here, except as documentation when reading the code. A subclass would have to look like this: >>> class B(A): ... @staticmethod ... def themethod(): ... print "Do whatevs" To have a function that would enforce you to make this method a static method you would have to subclass ABCmeta to check for that and enforce it. That's a lot of work for no real return. (If somebody forgets the @staticmethod decorator they will get a clear error anyway, it just won't mention static methods.) So in fact this works just as well: >>> import abc >>> >>> class A(object): ... __metaclass__ = abc.ABCMeta ... @abc.abstractmethod ... def themethod(): ... """Subclasses must implement this as a @staticmethod""" ... pass Update - Another way to explain it: That a method is static controls how it is called. An abstract method is never called. An abstract static method is therefore a pretty pointless concept, except for documentation purposes. A: This is currently not possible in Python 2.X, which will only enforce the method to be abstract or static, but not both. In Python 3.2+, the new decorators abc.abstractclassmethod and abc.abstractstaticmethod were added to combine their enforcement of being abstract and static or abstract and a class method. See Python Issue 5867 A: The documentation says below: When abstractmethod() is applied in combination with other method descriptors, it should be applied as the innermost decorator, ... So, @abstractmethod must be the innermost decorator as shown below: from abc import ABC, abstractmethod class Person(ABC): @classmethod @abstractmethod # The innermost decorator def test1(cls): pass @staticmethod @abstractmethod # The innermost decorator def test2(): pass @property @abstractmethod # The innermost decorator def name(self): pass @name.setter @abstractmethod # The innermost decorator def name(self, name): pass @name.deleter @abstractmethod # The innermost decorator def name(self): pass Then, you need to override them in the child class as shown below: class Student(Person): def __init__(self, name): self._name = name @classmethod def test1(cls): # Overrides abstract class method print("Test1") @staticmethod def test2(): # Overrides abstract static method print("Test2") @property def name(self): # Overrides abstract getter return self._name @name.setter def name(self, name): # Overrides abstract setter self._name = name @name.deleter def name(self): # Overrides abstract deleter del self._name Then, you can instantiate the child class and call them as shown below: obj = Student("John") # Instantiates "Student" class obj.test1() # Class method obj.test2() # Static method print(obj.name) # Getter obj.name = "Tom" # Setter print(obj.name) # Getter del obj.name # Deleter print(hasattr(obj, "name")) Output: Test1 Test2 John Tom False You can see my answer which explains about abstract property.
`staticmethod` and `abc.abstractmethod`: Will it blend?
In my Python app I want to make a method that is both a staticmethod and an abc.abstractmethod. How do I do this? I tried applying both decorators, but it doesn't work. If I do this: import abc class C(object): __metaclass__ = abc.ABCMeta @abc.abstractmethod @staticmethod def my_function(): pass I get an exception*, and if I do this: class C(object): __metaclass__ = abc.ABCMeta @staticmethod @abc.abstractmethod def my_function(): pass The abstract method is not enforced. How can I make an abstract static method? *The exception: File "c:\Python26\Lib\abc.py", line 29, in abstractmethod funcobj.__isabstractmethod__ = True AttributeError: 'staticmethod' object has no attribute '__isabstractmethod__'
[ "Starting with Python 3.3, it is possible to combine @staticmethod and @abstractmethod, so none of the other suggestions are necessary anymore:\n@staticmethod\n@abstractmethod\ndef my_abstract_staticmethod(...):\n\nFurther @abstractstatic is deprecated since version 3.3.\n", "class abstractstatic(staticmethod):\n __slots__ = ()\n def __init__(self, function):\n super(abstractstatic, self).__init__(function)\n function.__isabstractmethod__ = True\n __isabstractmethod__ = True\n\nclass A(object):\n __metaclass__ = abc.ABCMeta\n @abstractstatic\n def test():\n print 5\n\n", "This will do it:\n >>> import abc\n >>> abstractstaticmethod = abc.abstractmethod\n >>>\n >>> class A(object):\n ... __metaclass__ = abc.ABCMeta\n ... @abstractstaticmethod\n ... def themethod():\n ... pass\n ... \n >>> a = A()\n >>> Traceback (most recent call last):\n File \"asm.py\", line 16, in <module>\n a = A()\n TypeError: Can't instantiate abstract class A with abstract methods test\n\nYou go \"Eh? It just renames @abstractmethod\", and this is completely correct. Because any subclass of the above will have to include the @staticmethod decorator anyway. You have no need of it here, except as documentation when reading the code. A subclass would have to look like this:\n >>> class B(A):\n ... @staticmethod\n ... def themethod():\n ... print \"Do whatevs\"\n\nTo have a function that would enforce you to make this method a static method you would have to subclass ABCmeta to check for that and enforce it. That's a lot of work for no real return. (If somebody forgets the @staticmethod decorator they will get a clear error anyway, it just won't mention static methods. \nSo in fact this works just as well:\n >>> import abc\n >>>\n >>> class A(object):\n ... __metaclass__ = abc.ABCMeta\n ... @abc.abstractmethod\n ... def themethod():\n ... \"\"\"Subclasses must implement this as a @staticmethod\"\"\"\n ... 
pass\n\nUpdate - Another way to explain it:\nThat a method is static controls how it is called.\nAn abstract method is never called.\nAnd abstract static method is therefore a pretty pointless concept, except for documentation purposes.\n", "This is currently not possible in Python 2.X, which will only enforce the method to be abstract or static, but not both.\nIn Python 3.2+, the new decoratorsabc.abstractclassmethod and abc.abstractstaticmethod were added to combine their enforcement of being abstract and static or abstract and a class method.\nSee Python Issue 5867\n", "The documentation says below:\n\nWhen abstractmethod() is applied in combination with other method\ndescriptors, it should be applied as the innermost decorator, ...\n\nSo, @abstractmethod must be the innermost decorator as shown below:\nfrom abc import ABC, abstractmethod\n\nclass Person(ABC):\n\n @classmethod\n @abstractmethod # The innermost decorator\n def test1(cls):\n pass\n \n @staticmethod\n @abstractmethod # The innermost decorator\n def test2():\n pass\n\n @property\n @abstractmethod # The innermost decorator\n def name(self):\n pass\n\n @name.setter\n @abstractmethod # The innermost decorator\n def name(self, name):\n pass\n\n @name.deleter\n @abstractmethod # The innermost decorator\n def name(self):\n pass\n\nThen, you need to override them in the child class as shown below:\nclass Student(Person):\n \n def __init__(self, name):\n self._name = name\n \n @classmethod\n def test1(cls): # Overrides abstract class method\n print(\"Test1\")\n \n @staticmethod\n def test2(): # Overrides abstract static method\n print(\"Test2\")\n \n @property\n def name(self): # Overrides abstract getter\n return self._name\n \n @name.setter\n def name(self, name): # Overrides abstract setter\n self._name = name\n \n @name.deleter\n def name(self): # Overrides abstract deleter\n del self._name\n\nThen, you can instantiate the child class and call them as shown below:\nobj = Student(\"John\") # Instantiates \"Student\" class\nobj.test1() # Class method\nobj.test2() # Static method\nprint(obj.name) # Getter\nobj.name = \"Tom\" # Setter\nprint(obj.name) # Getter\ndel obj.name # Deleter\nprint(hasattr(obj, \"name\"))\n\nOutput:\nTest1\nTest2\nJohn \nTom \nFalse\n\nYou can see my answer which explains about abstract property.\n" ]
[ 348, 40, 16, 5, 0 ]
[]
[]
[ "abstract_class", "python", "static_methods" ]
stackoverflow_0004474395_abstract_class_python_static_methods.txt
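A runnable sketch of the Python 3.3+ combination from the top answer, showing that instantiation stays blocked until the static method is overridden; the class names here are made up for illustration:

from abc import ABC, abstractmethod

class Base(ABC):
    @staticmethod
    @abstractmethod
    def build():
        ...

class Impl(Base):
    @staticmethod
    def build():
        return "built"

# Base()  # would raise TypeError: can't instantiate abstract class Base
print(Impl.build())  # called on the class, no instance needed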
Q: Find the difference between two columns in a dataframe but keeping the row index available I have two dataframes: df1 = pd.DataFrame({"product":['apples', 'bananas', 'oranges', 'kiwi']}) df2 = pd.DataFrame({"product":['apples', 'aples', 'appples', 'banans', 'oranges', 'kiwki'], "key": [1, 2, 3, 4, 5, 6]}) I want to use something like a set(df2).difference(df1) to find the difference between the product columns but I want to keep the indexes. So ideally the output would look like this: result = ['aples', 'appples', 'banans', 'kiwki'] [2 3 4 6] Whenever I use the set.difference() I get the list of the different values but I lose the key index. A: I guess you are trying to do a left anti join, which means you only want to keep the rows in df2 that aren't present in df1. In that case: df1 = pd.DataFrame({"product":['apples', 'bananas', 'oranges', 'kiwi']}) df2 = pd.DataFrame({"product":['apples', 'aples', 'appples', 'banans', 'oranges', 'kiwki'], "key":[1, 2, 3, 4, 5, 6]}) # left join joined_df = df2.merge(df1, on='product', how='left', indicator=True) # keeping products that were only present in df2 products_only_in_df2 = joined_df.loc[joined_df['_merge'] == 'left_only', 'product'] # filtering df2 using the above df so we have the keys as well result = df2[df2['product'].isin(products_only_in_df2)] A: You have to filter the df2 frame checking if the elements from df2 are not in df1: df2[~df2["product"].isin(df1['product'])] ~ negates the values of a boolean Series. ser1.isin(ser2) is a boolean Series which gives, for each element of ser1, whether or not the value can be found in ser2.
Find the difference between two columns in a dataframe but keeping the row index available
I have two dataframes: df1 = pd.DataFrame({"product":['apples', 'bananas', 'oranges', 'kiwi']}) df2 = pd.DataFrame({"product":['apples', 'aples', 'appples', 'banans', 'oranges', 'kiwki'], "key": [1, 2, 3, 4, 5, 6]}) I want to use something like a set(df2).difference(df1) to find the difference between the product columns but I want to keep the indexes. So ideally the output would look like this: result = ['aples', 'appples', 'banans', 'kiwki'] [2 3 4 6] Whenever I use the set.difference() I get the list of the different values but I lose the key index.
[ "I guess you are trying to do a left anti join, which means you only want to keep the rows in df2 that aren't present in df1. In that case:\ndf1 = pd.DataFrame({\"product\":['apples', 'bananas', 'oranges', 'kiwi']})\ndf2 = pd.DataFrame({\"product\":['apples', 'aples', 'appples', 'banans', 'oranges', 'kiwki'], \"key\":[1, 2, 3, 4, 5, 6]})\n\n# left join\njoined_df = df2.merge(df1, on='product', how='left', indicator=True)\n# keeping products that were only present in df2\nproducts_only_in_df2 = joined_df.loc[joined_df['_merge'] == 'left_only', 'product']\n# filtering df2 using the above df so we have the keys as well\nresult = df2[df2['product'].isin(products_only_in_df2)]\n\n", "You have to filter the df2 frame checking if the elements from df2 are not in df1:\ndf2[~df2[\"product\"].isin(df1['product'])]\n\n\n~ negates the values of a boolean Series.\nser1.isin(ser2) is a boolean Series which gives, for each element of ser 1, whether or not the value can be found in ser2.\n\n" ]
[ 0, 0 ]
[]
[]
[ "dataframe", "pandas", "python", "set" ]
stackoverflow_0074535375_dataframe_pandas_python_set.txt
Q: Class that holds variables and has methods? I have a class like this: class ErrorMessages(object): """a class that holds all error messages and then presents them to the user""" messages= [] userStrMessages= "" def newError(self, Error): self.userStrMessages+= Error def __str__(self): if self.messages.count() != 0: i=0 for thing in self.messages: self.userStrMessages += self.messages[i] + "\n" i+=1 return self.userStrMessages This doesn't work when I call it like this; it wants 2 input variables, but those are just self and what I put into it: ErrorMessages.newError(errormessage) errormessage is a string. I have (what I think is) a static class? (I'm new to this and learned it in Swedish, which makes this harder.) It looks like this: class EZSwitch(object): fileway= "G:/Other computers/Stationär Dator/Files/Pyhton Program/Scheman" fileway2= "Kolmården.txt" numberSchedules= 30 I thought I could make my error-message class like this, but it also does things with methods, like adding things to the list messages in newError. It doesn't seem to be possible, or how would I do this? A: If you want to call a method directly without making an object first then you'll have to make your method newError() a class method and then call it as you mentioned above. @classmethod def newError(cls, Error): cls.userStrMessages+= Error ErrorMessages.newError(errormessage) Otherwise you can create an object first and then call the method as: err_object = ErrorMessages() err_object.newError(errormessage)
Class that holds variables and has methods?
I have a class like this: class ErrorMessages(object): """a class that holds all error messages and then presents them to the user""" messages= [] userStrMessages= "" def newError(self, Error): self.userStrMessages+= Error def __str__(self): if self.messages.count() != 0: i=0 for thing in self.messages: self.userStrMessages += self.messages[i] + "\n" i+=1 return self.userStrMessages This doesn't work when I call it like this; it wants 2 input variables, but those are just self and what I put into it: ErrorMessages.newError(errormessage) errormessage is a string. I have (what I think is) a static class? (I'm new to this and learned it in Swedish, which makes this harder.) It looks like this: class EZSwitch(object): fileway= "G:/Other computers/Stationär Dator/Files/Pyhton Program/Scheman" fileway2= "Kolmården.txt" numberSchedules= 30 I thought I could make my error-message class like this, but it also does things with methods, like adding things to the list messages in newError. It doesn't seem to be possible, or how would I do this?
[ "If you want to call a method directly without making an object first then you'll have to make your method newError() a class method and then call it as you mentioned above.\n@classmethod\ndef newError(self, Error): \n self.userStrMessages+= Error\n\nErrorMessages.newError(errormessage)\n\nOtherwise you can create an object first and then call the method as:\nerr_object = ErrorMessage()\nerr_object.newError(errormessage)\n\n" ]
[ -1 ]
[ "newError is an instance method. You'd need to instantiate it and then call it:\nmyMessage = ErrorMessage()\nmyMessage.newError(\"important error message\")\n\nSee this question about making it a static/class method, which would let you call it on the class without instantiation.\n" ]
[ -2 ]
[ "class", "object", "python" ]
stackoverflow_0074535599_class_object_python.txt
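A hedged sketch of one way to get what the question above asks for, keeping the message list at class level and using classmethods so no instance is ever needed; the design itself is an assumption, only the names mirror the question:

class ErrorMessages:
    """Collects error messages and presents them to the user."""
    messages = []

    @classmethod
    def newError(cls, error):
        cls.messages.append(error)

    @classmethod
    def report(cls):
        return "\n".join(cls.messages)

ErrorMessages.newError("something went wrong")
print(ErrorMessages.report())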
Q: How to use dot notation for dict in python? I'm very new to python and I wish I could do . notation to access values of a dict. Let's say I have test like this: >>> test = dict() >>> test['name'] = 'value' >>> print(test['name']) value But I wish I could do test.name to get value. In fact, I did it by overriding the __getattr__ method in my class like this: class JuspayObject: def __init__(self,response): self.__dict__['_response'] = response def __getattr__(self,key): try: return self._response[key] except KeyError,err: sys.stderr.write('Sorry no key matches') and this works! when I do: test.name // I get value. But the problem is when I just print test alone I get the error as: 'Sorry no key matches' Why is this happening? A: This functionality already exists in the standard libraries, so I recommend you just use their class. >>> from types import SimpleNamespace >>> d = {'key1': 'value1', 'key2': 'value2'} >>> n = SimpleNamespace(**d) >>> print(n) namespace(key1='value1', key2='value2') >>> n.key2 'value2' Adding, modifying and removing values is achieved with regular attribute access, i.e. you can use statements like n.key = val and del n.key. To go back to a dict again: >>> vars(n) {'key1': 'value1', 'key2': 'value2'} The keys in your dict should be string identifiers for attribute access to work properly. Simple namespace was added in Python 3.3. For older versions of the language, argparse.Namespace has similar behaviour. A: I assume that you are comfortable in Javascript and want to borrow that kind of syntax... I can tell you by personal experience that this is not a great idea. It sure does look less verbose and neat; but in the long run it is just obscure. Dicts are dicts, and trying to make them behave like objects with attributes will probably lead to (bad) surprises. If you need to manipulate the fields of an object as if they were a dictionary, you can always resort to use the internal __dict__ attribute when you need it, and then it is explicitly clear what you are doing. Or use getattr(obj, 'key') to take into account the inheritance structure and class attributes too. But by reading your example it seems that you are trying something different... As the dot operator will already look in the __dict__ attribute without any extra code. A: In addition to this answer, one can add support for nested dicts as well: from types import SimpleNamespace class NestedNamespace(SimpleNamespace): def __init__(self, dictionary, **kwargs): super().__init__(**kwargs) for key, value in dictionary.items(): if isinstance(value, dict): self.__setattr__(key, NestedNamespace(value)) else: self.__setattr__(key, value) nested_namespace = NestedNamespace({ 'parent': { 'child': { 'grandchild': 'value' } }, 'normal_key': 'normal value', }) print(nested_namespace.parent.child.grandchild) # value print(nested_namespace.normal_key) # normal value Note that this does not support dot notation for dicts that are somewhere inside e.g. lists. A: Could you use a named tuple? from collections import namedtuple Test = namedtuple('Test', 'name foo bar') my_test = Test('value', 'foo_val', 'bar_val') print(my_test) print(my_test.name) A: __getattr__ is used as a fallback when all other attribute lookup rules have failed. When you try to "print" your object, Python looks for a __repr__ method, and since you don't implement it in your class it ends up calling __getattr__ (yes, in Python methods are attributes too). You shouldn't assume which key getattr will be called with, and, most important, __getattr__ must raise an AttributeError if it cannot resolve key. As a side note: don't use self.__dict__ for ordinary attribute access, just use the plain attribute notation: class JuspayObject: def __init__(self,response): # don't use self.__dict__ here self._response = response def __getattr__(self,key): try: return self._response[key] except KeyError,err: raise AttributeError(key) Now if your class has no other responsibility (and your Python version is >= 2.6 and you don't need to support older versions), you may just use a namedtuple : http://docs.python.org/2/library/collections.html#collections.namedtuple A: You can use the built-in method argparse.Namespace(): import argparse args = argparse.Namespace() args.name = 'value' print(args.name) # 'value' You can also get the original dict via vars(args). A: You have to be careful when using __getattr__, because it's used for a lot of builtin Python functionality. Try something like this... class JuspayObject: def __init__(self,response): self.__dict__['_response'] = response def __getattr__(self, key): # First, try to return from _response try: return self.__dict__['_response'][key] except KeyError: pass # If that fails, return default behavior so we don't break Python try: return self.__dict__[key] except KeyError: raise AttributeError, key >>> j = JuspayObject({'foo': 'bar'}) >>> j.foo 'bar' >>> j <__main__.JuspayObject instance at 0x7fbdd55965f0> A: Here is a simple, handy dot notation helper example that is working with nested items: def dict_get(data:dict, path:str, default = None): pathList = re.split(r'\.', path, flags=re.IGNORECASE) result = data for key in pathList: try: key = int(key) if key.isnumeric() else key result = result[key] except: result = default break return result Usage example: my_dict = {"test1": "str1", "nested_dict": {"test2": "str2"}, "nested_list": ["str3", {"test4": "str4"}]} print(dict_get(my_dict, "test1")) # str1 print(dict_get(my_dict, "nested_dict.test2")) # str2 print(dict_get(my_dict, "nested_list.1.test4")) # str4 A: With a small addition to this answer you can support lists as well: class NestedNamespace(SimpleNamespace): def __init__(self, dictionary, **kwargs): super().__init__(**kwargs) for key, value in dictionary.items(): if isinstance(value, dict): self.__setattr__(key, NestedNamespace(value)) elif isinstance(value, list): self.__setattr__(key, map(NestedNamespace, value)) else: self.__setattr__(key, value) A: I use the dotted_dict package: >>> from dotted_dict import DottedDict >>> test = DottedDict() >>> test.name = 'value' >>> print(test.name) value A: 2022 answer: I've created the dotwiz package -- this is a fast, tiny library that seems to perform really well in most cases. >>> from dotwiz import DotWiz >>> test = DotWiz(hello='world') >>> test.works = True >>> test ✫(hello='world', works=True) >>> test.hello 'world' >>> assert test.works A: class convert_to_dot_notation(dict): """ Access dictionary attributes via dot notation """ __getattr__ = dict.get __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ test = {"name": "value"} data = convert_to_dot_notation(test) print(data.name) A: #!/usr/bin/env python3 import json from sklearn.utils import Bunch from collections.abc import MutableMapping def dotted(inpt: MutableMapping, *args, **kwargs ) -> Bunch: """ Enables recursive dot notation for ``dict``. """ return json.loads(json.dumps(inpt), object_hook=lambda x: Bunch(**{**Bunch(), **x})) A: This feature is baked into OmegaConf: from omegaconf import OmegaConf your_dict = {"k" : "v", "list" : [1, {"a": "1", "b": "2", 3: "c"}]} adot_dict = OmegaConf.create(your_dict) print(adot_dict.k) print(adot_dict.list) Installation is: pip install omegaconf This lib comes in handy for configurations, which it is actually made for: from omegaconf import OmegaConf cfg = OmegaConf.load('config.yml') print(cfg.data_path) A: You can make hacks adding dot notation to Dicts mostly work, but there are always namespace problems. As in, what does this do? x = DotDict() x["values"] = 1989 print(x. values) I use pydash, which is a Python port of JS's lodash, to do these things a different way when the nesting gets too ugly.
How to use dot notation for dict in python?
I'm very new to python and I wish I could do . notation to access values of a dict. Let's say I have test like this: >>> test = dict() >>> test['name'] = 'value' >>> print(test['name']) value But I wish I could do test.name to get value. In fact, I did it by overriding the __getattr__ method in my class like this: class JuspayObject: def __init__(self,response): self.__dict__['_response'] = response def __getattr__(self,key): try: return self._response[key] except KeyError,err: sys.stderr.write('Sorry no key matches') and this works! when I do: test.name // I get value. But the problem is when I just print test alone I get the error as: 'Sorry no key matches' Why is this happening?
[ "This functionality already exists in the standard libraries, so I recommend you just use their class. \n>>> from types import SimpleNamespace\n>>> d = {'key1': 'value1', 'key2': 'value2'}\n>>> n = SimpleNamespace(**d)\n>>> print(n)\nnamespace(key1='value1', key2='value2')\n>>> n.key2\n'value2'\n\nAdding, modifying and removing values is achieved with regular attribute access, i.e. you can use statements like n.key = val and del n.key. \nTo go back to a dict again:\n>>> vars(n)\n{'key1': 'value1', 'key2': 'value2'}\n\nThe keys in your dict should be string identifiers for attribute access to work properly. \nSimple namespace was added in Python 3.3. For older versions of the language, argparse.Namespace has similar behaviour. \n", "I assume that you are comfortable in Javascript and want to borrow that kind of syntax... I can tell you by personal experience that this is not a great idea.\nIt sure does look less verbose and neat; but in the long run it is just obscure. Dicts are dicts, and trying to make them behave like objects with attributes will probably lead to (bad) surprises.\nIf you need to manipulate the fields of an object as if they were a dictionary, you can always resort to use the internal __dict__ attribute when you need it, and then it is explicitly clear what you are doing. Or use getattr(obj, 'key') to have into account the inheritance structure and class attributes too.\nBut by reading your example it seems that you are trying something different... As the dot operator will already look in the __dict__ attribute without any extra code.\n", "In addition to this answer, one can add support for nested dicts as well:\nfrom types import SimpleNamespace\n\nclass NestedNamespace(SimpleNamespace):\n def __init__(self, dictionary, **kwargs):\n super().__init__(**kwargs)\n for key, value in dictionary.items():\n if isinstance(value, dict):\n self.__setattr__(key, NestedNamespace(value))\n else:\n self.__setattr__(key, value)\n\nnested_namespace = NestedNamespace({\n 'parent': {\n 'child': {\n 'grandchild': 'value'\n }\n },\n 'normal_key': 'normal value',\n})\n\n\nprint(nested_namespace.parent.child.grandchild) # value\nprint(nested_namespace.normal_key) # normal value\n\nNote that this does not support dot notation for dicts that are somewhere inside e.g. lists.\n", "Could you use a named tuple?\nfrom collections import namedtuple\nTest = namedtuple('Test', 'name foo bar')\nmy_test = Test('value', 'foo_val', 'bar_val')\nprint(my_test)\nprint(my_test.name)\n\n\n", "__getattr__ is used as a fallback when all other attribute lookup rules have failed. When you try to \"print\" your object, Python look for a __repr__ method, and since you don't implement it in your class it ends up calling __getattr__ (yes, in Python methods are attributes too). 
You shouldn't assume which key getattr will be called with, and, most important, __getattr__ must raise an AttributeError if it cannot resolve key.\nAs a side note: don't use self.__dict__ for ordinary attribute access, just use the plain attribute notation: \nclass JuspayObject:\n\n def __init__(self,response):\n # don't use self.__dict__ here\n self._response = response\n\n def __getattr__(self,key):\n try:\n return self._response[key]\n except KeyError,err:\n raise AttributeError(key)\n\nNow if your class has no other responsability (and your Python version is >= 2.6 and you don't need to support older versions), you may just use a namedtuple : http://docs.python.org/2/library/collections.html#collections.namedtuple\n", "You can use the built-in method argparse.Namespace():\nimport argparse\n\nargs = argparse.Namespace()\nargs.name = 'value'\n\nprint(args.name)\n# 'value'\n\nYou can also get the original dict via vars(args).\n", "You have to be careful when using __getattr__, because it's used for a lot of builtin Python functionality.\nTry something like this...\nclass JuspayObject:\n\n def __init__(self,response):\n self.__dict__['_response'] = response\n\n def __getattr__(self, key):\n # First, try to return from _response\n try:\n return self.__dict__['_response'][key]\n except KeyError:\n pass\n # If that fails, return default behavior so we don't break Python\n try:\n return self.__dict__[key]\n except KeyError:\n raise AttributeError, key\n\n>>> j = JuspayObject({'foo': 'bar'})\n>>> j.foo\n'bar'\n>>> j\n<__main__.JuspayObject instance at 0x7fbdd55965f0>\n\n", "Here is a simple, handy dot notation helper example that is working with nested items:\ndef dict_get(data:dict, path:str, default = None):\n pathList = re.split(r'\\.', path, flags=re.IGNORECASE)\n result = data\n for key in pathList:\n try:\n key = int(key) if key.isnumeric() else key \n result = result[key]\n except:\n result = default\n break\n \n return result\n\nUsage example:\nmy_dict = {\"test1\": \"str1\", \"nested_dict\": {\"test2\": \"str2\"}, \"nested_list\": [\"str3\", {\"test4\": \"str4\"}]}\nprint(dict_get(my_dict, \"test1\"))\n# str1\nprint(dict_get(my_dict, \"nested_dict.test2\"))\n# str2\nprint(dict_get(my_dict, \"nested_list.1.test4\"))\n# str4\n\n", "With a small addition to this answer you can support lists as well:\nclass NestedNamespace(SimpleNamespace):\ndef __init__(self, dictionary, **kwargs):\n super().__init__(**kwargs)\n for key, value in dictionary.items():\n if isinstance(value, dict):\n self.__setattr__(key, NestedNamespace(value))\n elif isinstance(value, list):\n self.__setattr__(key, map(NestedNamespace, value))\n else:\n self.__setattr__(key, value)\n\n", "I use the dotted_dict package:\n>>> from dotted_dict import DottedDict\n>>> test = DottedDict()\n>>> test.name = 'value'\n>>> print(test.name)\nvalue\n\n", "2022 answer: I've created the dotwiz package -- this is a fast, tiny library that seems to perform really well in most cases.\n>>> from dotwiz import DotWiz\n>>> test = DotWiz(hello='world')\n>>> test.works = True\n>>> test\n✫(hello='world', works=True)\n>>> test.hello\n'world'\n>>> assert test.works\n\n", "class convert_to_dot_notation(dict):\n \"\"\"\n Access dictionary attributes via dot notation\n \"\"\"\n\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n\ntest = {\"name\": \"value\"}\ndata = convert_to_dot_notation(test)\nprint(data.name)\n\n", "#!/usr/bin/env python3\n\n\nimport json\nfrom sklearn.utils import Bunch\nfrom 
collections.abc import MutableMapping\n\n\ndef dotted(inpt: MutableMapping,\n *args,\n **kwargs\n ) -> Bunch:\n \"\"\"\n Enables recursive dot notation for ``dict``.\n \"\"\"\n\n return json.loads(json.dumps(inpt),\n object_hook=lambda x:\n Bunch(**{**Bunch(), **x}))\n\n", "This feature is baked into OmegaConf:\nfrom omegaconf import OmegaConf\n\nyour_dict = {\"k\" : \"v\", \"list\" : [1, {\"a\": \"1\", \"b\": \"2\", 3: \"c\"}]}\nadot_dict = OmegaConf.create(your_dict)\n\nprint(adot_dict.k)\nprint(adot_dict.list)\n\nInstallation is:\npip install omegaconf\nThis lib comes in handy for configurations, which it is actually made for:\nfrom omegaconf import OmegaConf\ncfg = OmegaConf.load('config.yml')\nprint(cfg.data_path)\n\n", "You can make hacks adding dot notation to Dicts mostly work, but there are always namespace problems. As in, what does this do?\nx = DotDict()\nx[\"values\"] = 1989\nprint(x. values)\n\nI use pydash, which is a Python port of JS's lodash, to do these things a different way when the nesting gets too ugly.\n" ]
[ 246, 55, 17, 11, 6, 4, 3, 3, 2, 1, 1, 1, 0, 0, 0 ]
[ "Add a __repr__() method to the class so that you can customize the text to be shown on \nprint text\n\nLearn more here: https://web.archive.org/web/20121022015531/http://diveintopython.net/object_oriented_framework/special_class_methods2.html\n" ]
[ -1 ]
[ "dictionary", "nested", "nested_properties", "python" ]
stackoverflow_0016279212_dictionary_nested_nested_properties_python.txt
Q: How to read a text file and make it a dataframe using pandas I want to read the files present in this folder (uwyo) as a data frame while skipping the rows in between the observation data. I want to read every observation, each of which starts at the keyword 'pressure'. For that I thought of using pandas and then searching for the word 'pressure', but I got the following error. import pandas as pd import glob import numpy as np import matplotlib.dates as mdates import matplotlib.pyplot as plt dfs = [] for fname in glob.glob('*.txt'): df = pd.read_csv(fname,delimiter='\s+',header=None) ParserError: Error tokenizing data. C error: Expected 9 fields in line 5, saw 11 Is there an efficient way to do this? I want to skip the station information and all the text present in between. A: Try it as: pd.read_csv(fname, sep='\s+', on_bad_lines='skip', skiprows=4) This will read the file with a lot of trash though. Also, missing values in the txt file would appear in the wrong column. I would recommend trying to identify the timestamps you have available and add a column for them, as well as identifying and removing the metadata present in between each period of observations. This will require some pre-work on the data :D Edit: Sorry, forgot to add this as a second line above: (it will filter out most of the trash) df[pd.to_numeric(df['PRES'], errors='coerce').notnull()]
How to read a text file and make it a dataframe using pandas
I want to read the files present in this folder (uwyo) as a data frame while skipping the rows in between the observation data. I want to read every observation, each of which starts at the keyword 'pressure'. For that I thought of using pandas and then searching for the word 'pressure', but I got the following error. import pandas as pd import glob import numpy as np import matplotlib.dates as mdates import matplotlib.pyplot as plt dfs = [] for fname in glob.glob('*.txt'): df = pd.read_csv(fname,delimiter='\s+',header=None) ParserError: Error tokenizing data. C error: Expected 9 fields in line 5, saw 11 Is there an efficient way to do this? I want to skip the station information and all the text present in between.
[ "Try it as: pd.read_csv(fname, sep='\\s+', on_bad_lines='skip', skiprows=4)\nThis will read the file with a lot of trash though. Also, missing values in the txt file would appear in the wrong column.\nI would recommend trying to identify the timestamps you have available and add a column for them, as well as identifying and removing the metadata present in between each period of observations.\nThis will require some pre-work on the data :D\nEdit:\nSorry, forgot to add this as a second line above: (it will filter out most of the trash)\ndf[pd.to_numeric(df['PRES'], errors='coerce').notnull()]\n" ]
[ 1 ]
[]
[]
[ "csv", "dataframe", "pandas", "python", "text" ]
stackoverflow_0074535453_csv_dataframe_pandas_python_text.txt
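A rough parsing sketch for the sounding files above, assuming each data row is purely numeric and the header has the 11 columns hinted at by the 'saw 11' error; the column names are a guess, not taken from the files:

import glob
import pandas as pd

cols = ['PRES', 'HGHT', 'TEMP', 'DWPT', 'RELH', 'MIXR',
        'DRCT', 'SKNT', 'THTA', 'THTE', 'THTV']  # assumed layout

def is_number(token):
    try:
        float(token)
        return True
    except ValueError:
        return False

dfs = []
for fname in glob.glob('*.txt'):
    rows = [line.split() for line in open(fname)]
    # keep only fully numeric rows of the right width; station info and
    # text between observation blocks are skipped automatically
    data = [[float(t) for t in r] for r in rows
            if len(r) == len(cols) and all(is_number(t) for t in r)]
    dfs.append(pd.DataFrame(data, columns=cols))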
Q: Dropping rows that contain a specific condition I got a dataset and I want to drop a few unusable rows. I used a filter for the specific condition under which I want the rows to be dropped: filter = df.groupby(['Bairro'], group_keys=False, sort=True).size() > 1 print(filter.to_string()) Bairro 01 True 02 False All the data in which the condition is false is useless. I've tried a few things, none of them work. So, I'd like the dataframe to maintain only the values where the condition is true: Bairro 01 True df2 = ((df.groupby(['Bairro']).size()) != 1) I was even planning to drop the values one by one, but that didn't work either df2 = df[~df.isin(['02']).any(axis=1)] I tried passing the filter as a condition: df.drop(df[df.groupby(['Bairro'], group_keys=False, sort=True).size() > 1], inplace = True) A: It seems like the df.loc method could help you in this instance. In your example: new_df = df.loc[df['col2'] == "True"] Or if you would like to use multiple conditions: new_df = df.loc[(df['col1'] == "True") & (df['col2'] == "True")] A: I think you're over-engineering your solution, therefore I've opted for a more detailed explanation of the answer. One way to filter a dataframe is to simply subscript a list/array of booleans. If the length of the array is the same as the length of the dataframe, this will output a view of the dataframe containing only rows aligned with the True values. Here is an example: import pandas as pd df = pd.DataFrame({ 'numbers': [0,1,2,3,4], 'letters': ['a','b','c','d','e'], 'colors': ['red', 'blue', 'yellow', 'green', 'purple'] }) df Which outputs: numbers letters colors 0 0 a red 1 1 b blue 2 2 c yellow 3 3 d green 4 4 e purple This is what I mean by subscripting a boolean list (not sure if this is accepted terminology) boolean_list = [True, True, False, True, False] filtered_df = df[boolean_list] filtered_df Which outputs: numbers letters colors 0 0 a red 1 1 b blue 3 3 d green We can use simple arguments to produce this boolean list from a dataframe df['numbers']>2 Outputs: 0 False 1 False 2 False 3 True 4 True Name: numbers, dtype: bool We can streamline the filtering with this redundant looking piece of code: df[df['numbers']>2] outputs: numbers letters colors 3 3 d green 4 4 e purple While it looks redundant, all we've done there is subscript a list of booleans. As written, this does not change df at all, for that we would need to do df = df[filter_argument] For more complicated filtering we can use .apply() to get our list of booleans. Say we only want rows where the letter in 'letters' is present in the color in 'colors': def letter_in_color(row): return row['letters'] in row['colors'] boolean_arr = df.apply(letter_in_color, axis = 1) print(boolean_arr) 0 False 1 True 2 False 3 False 4 True dtype: bool letter_in_color_df = df[boolean_arr] letter_in_color_df numbers letters colors 1 1 b blue 4 4 e purple I did this long explanation because while the concept of filtering a df with a boolean array is quite simple, looking at code which does that often looks weird or redundant and it isn't clear what is really going on. I hope you didn't stop reading there, because there is an important and powerful tool which you can add to the above situations to preclude many errors and unexpected behavior: ".loc[]" This is a more explicit and powerful indexer, and in all of the above cases we can gain its benefits with very few changes: df[boolean_array] becomes df.loc[boolean_array] For more information about df.loc[] instead of df[] see this answer
Dropping rows that contain a specific condition
I got a dataset and I want to drop a few unusable rows. I used a filter for the specific condition by which I want the rows to be dropped
filter = df.groupby(['Bairro'], group_keys=False, sort=True).size() > 1
print(filter.to_string())

Bairro
01 True
02 False

All the data in which the condition is false is useless. I've tried a few things, none of them work. So, I'd like the dataframe to keep only the values where the condition is true:
Bairro
01 True

df2 = ((df.groupby(['Bairro']).size()) != 1)

I was even planning on dropping the values one by one, but that didn't work either
df2 = df[~df.isin(['02']).any(axis=1)]

Tried passing the filter as a condition:
df.drop(df[df.groupby(['Bairro'], group_keys=False, sort=True).size() > 1], inplace = True)
[ "It seems like the df.loc method could help you in this instance. In your example:\nnew_df = df.loc[df['col2'] == \"True\"]\n\nOr if you would like to use multiple conditions:\nnew_df = df.loc[(df['col1'] == \"True\") & (df['col2'] == \"True\")]\n\n", "I think you're over-engineering your solution therefore I've opted for a more detailed explaination of the answer.\nOne way to filter a dataframe is to simply subscript a list/array of booleans. If the length of the array is the same as the length of the dataframe, this will output a view of the dataframe containing only rows aligned with the True values.\nHere is an example:\nimport pandas as pd\ndf = pd.DataFrame({\n 'numbers': [0,1,2,3,4],\n 'letters': ['a','b','c','d','e'],\n 'colors': ['red', 'blue', 'yellow', 'green', 'purple']\n})\ndf\n\nWhich outputs:\n\n\n\n\n\nnumbers\nletters\ncolors\n\n\n\n\n0\n0\na\nred\n\n\n1\n1\nb\nblue\n\n\n2\n2\nc\nyellow\n\n\n3\n3\nd\ngreen\n\n\n4\n4\ne\npurple\n\n\n\n\nThis is what I mean by subscripting a boolean list (not sure if this is accepted terminology)\nboolean_list = [True, True, False, True, False]\nfiltered_df = df[boolean_list]\nfiltered_df\n\nWhich outputs:\n\n\n\n\n\nnumbers\nletters\ncolors\n\n\n\n\n0\n0\na\nred\n\n\n1\n1\nb\nblue\n\n\n3\n3\nd\ngreen\n\n\n\n\nWe can use simple arguments to produce this boolean list from a dataframe\ndf['numbers']>2\n\nOutputs:\n0 False\n1 False\n2 False\n3 True\n4 True\nName: numbers, dtype: bool\n\nWe can streamline the filtering with this redundant looking piece of code:\ndf[df['numbers']>2]\n\noutputs:\n\n\n\n\n\nnumbers\nletters\ncolors\n\n\n\n\n3\n3\nd\ngreen\n\n\n4\n4\ne\npurple\n\n\n\n\nWhile it looks redundant, all we've done there is subscribe a list of booleans. As written, this does not change df at all, for that we would need to do df = df[filter_argument]\nFor more complicated filtering we can use .apply() to get our list of booleans. Say we only want rows where the letter in 'letters' is present in the color in 'colors':\ndef letter_in_color(row):\n return row['letters'] in row['colors']\nboolean_arr = df.apply(letter_in_color, axis = 1)\nprint(boolean_arr)\n\n0 False\n1 True\n2 False\n3 False\n4 True\ndtype: bool\n\nletter_in_color_df = df[boolean_array]\nletter_in_color_df\n\n\n\n\n\n\nnumbers\nletters\ncolors\n\n\n\n\n1\n1\nb\nblue\n\n\n4\n4\ne\npurple\n\n\n\n\nI did this long explaination because while the concept of filtering a df with a boolean array is quite simple, looking at code which does that often looks weird or redundant and it isn't clear what is really going on.\nI hope you didn't stop reading there:\nbecause there is an important and powerful tool which you can add to the above situations to preclude many errors and unexpected behavior: \".loc[]\" This is a more explicit and powerful indexer, and in all of the above cases we can gain its benefits with very few changes:\ndf[boolean_array] becomes df.loc[boolean_array]\nFor more information about df.loc[] instead of df[] see this answer\n" ]
[ 2, 1 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074534991_dataframe_pandas_python.txt
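A minimal runnable sketch of the group-size filtering discussed above; the tiny DataFrame here is hypothetical stand-in data, not the asker's dataset:
import pandas as pd

# 'Bairro' 01 appears twice, 'Bairro' 02 only once, mirroring the question.
df = pd.DataFrame({'Bairro': ['01', '01', '02'], 'value': [10, 20, 30]})

# transform('size') broadcasts each group's row count onto every row,
# giving a boolean mask aligned with the original index.
mask = df.groupby('Bairro')['Bairro'].transform('size') > 1
print(df[mask])

# Equivalent with groupby.filter, which keeps or drops whole groups:
print(df.groupby('Bairro').filter(lambda g: len(g) > 1))

Either form keeps only the rows whose 'Bairro' group has more than one member, which is what the original group-size condition expresses.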
Q: How to designate unreachable python code What's the pythonic way to designate unreachable code in python as in:
gender = readFromDB(...) # either 'm' or 'f'
if gender == 'm':
    greeting = 'Mr.'
elif gender == 'f':
    greeting = 'Ms.'
else:
    # What should this line say?

A: raise ValueError('invalid gender %r' % gender)

A: You could raise an exception:
raise ValueError("Unexpected gender; expected 'm' or 'f', got %s" % gender)

or use an assert False if you expect the database to return only 'm' or 'f':
assert False, "Unexpected gender; expected 'm' or 'f', got %s" % gender

A: This depends on how sure you are of the gender being either 'm' or 'f'.
If you're absolutely certain, use if...else instead of if...elif...else. Just makes it easier for everyone.
If there's any chance of malformed data, however, you should probably raise an exception to make testing and bug-fixing easier. You could use a gender-neutral greeting in this case, but for anything bigger, special values just make bugs harder to find.

A: I actually think that there's a place for this.
class SeriousDesignError(Exception):
    pass

So you can do this
if number % 2 == 0:
    result = "Even"
elif number % 2 == 1:
    result = "Odd"
else:
    raise SeriousDesignError()

I think this is the most meaningful error message. This kind of thing can only arise through design errors (or bad maintenance, which is the same thing.)

A: I sometimes do:
if gender == 'm':
    greeting = 'Mr.'
else:
    assert gender == 'f'
    greeting = 'Ms.'

I think this does a good job of telling a reader of the code that there are only (in this case) two possibilities, and what they are. Although you could make a case for raising a more descriptive error than AssertionError.

A: It depends exactly what you want the error to signal, but I would use a dictionary in this case:
greetings = {'m': 'Mr.', 'f': 'Ms.'}
gender = readFromDB(...) # either 'm' or 'f'
greeting = greetings[gender]

If gender is neither m nor f, this will raise a KeyError containing the unexpected value:
greetings = {'m': 'Mr.', 'f': 'Ms.'}

>>> greetings['W']

Traceback (most recent call last):
  File "<pyshell#4>", line 1, in <module>
    greetings['W']
KeyError: 'W'

If you want more detail in the message, you can catch & reraise it:
try:
    greeting = greetings[gender]
except KeyError as e:
    raise ValueError('Unrecognized gender %s' % gender)

A: Until now, I've usually used a variation on John Fouhy's answer -- but this is not exactly correct, as Ethan points out:
assert gender in ('m', 'f')
if gender == 'm':
    greeting = 'Mr.'
else:
    greeting = 'Ms.'

The main problem with using an assert is that if anyone runs your code with the -O or -OO flags, the asserts get optimized away. As Ethan points out below, that means you now have no data checks at all. Asserts are a development aid and shouldn't be used for production logic.
I'm going to get into the habit of using a check() function instead -- this allows for clean calling syntax like an assert:
def check(condition, msg=None):
    if not condition:
        raise ValueError(msg or '')

check(gender in ('m', 'f'))
if gender == 'm':
    greeting = 'Mr.'
else:
    greeting = 'Ms.'
Going back to the original question, I'd claim that using an assert() or check() prior to the if/else logic is easier to read, safer, and more explicit:

it tests the data quality first before starting to act on it -- this might be important if there are operators other than '==' in the if/else chain
it separates the assertion test from the branching logic, rather than interleaving them -- this makes reading and refactoring easier

A: You can use typing.assert_never to assert unreachability in a way that can be verified by static type checkers such as mypy and pyright.
With earlier versions of python (pre python3.11), assert_never is not available in the typing module, so you'll need to import it from the typing_extensions module.
Here's an example using typing.assert_never together with typing.Literal to assert that the else-block from OP's question is unreachable:
from typing import Literal
from typing_extensions import assert_never

var: Literal["m", "f"] = "m"

if var == "m":
    ...
elif var == "f":
    ...
else:
    assert_never(var) # unreachable

Static type checkers will verify that the else-block above is indeed unreachable. If the assert_never block is reached at runtime, it will throw an AssertionError:
>>> from typing_extensions import assert_never
>>> assert_never(123)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/home/homestar/tmp/.direnv/python-3.9.15/lib/python3.9/site-packages/typing_extensions.py", line 1993, in assert_never
    raise AssertionError("Expected code to be unreachable")
AssertionError: Expected code to be unreachable

Here is an example with a function whose return-type annotation is typing.NoReturn:
import time
from typing import NoReturn
from typing_extensions import Never, assert_never

def foo() -> NoReturn:
    while True:
        time.sleep(1)

bar: Never = foo()

assert_never(bar) # unreachable

The typing.Never type is available in the typing module for python >= 3.11 and in typing_extensions for earlier versions of python.
Here's another example using if/elif/else blocks to match against the type of a variable:
from typing import Union
from typing_extensions import assert_never

var: Union[int, str, float] = "123"

if isinstance(var, int):
    ...
elif isinstance(var, str):
    ...
elif isinstance(var, float):
    ...
else:
    assert_never(var) # unreachable

For more examples, including matching against the values of an enum type, see the Python typing docs on Unreachable Code and Exhaustiveness Checking.
How to designate unreachable python code
What's the pythonic way to designate unreachable code in python as in: gender = readFromDB(...) # either 'm' or 'f' if gender == 'm': greeting = 'Mr.' elif gender == 'f': greeting = 'Ms.' else: # What should this line say?
[ "raise ValueError('invalid gender %r' % gender)\n\n", "You could raise an exception:\nraise ValueError(\"Unexpected gender; expected 'm' or 'f', got %s\" % gender)\n\nor use an assert False if you expect the database to return only 'm' or 'f':\nassert False, \"Unexpected gender; expected 'm' or 'f', got %s\" % gender\n\n", "This depends on how sure you are of the gender being either 'm' or 'f'.\nIf you're absolutely certain, use if...else instead of if...elif...else. Just makes it easier for everyone.\nIf there's any chance of malformed data, however, you should probably raise an exception to make testing and bug-fixing easier. You could use a gender-neutral greeting in this case, but for anything bigger, special values just make bugs harder to find.\n", "I actually think that there's a place for this.\nclass SeriousDesignError(Exception):\n pass\n\nSo you can do this\nif number % 2 == 0:\n result = \"Even\"\nelif number % 2 == 1:\n result = \"Odd\"\nelse:\n raise SeriousDesignError()\n\nI think this is the most meaningful error message. This kind of thing can only arise through design errors (or bad maintenance, which is the same thing.)\n", "I sometimes do:\nif gender == 'm':\n greeting = 'Mr.'\nelse:\n assert gender == 'f'\n greeting = 'Ms.'\n\nI think this does a good job of telling a reader of the code that there are only (in this case) two possibilities, and what they are. Although you could make a case for raising a more descriptive error than AssertionError.\n", "It depends exactly what you want the error to signal, but I would use a dictionary in this case:\ngreetings = {'m': 'Mr.', 'f': 'Ms.'}\ngender = readFromDB(...) # either 'm' or 'f'\ngreeting = greetings[gender]\n\nIf gender is neither m nor f, this will raise a KeyError containing the unexpected value:\ngreetings = {'m': 'Mr.', 'f': 'Ms.'}\n\n>>> greetings['W']\n\nTraceback (most recent call last):\n File \"<pyshell#4>\", line 1, in <module>\n greetings['W']\nKeyError: 'W'\n\nIf you want more detail in the message, you can catch & reraise it:\ntry:\n greeting = greetings[gender]\nexcept KeyError,e:\n raise ValueError('Unrecognized gender %s' % gender)\n\n", "Until now, I've usually used a variation on John Fouhy's answer -- but this is not exactly correct, as Ethan points out:\nassert gender in ('m', 'f')\nif gender == 'm':\n greeting = 'Mr.'\nelse:\n greeting = 'Ms.'\n\nThe main problem with using an assert is that if anyone runs your code with the -O or -OO flags, the asserts get optimized away. As Ethan points out below, that means you now have no data checks at all. Asserts are a development aid and shouldn't be used for production logic. 
I'm going to get into the habit of using a check() function instead -- this allows for clean calling syntax like an assert:\ndef check(condition, msg=None):\n if not condition:\n raise ValueError(msg or '')\n\ncheck(gender in ('m', 'f'))\nif gender == 'm':\n greeting = 'Mr.'\nelse:\n greeting = 'Ms.'\n\nGoing back to the original question, I'd claim that using an assert() or check() prior to the if/else logic is easier to read, safer, and more explicit:\n\nit tests the data quality first before starting to act on it -- this might be important if there are operators other than '==' in the if/else chain\nit separates the assertion test from the branching logic, rather than interleaving them -- this makes reading and refactoring easier\n\n", "You can use and typing.assert_never to assert unreachability in a way that can be verified by static type checkers such as mypy and pyright.\nWith earlier versions of python (pre python3.11), assert_never is not available in the typing module, so you'll need to import it from the typing_extensions module.\nHere's an example using typing.assert_never together with typing.Literal to assert that the else-block from OP's question is unreachable:\nfrom typing import Literal\nfrom typing_extensions import assert_never\n\nvar: Literal[\"m\", \"f\"] = \"m\"\n\nif var == \"m\":\n ...\nelif var == \"f\":\n ...\nelse:\n assert_never(var) # unreachable\n\nStatic type checkers will verify that the else-block above is indeed unreachable. If the assert_never block is reached at runtime, it will throw an AssertionError:\n>>> from typing_extensions import assert_never\n>>> assert_never(123)\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/home/homestar/tmp/.direnv/python-3.9.15/lib/python3.9/site-packages/typing_extensions.py\", line 1993, in assert_never\n raise AssertionError(\"Expected code to be unreachable\")\nAssertionError: Expected code to be unreachable\n\nHere is an example with a function whose return-type annotation is typing.NoReturn:\nimport time\nfrom typing import NoReturn\nfrom typing_extensions import Never, assert_never\n\ndef foo() -> NoReturn:\n while True:\n time.sleep(1)\n\nbar: Never = foo()\n\nassert_never(bar) # unreachable\n\nThe typing.Never type is available in typing module for python >=3.11 and in typing_extensions for earlier versions of python.\nHere's another example using if/elif/else blocks to match against the type of a variable:\nfrom typing import Union\nfrom typing_extensions import assert_never\n\nvar: Union[int, str, float] = \"123\"\n\nif isinstance(var, int):\n ...\nelif isinstance(var, str):\n ...\nelif isinstance(var, float):\n ...\nelse:\n assert_never(var) # unreachable\n\nFor more examples, including matching against the values of an enum type, see the Python typing docs on Unreachable Code and Exhaustiveness Checking.\n" ]
[ 30, 9, 6, 4, 4, 4, 3, 0 ]
[]
[]
[ "python" ]
stackoverflow_0000815310_python.txt
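For completeness, a small sketch combining the dictionary-lookup and assert_never ideas above; it assumes Python 3.10+ for match and imports assert_never from typing_extensions on versions before 3.11:
from typing import Literal
from typing_extensions import assert_never

def greeting_for(gender: Literal['m', 'f']) -> str:
    match gender:
        case 'm':
            return 'Mr.'
        case 'f':
            return 'Ms.'
        case _:
            # Type checkers verify this arm is unreachable; at runtime it
            # raises AssertionError if bad data slips through anyway.
            assert_never(gender)

print(greeting_for('m'))  # Mr.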
Q: Global Variable Scope in python Do I need to make everything global? Considering scope? I keep randomly running into this problem; I'm going to assume it's somehow a syntax problem on my side. But variables outside of a scope in Python seem to be inconsistent... my situation is
libFound=False

def Setup():
    _setup_import()
    print('booting:',libFound) #--Here I get False?
    if libFound: _boot()
    else:print('Did not Boot')

def _setup_import():
    sys.path.append(PATH)
    try:
        import libwrapper
        global libwrapper
    except:
        pass #Critical Exception
    else: #found
        libFound=True
        print(libFound)#--Here I get True

I assume this is garbage collection, but I would think it would match global before local variables. Should I have to make everything global? Scope is scope; I seem to get this often in Python. I would like to add that it seems to happen most when initializing with None or a bool.
Making variables global, checking syntax: I've tried researching this, but I don't understand if it's possibly my syntax or my lack of understanding of how Python is actually handling variables.

A: A global variable is, by definition, bound in the global scope. But assignments to names in a function always define a new local variable, unless you declare the name as global or non-local using global or nonlocal, respectively. –
chepner
10 mins ago
Boom... inside the scope of _setup_import() I made it global and it worked; like you said, inside the scope of the method it was local

A: This has nothing to do with the garbage collector.
I think you may misunderstand how scoping works in Python.
See the code & comments below. I hope it helps.
myGlobal = False

def ask_global():
    # To read a global variable, we don't need to use the global keyword
    print(myGlobal)

def toggle_global():
    # Changes the value of the global variable
    global myGlobal
    myGlobal = not myGlobal

def global_false():
    # Will not actually set myGlobal to False, because of scoping
    # When assigning a global without first declaring it as global,
    # it will be a new local variable
    # myGlobal = not myGlobal
    # would result in 'myGlobal' referenced before assignment
    myGlobal = False


print(myGlobal) # False
ask_global() # False
if True:
    # We're in global scope here, changing the global variable
    myGlobal = True

print(myGlobal) # True
ask_global() # True

global_false()
print(myGlobal) # True
ask_global() # True

toggle_global()
print(myGlobal) # False
ask_global() # False
Global Variable Scope in python
Do I need to make everything global? Considering scope? I keep randomly running into this problem; I'm going to assume it's somehow a syntax problem on my side. But variables outside of a scope in Python seem to be inconsistent... my situation is
libFound=False

def Setup():
    _setup_import()
    print('booting:',libFound) #--Here I get False?
    if libFound: _boot()
    else:print('Did not Boot')

def _setup_import():
    sys.path.append(PATH)
    try:
        import libwrapper
        global libwrapper
    except:
        pass #Critical Exception
    else: #found
        libFound=True
        print(libFound)#--Here I get True

I assume this is garbage collection, but I would think it would match global before local variables. Should I have to make everything global? Scope is scope; I seem to get this often in Python. I would like to add that it seems to happen most when initializing with None or a bool.
Making variables global, checking syntax: I've tried researching this, but I don't understand if it's possibly my syntax or my lack of understanding of how Python is actually handling variables.
[ "A global variable is, by definition, bound in the global scope. But assignments to names in a function always define a new local variable, unless you declare the name as global or non-local using global or nonlocal, respectively. –\nchepner\n10 mins ago\nBoom... inside the scope of _setup_libraries() I made it global and it worked, like you said inside the scope of the method it was local\n", "This has nothing to do with the garbage collector.\nI think you may misunderstand how scoping works in python.\nSee code & comments in it below. I hope it helps.\nmyGlobal = False\n\ndef ask_global():\n # To use Glboal Variable, we dont need to use global keyword\n print(myGlobal)\n\ndef toggle_global():\n # Changes the value of the global variable\n global myGlobal\n myGlobal = not myGlobal\n\ndef global_false():\n # Will not actually set myGlobal false, because scoping\n # When assigning a global without first declaring it as global, \n # it will be a new local variable\n # myGlobal = not myGlobal\n # would result in 'myGlobal' referenced before assignment\n myGlobal = False\n \n\nprint(myGlobal) # False\nask_global() # False\nif True:\n # We're in global scope here, changing the global variable\n myGlobal = True\n\nprint(myGlobal) # True\nask_global() # True\n\nglobal_false()\nprint(myGlobal) # True\nask_global() # True\n\ntoggle_global()\nprint(myGlobal) # False\nask_global() # False\n\n" ]
[ 0, 0 ]
[]
[]
[ "global_variables", "python", "scope", "types", "variable_assignment" ]
stackoverflow_0074535376_global_variables_python_scope_types_variable_assignment.txt
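Applying the accepted answer back to the question's pattern, a minimal sketch; the import is replaced by a plain assignment so the snippet runs anywhere:
lib_found = False

def _setup_import():
    global lib_found   # without this line, the assignment below creates a local
    lib_found = True   # stands in for the successful import branch

def setup():
    _setup_import()
    print('booting:', lib_found)  # True: merely reading a global needs no keyword

setup()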
Q: Selenium 4 (python): stale element reference during table web scraping I am web scraping a web table that looks as follows:
| A | B | C | D |
1| Name | Surname| Route | href="link with more info"|
2| Name | Surname| Route | href="link with more info"|
3| Name | Surname| Route | href="link with more info"|

links = driver.find_elements(by='xpath', value='//a[@title="route detail"]')

So far so good, I get what I want. Now I want to click on the collected links to gather the info in the subpage (which I know how to do) and return to the main page, move to the second row, and so forth.
for link in links: # links = driver.find_elements(by='xpath', value='//a[@title="route detail"]')
    link.click()
    time.sleep(2)
    driver.back()

The code above works for the first run and then throws the error:
Message: stale element reference: element is not attached to the page document

I tried to add various waits, refreshes etc., but no success. I am using selenium 4.6.0. By the way, if I execute it line by line outside of the for loop in a Jupyter Notebook, it works. I also added the find_elements line inside the for loop but it still doesn't work.

A: By navigating to another page, all web elements previously collected by Selenium (they are actually references to physical web elements) are no longer valid, since the web page is re-built when you open it again.
To make your code work you need to collect the links list again on the main page when you get back.
So, this code should work:
links = driver.find_elements(by='xpath', value='//a[@title="route detail"]')
for index, link in enumerate(links):
    links[index].click()
    # do what you want to do on the opened page
    driver.back()
    time.sleep(0.2)
    links = driver.find_elements(by='xpath', value='//a[@title="route detail"]')
Selenium 4 (python): stale element reference during table web scraping
I am web scraping a web table that looks as follows:
| A | B | C | D |
1| Name | Surname| Route | href="link with more info"|
2| Name | Surname| Route | href="link with more info"|
3| Name | Surname| Route | href="link with more info"|

links = driver.find_elements(by='xpath', value='//a[@title="route detail"]')

So far so good, I get what I want. Now I want to click on the collected links to gather the info in the subpage (which I know how to do) and return to the main page, move to the second row, and so forth.
for link in links: # links = driver.find_elements(by='xpath', value='//a[@title="route detail"]')
    link.click()
    time.sleep(2)
    driver.back()

The code above works for the first run and then throws the error:
Message: stale element reference: element is not attached to the page document

I tried to add various waits, refreshes etc., but no success. I am using selenium 4.6.0. By the way, if I execute it line by line outside of the for loop in a Jupyter Notebook, it works. I also added the find_elements line inside the for loop but it still doesn't work.
[ "By navigating to another page all previously collected by Selenium web elements (they are actually references to a physical web elements) become no more valid since the web page is re-built when you open it again.\nTo make your code working you need to collect the links list again on the main page when you getting back.\nSo, this code should work:\nlinks = driver.find_elements(by='xpath', value='//a[@title=\"route detail\"]')\nfor index, link in enumerate(links):\n links[index].click() \n # do what you want to do on the opened page\n driver.back()\n time.sleep(0.2)\n links = wait.until(EC.visibility_of_element_located((By.XPATH, '//a[@title=\"route detail\"]')))\n\n" ]
[ 2 ]
[]
[]
[ "python", "selenium", "selenium_webdriver", "staleelementreferenceexception", "web_scraping" ]
stackoverflow_0074535525_python_selenium_selenium_webdriver_staleelementreferenceexception_web_scraping.txt
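A sketch of the re-locate-by-index pattern from the answer; the URL is a placeholder and the snippet needs a live page with matching links (plus a matching chromedriver) to actually run:
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get('https://example.com/routes')  # hypothetical page

xpath = '//a[@title="route detail"]'
count = len(driver.find_elements(By.XPATH, xpath))
for i in range(count):
    # Re-locate on every pass: driver.back() rebuilds the DOM, so any
    # reference collected before navigation would be stale.
    driver.find_elements(By.XPATH, xpath)[i].click()
    # ... scrape the detail page here ...
    driver.back()
driver.quit()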
Q: How to get count of bar plot with non-count axis? Below is my datasheet and sample graph. As you can see above, the x-axis consists of day and the y-axis consists of tip, and hue is set to sex. I want the count for each bar, i.e. for the light pink (Male) bar the number should be 8, because there are 8 males who gave a tip on Sunday, and likewise Female should be 1. I know how to display a number on top of the graph; I don't know how to get the count, i.e. 8 and 1.
import seaborn as sns
import matplotlib.pyplot as plt

sns.set_theme(style="whitegrid")

df = sns.load_dataset("tips")[1:10]
print(df)
ax = sns.barplot(x='day', y='tip',hue="sex", data=df, palette="tab20_r")

for rect in ax.patches:
    y_value = rect.get_height()
    x_value = rect.get_x() + rect.get_width() / 2

    space = 1
    label = "{:.0f}".format(y_value)

    ax.annotate(label, (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va='bottom')

plt.show()

Above is the code for the sample image. If you understood my question, any help would be great. If you didn't understand my question, you can ask me in a comment about what you didn't understand.
There are 3 questions on SO which are quite different, yet for some reason SO is marking this question as a duplicate; links for the same:

How to display custom values on a bar plot
How to plot and annotate grouped bars in seaborn / matplotlib
How to annotate a barplot with values from another column

A: The following seems to work:
labels = df.groupby(['sex', 'day']).size()
ax = sns.barplot(x='day', y='tip',hue='sex', data=df, palette='tab20_r')
for p, value in zip(ax.patches, labels):
    x = p.get_x() + p.get_width() / 2
    y = p.get_y() + p.get_height() / 2
    ax.annotate(value, (x, y), ha='center')

Note: the order of the groupby matters; you want the "inner" group ('sex') first and the outer group ('day') last.
For the full df (not just the first 10 rows), the same code produces:

The labels contain:
>>> labels
sex day
Male Thur 30
 Fri 10
 Sat 59
 Sun 58
Female Thur 32
 Fri 9
 Sat 28
 Sun 18
dtype: int64

Alternatively:
ax = sns.barplot(x='day', y='tip',hue='sex', data=df, palette='tab20_r')
ax.bar_label(ax.containers[0], labels=labels['Male'], label_type='center')
ax.bar_label(ax.containers[1], labels=labels['Female'], label_type='center')
How to get count of bar plot with non-count axis?
Below is my datasheet and sample graph. As you can see above, the x-axis consists of day and the y-axis consists of tip, and hue is set to sex. I want the count for each bar, i.e. for the light pink (Male) bar the number should be 8, because there are 8 males who gave a tip on Sunday, and likewise Female should be 1. I know how to display a number on top of the graph; I don't know how to get the count, i.e. 8 and 1.
import seaborn as sns
import matplotlib.pyplot as plt

sns.set_theme(style="whitegrid")

df = sns.load_dataset("tips")[1:10]
print(df)
ax = sns.barplot(x='day', y='tip',hue="sex", data=df, palette="tab20_r")

for rect in ax.patches:
    y_value = rect.get_height()
    x_value = rect.get_x() + rect.get_width() / 2

    space = 1
    label = "{:.0f}".format(y_value)

    ax.annotate(label, (x_value, y_value), xytext=(0, space), textcoords="offset points", ha='center', va='bottom')

plt.show()

Above is the code for the sample image. If you understood my question, any help would be great. If you didn't understand my question, you can ask me in a comment about what you didn't understand.
There are 3 questions on SO which are quite different, yet for some reason SO is marking this question as a duplicate; links for the same:

How to display custom values on a bar plot
How to plot and annotate grouped bars in seaborn / matplotlib
How to annotate a barplot with values from another column
[ "The following seems to work:\nlabels = df.groupby(['sex', 'day']).size()\nax = sns.barplot(x='day', y='tip',hue='sex', data=df, palette='tab20_r')\nfor p, value in zip(ax.patches, labels):\n x = p.get_x() + p.get_width() / 2\n y = p.get_y() + p.get_height() / 2\n ax.annotate(value, (x, y), ha='center')\n\n\nNote: the order of the groupby matters; you want the \"inner\" group ('sex') first and the outer group ('days') last.\nFor the full df (not just the first 10 rows), the same code produces:\n\nThe labels contain:\n>>> labels\nsex day \nMale Thur 30\n Fri 10\n Sat 59\n Sun 58\nFemale Thur 32\n Fri 9\n Sat 28\n Sun 18\ndtype: int64\n\nAlternatively:\nax = sns.barplot(x='day', y='tip',hue='sex', data=df, palette='tab20_r')\nax.bar_label(ax.containers[0], labels=labels['Male'], label_type='center')\nax.bar_label(ax.containers[1], labels=labels['Female'], label_type='center')\n\n" ]
[ 1 ]
[]
[]
[ "python" ]
stackoverflow_0074534223_python.txt
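A compact sketch of the bar_label variant from the answer, with an explicit hue_order so the bar containers line up with the counts; it assumes seaborn's bundled tips dataset:
import seaborn as sns
import matplotlib.pyplot as plt

df = sns.load_dataset('tips')
counts = df.groupby(['sex', 'day']).size()  # observations behind each bar

hue_order = ['Male', 'Female']
ax = sns.barplot(x='day', y='tip', hue='sex', hue_order=hue_order, data=df)
# ax.containers holds one bar group per hue level, in hue_order;
# label each group's bars with the observation counts, not the bar heights.
for container, sex in zip(ax.containers, hue_order):
    ax.bar_label(container, labels=counts[sex], label_type='center')
plt.show()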
Q: How to set different values of the elements of a np.arrays to different values in Python 3.8? Let I have the following np.array: >>>a=np.array([20, 10,5,10,5,10]) >>>array([20, 10, 5, 10, 5, 10]) Now, I want to replace 20 and 10 by 1 and 5 by 0. Is there a function that can do that in one step? Here is what I have tried: >>>a[a==10]=1 >>>a[a==10]=1 >>>a[a==5]=0 and I am getting my desired output, which is: >>>array([1, 1, 0, 1, 0, 1]) As you can see, I had to follow three steps in order to get my result. But I want to get my result only in one step. Is there a function that can deliver my result in one step? ** Edit: As suggested by Salvatore, I tried the following: ** import pandas as pd >>>a=np.array([[20, 5, 10, 5, 10, 7, 5]]) >>>a = pd.Series(a).replace([20,10,7,5],[1,1,1,0]).values But with the above method I am getting the following error: ValueError: Data must be 1-dimensional A: You can use the map function. list(map(lambda x: int(x in [10,20]),a)) The map function will apply the function in the first argument to all the elements in the list given as the second argument. Here the lambda function returns 0 if the element is not 10 or 20, and 1 if the element is 10 or 20. EDIT FOLLOWING THE AUTHOR'S COMMENT To keep the result as a numpy array, you can use the from iter numpy function : a = np.fromiter(map(lambda x: int(x in [10,20]),a),dtype=int) A: Despite @robinood's answer works fine, I'd prefer this way: import pandas as pd a = pd.Series(a).replace([20,10,5],[1,1,0]).values This because, for long arrays, cycling can take a great amount of time. For this reason i tested both mine and @robinood's solution on a = np.random.choice([20, 10, 5], size=10_000_000) and the results are the following: my solution: 655 ms ± 7.09 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) @robinood's solution 9.51 s ± 25.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
How to set different values of the elements of a np.arrays to different values in Python 3.8?
Let I have the following np.array: >>>a=np.array([20, 10,5,10,5,10]) >>>array([20, 10, 5, 10, 5, 10]) Now, I want to replace 20 and 10 by 1 and 5 by 0. Is there a function that can do that in one step? Here is what I have tried: >>>a[a==10]=1 >>>a[a==10]=1 >>>a[a==5]=0 and I am getting my desired output, which is: >>>array([1, 1, 0, 1, 0, 1]) As you can see, I had to follow three steps in order to get my result. But I want to get my result only in one step. Is there a function that can deliver my result in one step? ** Edit: As suggested by Salvatore, I tried the following: ** import pandas as pd >>>a=np.array([[20, 5, 10, 5, 10, 7, 5]]) >>>a = pd.Series(a).replace([20,10,7,5],[1,1,1,0]).values But with the above method I am getting the following error: ValueError: Data must be 1-dimensional
[ "You can use the map function.\nlist(map(lambda x: int(x in [10,20]),a))\n\nThe map function will apply the function in the first argument to all the elements in the list given as the second argument.\nHere the lambda function returns 0 if the element is not 10 or 20, and 1 if the element is 10 or 20.\nEDIT FOLLOWING THE AUTHOR'S COMMENT\nTo keep the result as a numpy array, you can use the from iter numpy function :\na = np.fromiter(map(lambda x: int(x in [10,20]),a),dtype=int)\n\n", "Despite @robinood's answer works fine, I'd prefer this way:\nimport pandas as pd\na = pd.Series(a).replace([20,10,5],[1,1,0]).values\n\nThis because, for long arrays, cycling can take a great amount of time. For this reason i tested both mine and @robinood's solution on a = np.random.choice([20, 10, 5], size=10_000_000) and the results are the following:\n\nmy solution: 655 ms ± 7.09 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n@robinood's solution 9.51 s ± 25.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n\n" ]
[ 2, 2 ]
[]
[]
[ "numpy", "numpy_ndarray", "python", "python_2.7", "python_3.x" ]
stackoverflow_0074533939_numpy_numpy_ndarray_python_python_2.7_python_3.x.txt
Q: Find overlapping numeric ranges between 2 columns pandas and subtract difference of another column This is a toy dataset: df = pd.DataFrame({'ID': ['A','A','A','A'], 'target': ['B','B','B','B'], 'length':[208,315,1987,3775], 'start':[139403,140668,141726,143705], 'end':[139609,140982,143711,147467]}) ID target length start end 0 A B 208 139403 139609 1 A B 315 140668 140982 2 A B 1987 141726 143711 3 A B 3775 143705 147467 I need to sum the length column taking overlapping ranges in start and end into consideration. In row 3, the start value 143705 is between the ranges of row 2's 141726-143711. 143711 - 143705 = 6 so I need to subtract 6 from the length in row 3: 3775 - 6 = 3769. Which would change the df to: ID target length start end 0 A B 208 139403 139609 1 A B 315 140668 140982 2 A B 1987 141726 143711 3 A B 3769 143705 147467 Then grouping by ID, target and summing length: df.groupby(['ID','target'])['length'].sum() ID target A B 6279 Name: length, dtype: int64 Does anyone know how I can do this in a pythonic way? Thanks so much for any help! A: A possible solution: (df.assign(length= df['start'].lt(df['end'].shift()) .mul(df['start']-df['end'].shift(fill_value=0)) .add(df['length']))) Output: ID target length start end 0 A B 208 139403 139609 1 A B 315 140668 140982 2 A B 1987 141726 143711 3 A B 3769 143705 147467
Find overlapping numeric ranges between 2 columns pandas and subtract difference of another column
This is a toy dataset: df = pd.DataFrame({'ID': ['A','A','A','A'], 'target': ['B','B','B','B'], 'length':[208,315,1987,3775], 'start':[139403,140668,141726,143705], 'end':[139609,140982,143711,147467]}) ID target length start end 0 A B 208 139403 139609 1 A B 315 140668 140982 2 A B 1987 141726 143711 3 A B 3775 143705 147467 I need to sum the length column taking overlapping ranges in start and end into consideration. In row 3, the start value 143705 is between the ranges of row 2's 141726-143711. 143711 - 143705 = 6 so I need to subtract 6 from the length in row 3: 3775 - 6 = 3769. Which would change the df to: ID target length start end 0 A B 208 139403 139609 1 A B 315 140668 140982 2 A B 1987 141726 143711 3 A B 3769 143705 147467 Then grouping by ID, target and summing length: df.groupby(['ID','target'])['length'].sum() ID target A B 6279 Name: length, dtype: int64 Does anyone know how I can do this in a pythonic way? Thanks so much for any help!
[ "A possible solution:\n(df.assign(length=\n df['start'].lt(df['end'].shift())\n .mul(df['start']-df['end'].shift(fill_value=0))\n .add(df['length'])))\n\nOutput:\n ID target length start end\n0 A B 208 139403 139609\n1 A B 315 140668 140982\n2 A B 1987 141726 143711\n3 A B 3769 143705 147467\n\n" ]
[ 1 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074535386_pandas_python.txt
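An equivalent sketch that clips the overlap at zero, so rows that do not intersect the previous interval are left untouched; for several ID/target groups the shift would need to happen per group (e.g. df.groupby(['ID', 'target'])['end'].shift()):
import pandas as pd

df = pd.DataFrame({'ID': ['A'] * 4, 'target': ['B'] * 4,
                   'length': [208, 315, 1987, 3775],
                   'start': [139403, 140668, 141726, 143705],
                   'end': [139609, 140982, 143711, 147467]})

# Overlap with the previous row's interval, clipped at zero so
# non-overlapping rows subtract nothing.
overlap = (df['end'].shift() - df['start']).clip(lower=0).fillna(0)
df['length'] = (df['length'] - overlap).astype(int)

print(df.groupby(['ID', 'target'])['length'].sum())  # A B -> 6279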
Q: Cannot find reference 'pack' in 'None' I'm trying to make a basic pong game, starting by drawing a rectangle on the left side of the screen. When I run it I get the error Cannot find reference 'pack' in 'None'. Thoughts?
import tkinter as tk

window = tk.Tk()
window.geometry('600x600')

canvas_width, canvas_height = 10,100
x1, y1 = canvas_width // 2, canvas_height // 2

canvas = tk.Canvas(window, width=canvas_width, height=canvas_height).place(x=1,y=0)
canvas.pack(side=tk.LEFT)

window.mainloop()

A: Remove place from your canvas. The issue is that place() returns None, so your canvas object evaluates to None. You don't need place() if you're going to use pack().
Use one or the other (I prefer pack()) - and it's always a good idea to declare your widgets on one line, then add them to a geometry manager on another because, again, the geometry manager functions return None and you probably don't want that.
canvas = tk.Canvas(window, width=canvas_width, height=canvas_height)
canvas.pack(side=tk.LEFT)

OR
canvas = tk.Canvas(window, width=canvas_width, height=canvas_height)
canvas.place(x=1, y=0)
Cannot find reference 'pack' in 'None'
I'm trying to make a basic pong game, starting by drawing a rectangle on the left side of the screen. When I run it I get the error Cannot find reference 'pack' in 'None'. Thoughts?
import tkinter as tk

window = tk.Tk()
window.geometry('600x600')

canvas_width, canvas_height = 10,100
x1, y1 = canvas_width // 2, canvas_height // 2

canvas = tk.Canvas(window, width=canvas_width, height=canvas_height).place(x=1,y=0)
canvas.pack(side=tk.LEFT)

window.mainloop()
[ "Remove place from your canvas. The issue is that place() returns None, so your canvas object evaluates to None. You don't need place() if you're going to use pack().\nUse one or the other (I prefer pack()) - and it's always a good idea to declare your widgets on one line, then add them to a geometry manager on another because, again, the geometry manager functions return None and you probably don't want that.\ncanvas = tk.Canvas(window, width=canvas_width, height=canvas_height)\ncanvas.pack(side=tk.LEFT)\n\nOR\ncanvas = tk.Canvas(window, width=canvas_width, height=canvas_height)\ncanvas.place(x=1, y=0)\n\n" ]
[ 2 ]
[]
[]
[ "python", "tkinter", "tkinter_canvas" ]
stackoverflow_0074535832_python_tkinter_tkinter_canvas.txt
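The corrected pong starter in full, following the answer's create-then-pack pattern (geometry-manager calls return None, so they must not be chained onto the constructor):
import tkinter as tk

window = tk.Tk()
window.geometry('600x600')

canvas_width, canvas_height = 10, 100
canvas = tk.Canvas(window, width=canvas_width, height=canvas_height)
canvas.pack(side=tk.LEFT)  # on its own line, so 'canvas' stays a Canvas

# Draw the paddle as a filled rectangle covering the canvas.
canvas.create_rectangle(0, 0, canvas_width, canvas_height, fill='black')

window.mainloop()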
Q: How do I convert this list of dictionaries to a csv file? I have a list of dictionaries that looks something like this: toCSV = [{'name':'bob','age':25,'weight':200},{'name':'jim','age':31,'weight':180}] What should I do to convert this to a csv file that looks something like this: name,age,weight bob,25,200 jim,31,180 A: import csv to_csv = [ {'name': 'bob', 'age': 25, 'weight': 200}, {'name': 'jim', 'age': 31, 'weight': 180}, ] keys = to_csv[0].keys() with open('people.csv', 'w', newline='') as output_file: dict_writer = csv.DictWriter(output_file, keys) dict_writer.writeheader() dict_writer.writerows(to_csv) A: In python 3 things are a little different, but way simpler and less error prone. It's a good idea to tell the CSV your file should be opened with utf8 encoding, as it makes that data more portable to others (assuming you aren't using a more restrictive encoding, like latin1) import csv toCSV = [{'name':'bob','age':25,'weight':200}, {'name':'jim','age':31,'weight':180}] with open('people.csv', 'w', encoding='utf8', newline='') as output_file: fc = csv.DictWriter(output_file, fieldnames=toCSV[0].keys(), ) fc.writeheader() fc.writerows(toCSV) Note that csv in python 3 needs the newline='' parameter, otherwise you get blank lines in your CSV when opening in excel/opencalc. Alternatively: I prefer use to the csv handler in the pandas module. I find it is more tolerant of encoding issues, and pandas will automatically convert string numbers in CSVs into the correct type (int,float,etc) when loading the file. import pandas dataframe = pandas.read_csv(filepath) list_of_dictionaries = dataframe.to_dict('records') dataframe.to_csv(filepath) Note: pandas will take care of opening the file for you if you give it a path, and will default to utf8 in python3, and figure out headers too. a dataframe is not the same structure as what CSV gives you, so you add one line upon loading to get the same thing: dataframe.to_dict('records') pandas also makes it much easier to control the order of columns in your csv file. By default, they're alphabetical, but you can specify the column order. With vanilla csv module, you need to feed it an OrderedDict or they'll appear in a random order (if working in python < 3.5). See: Preserving column order in Python Pandas DataFrame for more. A: this is when you have one dictionary list: import csv with open('names.csv', 'w') as csvfile: fieldnames = ['first_name', 'last_name'] writer = csv.DictWriter(csvfile, fieldnames=fieldnames) writer.writeheader() writer.writerow({'first_name': 'Baked', 'last_name': 'Beans'}) A: Because @User and @BiXiC asked for help with UTF-8 here a variation of the solution by @Matthew. (I'm not allowed to comment, so I'm answering.) 
import unicodecsv as csv
toCSV = [{'name':'bob','age':25,'weight':200},
         {'name':'jim','age':31,'weight':180}]
keys = toCSV[0].keys()
with open('people.csv', 'wb') as output_file:
    dict_writer = csv.DictWriter(output_file, keys)
    dict_writer.writeheader()
    dict_writer.writerows(toCSV)

A: Here is another, more general solution assuming you don't have a list of rows (maybe they don't fit in memory) or a copy of the headers (maybe the write_csv function is generic):
def gen_rows():
    yield OrderedDict(name='bob', age=25, weight=200)
    yield OrderedDict(name='jim', age=31, weight=180)

def write_csv():
    it = gen_rows()
    first_row = next(it)
    with open("people.csv", "w") as outfile:
        wr = csv.DictWriter(outfile, fieldnames=list(first_row))
        wr.writeheader()
        wr.writerow(first_row)
        wr.writerows(it)

Note: the OrderedDict constructor used here only preserves keyword order in python >= 3.6. If order is important, use the OrderedDict([('name', 'bob'),('age',25)]) form.

A: import csv

with open('file_name.csv', 'w') as csv_file:
    writer = csv.writer(csv_file)
    writer.writerow(('column1', 'column2', 'column3'))
    for key, value in dictionary.items():
        writer.writerow([key, value[0], value[1]])

This would be the simplest way to write data to a .csv file

A: import csv
toCSV = [{'name':'bob','age':25,'weight':200},
         {'name':'jim','age':31,'weight':180}]
header=['name','age','weight']
try:
    with open('output'+str(date.today())+'.csv',mode='w',encoding='utf8',newline='') as output_to_csv:
        dict_csv_writer = csv.DictWriter(output_to_csv, fieldnames=header,dialect='excel')
        dict_csv_writer.writeheader()
        dict_csv_writer.writerows(toCSV)
    print('\nData exported to csv successfully and sample data')
except IOError as io:
    print('\n',io)

A: a short solution with Pandas
import pandas as pd

list_of_dicts = [
    {'name': 'bob', 'age': 25, 'weight': 200},
    {'name': 'jim', 'age': 31, 'weight': 180},
]

df = pd.DataFrame(list_of_dicts)
df.to_csv("names.csv", index=False)
How do I convert this list of dictionaries to a csv file?
I have a list of dictionaries that looks something like this: toCSV = [{'name':'bob','age':25,'weight':200},{'name':'jim','age':31,'weight':180}] What should I do to convert this to a csv file that looks something like this: name,age,weight bob,25,200 jim,31,180
[ "import csv\n\nto_csv = [\n {'name': 'bob', 'age': 25, 'weight': 200},\n {'name': 'jim', 'age': 31, 'weight': 180},\n]\n\nkeys = to_csv[0].keys()\n\nwith open('people.csv', 'w', newline='') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(to_csv)\n\n", "In python 3 things are a little different, but way simpler and less error prone. It's a good idea to tell the CSV your file should be opened with utf8 encoding, as it makes that data more portable to others (assuming you aren't using a more restrictive encoding, like latin1)\nimport csv\ntoCSV = [{'name':'bob','age':25,'weight':200},\n {'name':'jim','age':31,'weight':180}]\nwith open('people.csv', 'w', encoding='utf8', newline='') as output_file:\n fc = csv.DictWriter(output_file, \n fieldnames=toCSV[0].keys(),\n\n )\n fc.writeheader()\n fc.writerows(toCSV)\n\n\nNote that csv in python 3 needs the newline='' parameter, otherwise you get blank lines in your CSV when opening in excel/opencalc.\n\nAlternatively: I prefer use to the csv handler in the pandas module. I find it is more tolerant of encoding issues, and pandas will automatically convert string numbers in CSVs into the correct type (int,float,etc) when loading the file. \nimport pandas\ndataframe = pandas.read_csv(filepath)\nlist_of_dictionaries = dataframe.to_dict('records')\ndataframe.to_csv(filepath)\n\nNote:\n\npandas will take care of opening the file for you if you give it a path, and will default to utf8 in python3, and figure out headers too.\na dataframe is not the same structure as what CSV gives you, so you add one line upon loading to get the same thing: dataframe.to_dict('records')\npandas also makes it much easier to control the order of columns in your csv file. By default, they're alphabetical, but you can specify the column order. With vanilla csv module, you need to feed it an OrderedDict or they'll appear in a random order (if working in python < 3.5). See: Preserving column order in Python Pandas DataFrame for more.\n\n", "this is when you have one dictionary list:\nimport csv\nwith open('names.csv', 'w') as csvfile:\n fieldnames = ['first_name', 'last_name']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerow({'first_name': 'Baked', 'last_name': 'Beans'})\n\n", "Because @User and @BiXiC asked for help with UTF-8 here a variation of the solution by @Matthew. (I'm not allowed to comment, so I'm answering.)\nimport unicodecsv as csv\ntoCSV = [{'name':'bob','age':25,'weight':200},\n {'name':'jim','age':31,'weight':180}]\nkeys = toCSV[0].keys()\nwith open('people.csv', 'wb') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(toCSV)\n\n", "Here is another, more general solution assuming you don't have a list of rows (maybe they don't fit in memory) or a copy of the headers (maybe the write_csv function is generic):\ndef gen_rows():\n yield OrderedDict(name='bob', age=25, weight=200)\n yield OrderedDict(name='jim', age=31, weight=180)\n\ndef write_csv():\n it = genrows()\n first_row = it.next() # __next__ in py3\n with open(\"people.csv\", \"w\") as outfile:\n wr = csv.DictWriter(outfile, fieldnames=list(first_row))\n wr.writeheader()\n wr.writerow(first_row)\n wr.writerows(it)\n\nNote: the OrderedDict constructor used here only preserves order in python >3.4. 
If order is important, use the OrderedDict([('name', 'bob'),('age',25)]) form.\n", "import csv\n\nwith open('file_name.csv', 'w') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(('colum1', 'colum2', 'colum3'))\n for key, value in dictionary.items():\n writer.writerow([key, value[0], value[1]])\n\nThis would be the simplest way to write data to .csv file\n", "import csv\ntoCSV = [{'name':'bob','age':25,'weight':200},\n {'name':'jim','age':31,'weight':180}]\nheader=['name','age','weight'] \ntry:\n with open('output'+str(date.today())+'.csv',mode='w',encoding='utf8',newline='') as output_to_csv:\n dict_csv_writer = csv.DictWriter(output_to_csv, fieldnames=header,dialect='excel')\n dict_csv_writer.writeheader()\n dict_csv_writer.writerows(toCSV)\n print('\\nData exported to csv succesfully and sample data')\nexcept IOError as io:\n print('\\n',io)\n\n", "a short solution with Pandas\nimport pandas as pd\n\nlist_of_dicts = [\n {'name': 'bob', 'age': 25, 'weight': 200},\n {'name': 'jim', 'age': 31, 'weight': 180},\n]\n\ndf = pd.DataFrame(list_of_dicts) \ndf.to_csv(\"names.csv\", index=False)\n\n" ]
[ 440, 35, 18, 9, 2, 2, 1, 1 ]
[]
[]
[ "csv", "data_conversion", "dictionary", "python" ]
stackoverflow_0003086973_csv_data_conversion_dictionary_python.txt
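A slightly more defensive variant of the DictWriter recipe above: it takes the union of keys over all rows (dicts keep insertion order on Python 3.7+), so rows missing a field still serialize cleanly:
import csv

rows = [{'name': 'bob', 'age': 25, 'weight': 200},
        {'name': 'jim', 'age': 31, 'weight': 180}]

# Union of keys across all rows, first-seen order preserved.
fieldnames = list(dict.fromkeys(k for row in rows for k in row))

with open('people.csv', 'w', newline='', encoding='utf8') as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames, restval='')
    writer.writeheader()
    writer.writerows(rows)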
Q: Vectors and Matrices from the NumPy Module In python, how to write program that create two 4 * 4 matrices A and B whose elements are random numbers. Then create a matrix C that looks like C = ⎡A B⎤ ⎣B A⎦ Find the diagonal of the matrix C. The diagonal elements are to be presented in a 4 * 2 matrix. import numpy as np matrix_A = np.random.randint(10, size=(4, 4)) matrix_B = np.random.randint(10, size=(4, 4)) matrix_C = np.array([[matrix_A, matrix_B], [matrix_B, matrix_A]]) d= matrix_C.diagonal() D=d.reshape(2,4) print(f'This is matrix C:\n{matrix_C}') print(f'These are the diagonals of Matrix C:\n{D}') A: The construction matrix_C = np.array([[matrix_A, matrix_B], [matrix_B, matrix_A]]) does not concatenate matrices, but creates 4th order tensor (put matrices inside matrix). You can check that by print(matrix_C.shape) # (2, 2, 4, 4) To lay out blocks call np.block, then all other parts of your code should work fine: matrix_C = np.block([[matrix_A, matrix_B], [matrix_B, matrix_A]]) print(matrix_C.shape) # (8, 8) d= matrix_C.diagonal() D=d.reshape(2,4) # np.array([matrix_A.diagonal(), matrix_A.diagonal()])
Vectors and Matrices from the NumPy Module
In python, how to write program that create two 4 * 4 matrices A and B whose elements are random numbers. Then create a matrix C that looks like C = ⎡A B⎤ ⎣B A⎦ Find the diagonal of the matrix C. The diagonal elements are to be presented in a 4 * 2 matrix. import numpy as np matrix_A = np.random.randint(10, size=(4, 4)) matrix_B = np.random.randint(10, size=(4, 4)) matrix_C = np.array([[matrix_A, matrix_B], [matrix_B, matrix_A]]) d= matrix_C.diagonal() D=d.reshape(2,4) print(f'This is matrix C:\n{matrix_C}') print(f'These are the diagonals of Matrix C:\n{D}')
[ "The construction\nmatrix_C = np.array([[matrix_A, matrix_B], [matrix_B, matrix_A]])\n\ndoes not concatenate matrices, but creates 4th order tensor (put matrices inside matrix). You can check that by\nprint(matrix_C.shape) # (2, 2, 4, 4)\n\nTo lay out blocks call np.block, then all other parts of your code should work fine:\nmatrix_C = np.block([[matrix_A, matrix_B], [matrix_B, matrix_A]])\nprint(matrix_C.shape) # (8, 8)\nd= matrix_C.diagonal()\nD=d.reshape(2,4) # np.array([matrix_A.diagonal(), matrix_A.diagonal()])\n\n" ]
[ 0 ]
[]
[]
[ "numpy", "python" ]
stackoverflow_0074534575_numpy_python.txt
Q: Logging setLevel is being ignored The below code is copied from the documentation. I am supposed to be able to see all the info logs. But I don't. I am only able to see the warn and above even though I've set setLevel to INFO. Why is this happening? foo.py: import logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.debug('debug message') logger.info('info message') logger.warn('warn message') logger.error('error message') logger.critical('critical message') Output: workingDirectory$ python foo.py warn message error message critical message Where did the info and debug messages go?? A: Replace the line logger.setLevel(logging.DEBUG) with logging.basicConfig(level=logging.DEBUG, format='%(message)s') and it should work as expected. If you don't configure logging with any handlers (as in your post - you only configure a level for your logger, but no handlers anywhere), you'll get an internal handler "of last resort" which is set to output just the message (with no other formatting) at the WARNING level. A: Try running logging.basicConfig() in there. Of note, I see you mention INFO, but use DEBUG. As written, it should show all five messages. Swap out DEBUG with INFO, and you should see four messages. import logging logging.basicConfig() logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.debug('debug message') logger.info('info message') logger.warn('warn message') logger.error('error message') logger.critical('critical message') edit: Do you have logging set up elsewhere in your code already? Can't reproduce the exact behavior you note with the specific code provided. A: As pointed by some users, using: logging.basicConfig(level=logging.DEBUG, format='%(message)s') like written in the accepted answer is not a good option because it sets the log level for the root logger, so it may lead to unexpected behaviours (eg. third party libraries may start to log debug messages if you set loglevel=logging.DEBUG) In my opinion the best solution is to set log level just for your logger, like this: import logging logger = logging.getLogger('MyLogger') handler = logging.StreamHandler() formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.DEBUG) Not really intuitive solution, but is necessary if you want to set log level only for 'MyLogger' and leave the root logger untouched. So, why is logging.basicConfig(level=logging.DEBUG, format='%(message)s') setting the log level globally? Well, actually it doesn't. As said, it's just changing the configuration of the root logger and, as described in the python documentation: Loggers should NEVER be instantiated directly, but always through the module-level function logging.getLogger(name). Multiple calls to getLogger() with the same name will always return a reference to the same Logger object. So, logging.basicConfig is creating a StreamHandler with a default Formatter and adding it to the root logger. The point is that if any other library is using the "root logger", you're going to set that log level for that library too so it can happen that you start to see debug logs from third party libraries. This is why I think it's better to create your own logger and set your own formatters and handlers, so you can leave the root logger untouched. A: This is technically also an "answer", because it can "solve" the problem. BUT I definitely DO NOT like it. 
It is not intuitive, and I lost 2+ hours over it. Before:
import logging

logger = logging.getLogger('foo')
logger.setLevel(logging.INFO)
logger.info('You can not see me')

# Or you can just use the following one-liner in command line.
# $ python -c "import logging; logger = logging.getLogger('foo'); logger.setLevel(logging.INFO); logger.info('You can not see me')"

After:
import logging
logging.debug('invisible magic') # <-- magic

logger = logging.getLogger('foo')
logger.setLevel(logging.INFO)
logger.info('But now you can see me')

# Or you can just use the following one-liner in command line.
$ python -c "import logging; logging.debug('invisible magic'); logger = logging.getLogger('foo'); logger.setLevel(logging.INFO); logger.info('But now you see me')"

PS: Comparing it to the current chosen answer, and @Vinay-Sajip's explanation, I can kind of understand why. But still, I wish it did not work that way.

A: If you want this to work WITHOUT basicConfig, you have to first set up the lowest possible level you'll log onto the logger. Since the logger sets a minimum threshold, handlers which have a lower threshold but belong to the same logger won't get those lower-threshold messages, since they're ignored by the logger in the first place. Intuitive, but not obvious.
We start by doing this:
lgr = logging.getLogger(name)
lgr.setLevel(logging.DEBUG)

Then, set up the handlers with the different levels you need; in my case, I want DEBUG logging on stdout and INFO logging to a rotating file, so I do the following:
rot_hndlr = RotatingFileHandler('filename.log',
                                maxBytes=log_size,
                                backupCount=3)
rot_hndlr.setFormatter(formatter)
rot_hndlr.setLevel(logging.INFO)
lgr.addHandler(rot_hndlr)

stream_hndlr = logging.StreamHandler()
stream_hndlr.setFormatter(stream_formatter)
lgr.addHandler(stream_hndlr)

Then, to test, I do this:
lgr.debug("Hello")
lgr.info("There")

My stdout (console) will look like this:
Hello
There

and my filename.log file will look like this:
There

A: In short, changing the level in logging.basicConfig will influence the global settings. It's better to set the level for each logger, and for each specific handler in the logger. The following is an example that displays all levels on the console and only records messages >= ERROR in log_file.log. Notice the level for each handler is different.
import logging

# Define logger
logger = logging.getLogger('test')

# Set level for logger
logger.setLevel(logging.DEBUG)

# Define the handler and formatter for console logging
consoleHandler = logging.StreamHandler() # Define StreamHandler
consoleHandler.setLevel(logging.DEBUG) # Set level
consoleFormatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') # Define formatter
consoleHandler.setFormatter(consoleFormatter) # Set formatter
logger.addHandler(consoleHandler) # Add handler to logger

# Define the handler and formatter for file logging
log_file = 'log_file'
fileHandler = logging.FileHandler(f'{log_file}.log') # Define FileHandler
fileHandler.setLevel(logging.ERROR) # Set level
fileFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # Define formatter
fileHandler.setFormatter(fileFormatter) # Set formatter
logger.addHandler(fileHandler) # Add handler to logger

# Test
logger.debug('This is a debug')
logger.info('This is an info')
logger.warning('This is a warning')
logger.error('This is an error')
logger.critical('This is a critical')

Console output:
# Test
test - DEBUG - This is a debug
test - INFO - This is an info
test - WARNING - This is a warning
test - ERROR - This is an error
test - CRITICAL - This is a critical

File log_file.log content:
2021-09-22 12:50:50,938 - test - ERROR - This is an error
2021-09-22 12:50:50,938 - test - CRITICAL - This is a critical

To review your logger's level:
logger.level

The result should be one of the following:
10 # DEBUG
20 # INFO
30 # WARNING
40 # ERROR
50 # CRITICAL

To review your handlers' levels:
logger.handlers

[<StreamHandler stderr (DEBUG)>,
 <FileHandler ***/log_file.log (ERROR)>]

A: The accepted answer does not work for me on Win10, Python 3.7.2.
My solution:
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

It's order sensitive.

A: You have to set the basicConfig of the root logger to DEBUG, then you can set the level of your individual loggers to more restrictive levels.
This is not what I expected. Here is what I had to do:
#!/usr/bin/env python3
import logging

# by default this is WARNING. Leaving it as WARNING here overrides
# whatever setLevel-ing you do later so it seems they are ignored.
logging.basicConfig(level=logging.DEBUG)

l = logging.getLogger(__name__)
l.setLevel(level=logging.INFO)
# if I hadn't called basicConfig with DEBUG level earlier,
# info messages would STILL not be shown despite calling
# setLevel above. However now debug messages will not be shown
# for l because setLevel set it to INFO

l.warning('A warning message will be displayed')
l.info('A friendly info message will be displayed')
l.debug('A friendly debug message will not be displayed')

A: Most of the answers that I've found for this issue use the basicConfig of the root logger. It's not helpful for those who intend to use multiple independent loggers that were not initialised with basicConfig. The use of basicConfig implies that the loglevels of ALL loggers will be changed. It also has the unfortunate side effect of generating duplicate logs.
So I tried over several days experimenting with different ways to manipulate the loglevels and came up with one that finally worked.
The trick was to not only change the log levels of all the handlers but also all the handlers of the parent of the logger.
def setLevel(self, infoLevel):
    # To dynamically reset the loglevel, you need to also change the parent levels as well as all handlers!
self.logger.parent.setLevel(infoLevel) for handler in self.logger.parent.handlers: handler.setLevel(infoLevel) self.logger.setLevel(infoLevel) for handler in self.logger.handlers: handler.setLevel(infoLevel) The inspiration came from the fact that the basicConfig changes the root logger settings, so I was trying to do the same without using basicConfig. For those that are interested, I did a little Python project on Github that illustrates the different issues with setting loglevel of the logger (it works partially), proves the SLogger (Sample Logger) implementation works, and also illustrates the duplicate log issue with basicConfig when using multiple loggers not initialised with it. https://github.com/FrancisChung/python-logging-playground TLDR: If you're only interested in a working sample code for the logger, the implentation is listed below import logging CRITICAL = 50 FATAL = CRITICAL ERROR = 40 WARNING = 30 WARN = WARNING INFO = 20 DEBUG = 10 NOTSET = 0 class SLogger(): """ SLogger : Sample Logger class using the standard Python logging Library Parameters: name : Name of the Logger infoLevel : logging level of the Logger (e.g. logging.DEBUG/INFO/WARNING/ERROR) """ def __init__(self, name: str, infoLevel=logging.INFO): try: if name is None: raise ValueError("Name argument not specified") logformat = '%(asctime)s %(levelname)s [%(name)s %(funcName)s] %(message)s' self.logformat = logformat self.name = name.upper() self.logger = logging.getLogger(self.name) self.logger.setLevel(infoLevel) self.add_consolehandler(infoLevel, logformat) except Exception as e: if self.logger: self.logger.error(str(e)) def error(self, message): self.logger.error(message) def info(self, message): self.logger.info(message) def warning(self, message): self.logger.warning(message) def debug(self, message): self.logger.debug(message) def critical(self, message): self.logger.critical(message) def setLevel(self, infoLevel): # To dynamically reset the loglevel, you need to also change the parent levels as well as all handlers! self.logger.parent.setLevel(infoLevel) for handler in self.logger.parent.handlers: handler.setLevel(infoLevel) self.logger.setLevel(infoLevel) for handler in self.logger.handlers: handler.setLevel(infoLevel) return self.logger.level def add_consolehandler(self, infoLevel=logging.INFO, logformat='%(asctime)s %(levelname)s [%(name)s %(funcName)s] %(message)s'): sh = logging.StreamHandler() sh.setLevel(infoLevel) formatter = logging.Formatter(logformat) sh.setFormatter(formatter) self.logger.addHandler(sh) A: Create object the right way, e.g. inspired by Google: import logging formatter = logging.Formatter('%(asctime)s %(threadName)s: %(message)s') log = logging.getLogger(__name__) log.setLevel(logging.DEBUG) handler = logging.StreamHandler() handler.setFormatter(formatter) log.addHandler(handler) log.debug('debug message') log.info('info message') log.warn('warn message') log.error('error message') log.critical('critical message') 2022-11-22 23:17:59,342 MainThread: debug message 2022-11-22 23:17:59,342 MainThread: info message 2022-11-22 23:17:59,342 MainThread: warn message 2022-11-22 23:17:59,342 MainThread: error message 2022-11-22 23:17:59,342 MainThread: critical message
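To see this behaviour directly, here is a minimal sketch (assuming Python 3.2+): a logger without handlers falls back to logging.lastResort, a stderr handler fixed at the WARNING level, which is exactly why setLevel alone appears to be ignored.
import logging

logger = logging.getLogger('demo')
logger.setLevel(logging.INFO)

print(logger.getEffectiveLevel())  # 20 (INFO): the logger level did change...
print(logger.handlers)             # []: ...but no handler is attached
print(logging.lastResort)          # the WARNING-level "handler of last resort" used instead

logger.addHandler(logging.StreamHandler())  # attach any real handler
logger.info('now this INFO message is displayed')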
Logging setLevel is being ignored
The below code is copied from the documentation. I am supposed to be able to see all the info logs. But I don't. I am only able to see the warn and above even though I've set setLevel to INFO. Why is this happening? foo.py: import logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.debug('debug message') logger.info('info message') logger.warn('warn message') logger.error('error message') logger.critical('critical message') Output: workingDirectory$ python foo.py warn message error message critical message Where did the info and debug messages go??
[ "Replace the line\nlogger.setLevel(logging.DEBUG)\n\nwith\nlogging.basicConfig(level=logging.DEBUG, format='%(message)s')\n\nand it should work as expected. If you don't configure logging with any handlers (as in your post - you only configure a level for your logger, but no handlers anywhere), you'll get an internal handler \"of last resort\" which is set to output just the message (with no other formatting) at the WARNING level.\n", "Try running logging.basicConfig() in there. Of note, I see you mention INFO, but use DEBUG. As written, it should show all five messages. Swap out DEBUG with INFO, and you should see four messages.\nimport logging\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nlogger.debug('debug message')\nlogger.info('info message')\nlogger.warn('warn message')\nlogger.error('error message')\nlogger.critical('critical message')\n\nedit: Do you have logging set up elsewhere in your code already? Can't reproduce the exact behavior you note with the specific code provided.\n", "As pointed by some users, using:\nlogging.basicConfig(level=logging.DEBUG, format='%(message)s')\n\nlike written in the accepted answer is not a good option because it sets the log level for the root logger, so it may lead to unexpected behaviours (eg. third party libraries may start to log debug messages if you set loglevel=logging.DEBUG)\nIn my opinion the best solution is to set log level just for your logger, like this:\nimport logging\n\nlogger = logging.getLogger('MyLogger')\nhandler = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.setLevel(logging.DEBUG)\n\nNot really intuitive solution, but is necessary if you want to set log level only for 'MyLogger' and leave the root logger untouched.\nSo, why is logging.basicConfig(level=logging.DEBUG, format='%(message)s') setting the log level globally?\nWell, actually it doesn't. As said, it's just changing the configuration of the root logger and, as described in the python documentation:\n\nLoggers should NEVER be instantiated directly, but always through the\nmodule-level function logging.getLogger(name). Multiple calls to\ngetLogger() with the same name will always return a reference to the\nsame Logger object.\n\nSo, logging.basicConfig is creating a StreamHandler with a default Formatter and adding it to the root logger.\nThe point is that if any other library is using the \"root logger\", you're going to set that log level for that library too so it can happen that you start to see debug logs from third party libraries.\nThis is why I think it's better to create your own logger and set your own formatters and handlers, so you can leave the root logger untouched.\n", "This is technically also an \"answer\", because it can \"solve\" the problem. BUT I definitely DO NOT like it. 
It is not intuitive, and I lost 2+ hours over it.\nBefore:\nimport logging\nlogger = logging.getLogger('foo')\nlogger.setLevel(logging.INFO)\nlogger.info('You can not see me')\n# Or you can just use the following one-liner in command line.\n# $ python -c \"import logging; logger = logging.getLogger('foo'); logger.setLevel(logging.INFO); logger.info('You can not see me')\"\n\nAfter:\nimport logging\n\nlogging.debug('invisible magic') # <-- magic\n\nlogger = logging.getLogger('foo')\nlogger.setLevel(logging.INFO)\nlogger.info('But now you can see me')\n# Or you can just use the following one-liner in command line.\n$ python -c \"import logging; logging.debug('invisible magic'); logger = logging.getLogger('foo'); logger.setLevel(logging.INFO); logger.info('But now you see me')\"\n\nPS: Comparing it to the current chosen answer, and @Vinay-Sajip's explanation, I can kind of understand why. But still, I wish it was not working that way.\n", "If you want this to work WITHOUT basicConfig, you have to first set up the lowest possible level you'll log onto the logger. Since the logger sets a minimum threshold, handlers which have a lower threshold but belong to the same logger won't get those lower threshold messages since they're ignored by the logger in the first place. Intuitive, but not obvious.\nWe start by doing this:\nlgr = logging.getLogger(name)\nlgr.setLevel(logging.DEBUG)\n\nThen, set up the handlers with the different levels you need, in my case I want DEBUG logging on stdout and INFO logging to a rotating file, so I do the following:\nrot_hndlr = RotatingFileHandler('filename.log',\n maxBytes=log_size,\n backupCount=3)\n \nrot_hndlr.setFormatter(formatter)\nrot_hndlr.setLevel(logging.INFO)\nlgr.addHandler(rot_hndlr)\n\nstream_hndlr = logging.StreamHandler()\nstream_hndlr.setFormatter(stream_formatter)\nlgr.addHandler(stream_hndlr)\n\nThen, to test, I do this:\nlgr.debug(\"Hello\")\nlgr.info(\"There\")\n\nMy stdout (console) will look like this:\nHello\nThere\n\nand my filename.log file will look like this:\nThere\n\n", "In short, change the level in logging.basicConfig will influence the global settings.\nYou should better set level for each logger and the specific handler in the logger.\nThe following is an example that displays all levels on the console and only records messages >= errors in log_file.log. 
Notice the level for each handler is different.\nimport logging\n# Define logger\nlogger = logging.getLogger('test')\n\n# Set level for logger\nlogger.setLevel(logging.DEBUG)\n\n# Define the handler and formatter for console logging\nconsoleHandler = logging.StreamHandler() # Define StreamHandler\nconsoleHandler.setLevel(logging.DEBUG) # Set level\nconcolsFormatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') # Define formatter\nconsoleHandler.setFormatter(concolsFormatter) # Set formatter\nlogger.addHandler(consoleHandler) # Add handler to logger\n\n# Define the handler and formatter for file logging\nlog_file = 'log_file'\nfileHandler = logging.FileHandler(f'{log_file}.log') # Define FileHandler\nfileHandler.setLevel(logging.ERROR) # Set level\nfileFormatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # Define formatter\nfileHandler.setFormatter(fileFormatter) # Set formatter\nlogger.addHandler(fileHandler) # Add handler to logger\n\n# Test\nlogger.debug('This is a debug')\nlogger.info('This is an info')\nlogger.warning('This is a warning')\nlogger.error('This is an error')\nlogger.critical('This is a critical')\n\nConsole output:\n# Test\ntest - DEBUG - This is a debug\ntest - INFO - This is an info\ntest - WARNING - This is a warning\ntest - ERROR - This is an error\ntest - CRITICAL - This is a critical\n\nFile log_file.log content:\n2021-09-22 12:50:50,938 - test - ERROR - This is an error\n2021-09-22 12:50:50,938 - test - CRITICAL - This is a critical\n\n\nTo review your logger's level:\nlogger.level\n\nThe result should be one of the following:\n10 # DEBUG\n20 # INFO\n30 # WARNING\n40 # ERROR\n50 # CRITICAL\n\nTo review your handlers's levels:\nlogger.handlers\n\n[<StreamHandler stderr (DEBUG)>,\n <FileHandler ***/log_file.log (ERROR)>]\n\n", "The accepted answer does not work for me on Win10, Python 3.7.2.\nMy solution:\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nIt's order sensitive.\n", "You have to set the basicConfig of the root logger to DEBUG, then you can set the level of your individual loggers to more restrictive levels.\nThis is not what I expected. Here is what I had to do:\n#!/usr/bin/env python3\n\nimport logging\n# by default this is WARNING. Leaving it as WARNING here overrides \n# whatever setLevel-ing you do later so it seems they are ignored.\nlogging.basicConfig(level=logging.DEBUG)\n\nl = logging.getLogger(__name__)\nl.setLevel(level=logging.INFO)\n# if I hadn't called basicConfig with DEBUG level earlier, \n# info messages would STILL not be shown despite calling \n# setLevel above. However now debug messages will not be shown \n# for l because setLevel set it to INFO\n\nl.warning('A warning message will be displayed')\nl.info('A friendly info message will be displayed')\nl.debug('A friendly debug message will not be displayed')\n\n", "Most of the answers that I've found for this issue uses the basicConfig of the root logger.\nIt's not helpful for those who intend to use multiple independent loggers that were not initialised with basicConfig. The use of basicConfig implies that the loglevels of ALL loggers will be changed. 
It also had the unfortunate side effect of generating duplicate logs.\nSo I tried over several days experimenting with different ways to manipulate the loglevels and came up with one that finally worked.\nThe trick was to not only change the log levels of all the handlers but also the all the handlers of the parent of the logger.\n def setLevel(self, infoLevel):\n # To dynamically reset the loglevel, you need to also change the parent levels as well as all handlers!\n\n self.logger.parent.setLevel(infoLevel)\n for handler in self.logger.parent.handlers:\n handler.setLevel(infoLevel)\n\n self.logger.setLevel(infoLevel)\n for handler in self.logger.handlers:\n handler.setLevel(infoLevel)\n\nThe inspiration came from the fact that the basicConfig changes the root logger settings, so I was trying to do the same without using basicConfig.\nFor those that are interested, I did a little Python project on Github that illustrates the different issues with setting loglevel of the logger (it works partially), proves the SLogger (Sample Logger) implementation works, and also illustrates the duplicate log issue with basicConfig when using multiple loggers not initialised with it.\nhttps://github.com/FrancisChung/python-logging-playground\nTLDR: If you're only interested in a working sample code for the logger, the implentation is listed below\nimport logging\n\nCRITICAL = 50\nFATAL = CRITICAL\nERROR = 40\nWARNING = 30\nWARN = WARNING\nINFO = 20\nDEBUG = 10\nNOTSET = 0\n\n\nclass SLogger():\n \"\"\"\n SLogger : Sample Logger class using the standard Python logging Library\n\n Parameters:\n name : Name of the Logger\n infoLevel : logging level of the Logger (e.g. logging.DEBUG/INFO/WARNING/ERROR)\n \"\"\"\n\n def __init__(self, name: str, infoLevel=logging.INFO):\n try:\n if name is None:\n raise ValueError(\"Name argument not specified\")\n\n logformat = '%(asctime)s %(levelname)s [%(name)s %(funcName)s] %(message)s'\n self.logformat = logformat\n self.name = name.upper()\n self.logger = logging.getLogger(self.name)\n self.logger.setLevel(infoLevel)\n\n self.add_consolehandler(infoLevel, logformat)\n\n except Exception as e:\n if self.logger:\n self.logger.error(str(e))\n\n def error(self, message):\n self.logger.error(message)\n\n def info(self, message):\n self.logger.info(message)\n\n def warning(self, message):\n self.logger.warning(message)\n\n def debug(self, message):\n self.logger.debug(message)\n\n def critical(self, message):\n self.logger.critical(message)\n\n def setLevel(self, infoLevel):\n # To dynamically reset the loglevel, you need to also change the parent levels as well as all handlers!\n self.logger.parent.setLevel(infoLevel)\n for handler in self.logger.parent.handlers:\n handler.setLevel(infoLevel)\n\n self.logger.setLevel(infoLevel)\n for handler in self.logger.handlers:\n handler.setLevel(infoLevel)\n\n return self.logger.level\n\n def add_consolehandler(self, infoLevel=logging.INFO,\n logformat='%(asctime)s %(levelname)s [%(name)s %(funcName)s] %(message)s'):\n sh = logging.StreamHandler()\n sh.setLevel(infoLevel)\n\n formatter = logging.Formatter(logformat)\n sh.setFormatter(formatter)\n self.logger.addHandler(sh)\n\n", "Create object the right way, e.g. 
inspired by Google:\nimport logging\n\nformatter = logging.Formatter('%(asctime)s %(threadName)s: %(message)s')\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\nhandler = logging.StreamHandler()\nhandler.setFormatter(formatter)\nlog.addHandler(handler)\n\nlog.debug('debug message')\nlog.info('info message')\nlog.warn('warn message')\nlog.error('error message')\nlog.critical('critical message')\n\n2022-11-22 23:17:59,342 MainThread: debug message\n2022-11-22 23:17:59,342 MainThread: info message\n2022-11-22 23:17:59,342 MainThread: warn message\n2022-11-22 23:17:59,342 MainThread: error message\n2022-11-22 23:17:59,342 MainThread: critical message\n\n" ]
[ 68, 49, 40, 5, 3, 2, 1, 1, 1, 0 ]
[]
[]
[ "logging", "python" ]
stackoverflow_0043109355_logging_python.txt
Q: How do I sort a class with multiple attributes? Suppose I have a class named Fish. Now I ask for user input giving the name and the size of each fish. How can I sort those input values by the size attribute (in decreasing order) and then by the name attribute (alphabetically)?
class Fish:
    def __init__(self, size, name):
        self.size:int = int(size)
        self.name:str = name
        pass


def main():
    t = input()

For example, the user inputs the following:
d 3
a 1
c 2
b 1

After the sorting it should be (sorted first by size; if the sizes are equal, sorted by name):
d 3
c 2
a 1
b 1

A: In order to override or declare sorting for an object, you should override the comparison operators. You need to specify one of (==, !=) and one of (>, <).
class Fish:
    def __init__(self, size, name):
        self.size = int(size)
        self.name = name

    def __eq__(self,other):
        return self.size == other.size and self.name==other.name

    def __gt__(self,other):
        if self.size != other.size:
            return self.size > other.size
        else:
            return self.name < other.name

An example
f1 = Fish(1,"a")
f2 = Fish(3,'d')
f3 = Fish(2,'c')
f4 = Fish(1,'b')
fishes = [f1,f2,f3,f4]
for fish in sorted(fishes, reverse = True):
    print(fish.name, fish.size)

Output:
d 3
c 2
a 1
b 1
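For comparison, a minimal sketch of an alternative that reuses the Fish class from the question: the same ordering can be obtained without overriding comparison operators by giving sorted a key tuple, negating the numeric size for the descending part.
fishes = [Fish(3, 'd'), Fish(1, 'a'), Fish(2, 'c'), Fish(1, 'b')]
for fish in sorted(fishes, key=lambda f: (-f.size, f.name)):
    print(fish.name, fish.size)
# d 3
# c 2
# a 1
# b 1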
How do I sort a class with multiple attributes?
Suppose I have a class named Fish. Now I ask for user input giving the name and the size of each fish. How can I sort those input values by the size attribute (in decreasing order) and then by the name attribute (alphabetically)?
class Fish:
    def __init__(self, size, name):
        self.size:int = int(size)
        self.name:str = name
        pass


def main():
    t = input()

For example, the user inputs the following:
d 3
a 1
c 2
b 1

After the sorting it should be (sorted first by size; if the sizes are equal, sorted by name):
d 3
c 2
a 1
b 1
[ "In order to override or declare sorting for an object, you should override the comparison operators. You need to specify one of (=, !=) and one of (>, <).\nclass Fish:\n def __init__(self, size, name):\n self.size = int(size)\n self.name = name\n\n def __eq__(self,other):\n return self.size == other.size and self.name==other.name\n\n def __gt__(self,other):\n if self.size != other.size:\n return self.size > other.size\n else:\n return self.name < other.name\n\nAn example\nf1 = Fish(1,\"a\")\nf2 = Fish(3,'d')\nf3 = Fish(2,'c')\nf4 = Fish(1,'b')\nfishes = [f1,f2,f3,f4]\nfor fish in sorted(fishes, reverse = True):\n print(fish.name, fish.size)\n\nOutput:\nd 3\nc 2\na 1\nb 1\n\n" ]
[ 0 ]
[]
[]
[ "class", "object", "python" ]
stackoverflow_0074535681_class_object_python.txt
Q: I want to create a code in python or Matlab to divide a sequence into pairs and give the values to these pairs I want to create a program in python or Matlab to divide a sequence into pairs such that first letter pairs with all other letters and give the values to these pairs. Example "ABCBADD" AB=1 AC=1/2 AB=1/3 AA=1/4 AD=1/5 AD=1/6 Now skip first letter of sequence "BCBADD" BC=1 BB=1/2 BA=1/3 BD=1/4 BD=1/5 Now skip first and so on "CBADD" and add same pair values as AB=1+1/3,AD=1/5+1/6,BD=1/4+1/5 I will be thankful to help me A: you could do with two loop: s = "ABCBADD" output = [(s[i] + c, 1 /(idx + 1)) for i in range(len(s) -1) for idx, c in enumerate(s[i+1:])] output: print(output) [('AB', 1.0), ('AC', 0.5), ('AB', 0.3333333333333333), ('AA', 0.25), ('AD', 0.2), ('AD', 0.16666666666666666), ('BC', 1.0), ('BB', 0.5), ('BA', 0.3333333333333333), ('BD', 0.25), ('BD', 0.2), ('CB', 1.0), ('CA', 0.5), ('CD', 0.3333333333333333), ('CD', 0.25), ('BA', 1.0), ('BD', 0.5), ('BD', 0.3333333333333333), ('AD', 1.0), ('AD', 0.5), ('DD', 1.0)]
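To also accumulate the values of repeated pairs, as the question asks (AB = 1 + 1/3 and so on), here is one possible sketch along the same lines; a dict keyed by the pair is one way to do it, not the only one:
from collections import defaultdict

s = "ABCBADD"
totals = defaultdict(float)
for i in range(len(s) - 1):
    for idx, c in enumerate(s[i+1:]):
        totals[s[i] + c] += 1 / (idx + 1)  # same 1, 1/2, 1/3, ... weights as above

print(totals['AB'])   # 1.3333... == 1 + 1/3
print(dict(totals))   # every pair with its summed value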
I want to create a code in python or Matlab to divide a sequence into pairs and give the values to these pairs
I want to create a program in python or Matlab to divide a sequence into pairs such that first letter pairs with all other letters and give the values to these pairs. Example "ABCBADD" AB=1 AC=1/2 AB=1/3 AA=1/4 AD=1/5 AD=1/6 Now skip first letter of sequence "BCBADD" BC=1 BB=1/2 BA=1/3 BD=1/4 BD=1/5 Now skip first and so on "CBADD" and add same pair values as AB=1+1/3,AD=1/5+1/6,BD=1/4+1/5 I will be thankful to help me
[ "you could do with two loop:\ns = \"ABCBADD\" \noutput = [(s[i] + c, 1 /(idx + 1)) for i in range(len(s) -1) for idx, c in enumerate(s[i+1:])]\n\noutput:\nprint(output)\n\n[('AB', 1.0),\n ('AC', 0.5),\n ('AB', 0.3333333333333333),\n ('AA', 0.25),\n ('AD', 0.2),\n ('AD', 0.16666666666666666),\n ('BC', 1.0),\n ('BB', 0.5),\n ('BA', 0.3333333333333333),\n ('BD', 0.25),\n ('BD', 0.2),\n ('CB', 1.0),\n ('CA', 0.5),\n ('CD', 0.3333333333333333),\n ('CD', 0.25),\n ('BA', 1.0),\n ('BD', 0.5),\n ('BD', 0.3333333333333333),\n ('AD', 1.0),\n ('AD', 0.5),\n ('DD', 1.0)]\n\n" ]
[ 1 ]
[]
[]
[ "function", "loops", "matlab", "python", "python_3.x" ]
stackoverflow_0074535788_function_loops_matlab_python_python_3.x.txt
Q: Deploy a flask app using a Cloudera Application I have been using the following python 3 script in a CDSW session which runs just fine as long as the session is not killed. I am able to click on the top-right grid and select my app
hello.py
from flask import Flask
import os

app = Flask(__name__)

@app.route('/')
def index():
    return 'Web App with Python Flask!'

app.run(host=os.getenv("CDSW_IP_ADDRESS"), port=int(os.getenv('CDSW_PUBLIC_PORT')))

I would like this app to run 24/7, so instead of using a Session or scheduling a job that never ends, I would like to create a CDSW Application so that it doesn't stop.
These are the settings on my application:
Logs:
from flask import Flask
import os

app = Flask(__name__)

@app.route('/')
def index():
    return 'Web App with Python Flask!'

app.run(host=os.getenv("CDSW_IP_ADDRESS"), port=int(os.getenv('CDSW_PUBLIC_PORT')))
 * Serving Flask app "__main__" (lazy loading)
 * Environment: production
   WARNING: Do not use the development server in a production environment.
   Use a production WSGI server instead.
 * Debug mode: off
OSError: [Errno 98] Address already in use

I tried to change the port from CDSW_PUBLIC_PORT to CDSW_APP_PORT but it ends up the same.
A: As it mentions here maybe you need to change this line of code
app.run(host=os.getenv("CDSW_IP_ADDRESS"), port=int(os.getenv('CDSW_PUBLIC_PORT')))

to this
app.run(host="127.0.0.1", port=int(os.environ['CDSW_APP_PORT']))

Hope it works!
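Putting that fix back into the original script, a sketch follows; the 8080 fallback is an assumption for running the same file outside CDSW, where CDSW_APP_PORT is not set:
import os
from flask import Flask

app = Flask(__name__)

@app.route('/')
def index():
    return 'Web App with Python Flask!'

# Inside a CDSW Application the platform sets CDSW_APP_PORT for you.
port = int(os.environ.get('CDSW_APP_PORT', '8080'))
app.run(host='127.0.0.1', port=port)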
Deploy a flask app using a Cloudera Application
I have been using the following python 3 script in a CDSW session which runs just fine as long as the session is not killed. I am able to click on the top-right grid and select my app
hello.py
from flask import Flask
import os

app = Flask(__name__)

@app.route('/')
def index():
    return 'Web App with Python Flask!'

app.run(host=os.getenv("CDSW_IP_ADDRESS"), port=int(os.getenv('CDSW_PUBLIC_PORT')))

I would like this app to run 24/7, so instead of using a Session or scheduling a job that never ends, I would like to create a CDSW Application so that it doesn't stop.
These are the settings on my application:
Logs:
from flask import Flask
import os

app = Flask(__name__)

@app.route('/')
def index():
    return 'Web App with Python Flask!'

app.run(host=os.getenv("CDSW_IP_ADDRESS"), port=int(os.getenv('CDSW_PUBLIC_PORT')))
 * Serving Flask app "__main__" (lazy loading)
 * Environment: production
   WARNING: Do not use the development server in a production environment.
   Use a production WSGI server instead.
 * Debug mode: off
OSError: [Errno 98] Address already in use

I tried to change the port from CDSW_PUBLIC_PORT to CDSW_APP_PORT but it ends up the same.
[ "As it mentions here maybe you need to change this line of code\napp.run(host=os.getenv(\"CDSW_IP_ADDRESS\"), port=int(os.getenv('CDSW_PUBLIC_PORT')))\n\nto this\napp.run(host=\"127.0.0.1\", port=int(os.environ['CDSW_APP_PORT']))\n\nHope it works!\n" ]
[ 0 ]
[]
[]
[ "cdsw", "cloudera", "flask", "python" ]
stackoverflow_0072126030_cdsw_cloudera_flask_python.txt
Q: How to remove excess whitespaces in entire python dataframe columns What is the pythonic way of removing all excess whitespaces in a dataframe (all the columns)? I know the method .str.strip() can be used for a single column or for each column in turn. The dataframe has many columns, so I would like to apply the method to the entire dataframe. The whitespaces occur at different points: at the beginning of the text, in between, and at the end. Attached is a view of what the dataframe looks like before and after removing the spaces. The dataframe below is the expected result. I have searched, and most related questions I came across were for a single column.
Sample data
import pandas as pd

data = [[' James Brown ', '10', 'USA'], [' James Bond', '15', 'UK'], ['Jimbo Bosco ', '14', 'Unknown']]
df = pd.DataFrame(data, columns = ['Name', 'Age', 'Country'])

A: You could use apply:
df = df.applymap(lambda x: " ".join(x.split()) if isinstance(x, str) else x)

A: An idea would be to do a combination of:

regex to remove duplicate spaces (e.g. " James   Bond" to " James Bond")
str.strip to remove leading/trailing spaces (e.g. " James Bond" to "James Bond").

You could do this one of two ways:
1. On the whole DataFrame:
df = df.replace("\s+", " ", regex=True).apply(lambda x: x.str.strip())

2. On each column individually:
for col in ["Name", "Country"]:
    df[col] = df[col].replace("\s+", " ", regex=True).str.strip()
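One more variant, a sketch assuming only the object-dtype columns hold strings: select those columns first so numeric columns are never touched, and let str.split/str.join both collapse inner runs and strip the ends in one pass.
import pandas as pd

data = [[' James Brown ', '10', 'USA'], [' James Bond', '15', 'UK'], ['Jimbo Bosco ', '14', 'Unknown']]
df = pd.DataFrame(data, columns=['Name', 'Age', 'Country'])

str_cols = df.select_dtypes(include='object').columns   # only string-like columns
df[str_cols] = df[str_cols].apply(lambda s: s.str.split().str.join(' '))
print(df)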
How to remove excess whitespaces in entire python dataframe columns
What is the pythonic way of removing all excess whitespaces in a dataframe (all the columns)? I know the method .str.strip() can be used for a single column or for each column in turn. The dataframe has many columns, so I would like to apply the method to the entire dataframe. The whitespaces occur at different points: at the beginning of the text, in between, and at the end. Attached is a view of what the dataframe looks like before and after removing the spaces. The dataframe below is the expected result. I have searched, and most related questions I came across were for a single column.
Sample data
import pandas as pd

data = [[' James Brown ', '10', 'USA'], [' James Bond', '15', 'UK'], ['Jimbo Bosco ', '14', 'Unknown']]
df = pd.DataFrame(data, columns = ['Name', 'Age', 'Country'])
[ "You could use apply:\ndf = df.applymap(lambda x: \" \".join(x.split()) if isinstance(x, str) else x)\n\n", "An idea would be to do a combination of:\n\nregex to remove duplicate spaces (e.g \" James Bond\" to \" James Bond\")\nstr.strip to remove leading/trailing spaces (e.g \" James Bond\" to \"James Bond\").\n\nYou could do this one of two ways:\n1. On the whole DataFrame:\ndf = df.replace(\"\\s+\", \" \", regex=True).apply(lambda x: x.str.strip())\n\n2. On each column individually:\nfor col in [\"Name\", \"Country\"]:\n df[col] = df[col].replace(\"\\s+\", \" \", regex=True).str.strip()\n\n" ]
[ 2, 0 ]
[ "This works for me, seems shorter and cleaner:\ndf[col] = df[col].str.replace(' ','')\n\nYou can use it to replace any string items in the column values.\n" ]
[ -1 ]
[ "dataframe", "pandas", "python" ]
stackoverflow_0070770016_dataframe_pandas_python.txt
Q: penalty in multitrip vrp if different vehicle visits a destination in 2nd trip ORTOOLS I have implemented a multi-trip VRP (allowing vehicles to visit a destination more than once) using ortools. This has been done by duplicating nodes for destinations and introducing virtual depots with negative loads. I want the same vehicle that visited a destination on the 1st trip to be the one that visits it again on the 2nd trip. This is a soft constraint, and a penalty should be added to the objective function if it is not followed. How can we implement this?
A:  /// Adds a soft constraint to force a set of variable indices to be on the
    /// same vehicle. If all nodes are not on the same vehicle, each extra vehicle
    /// used adds 'cost' to the cost function.
    void AddSoftSameVehicleConstraint(const std::vector<int64_t>& indices,
                                      int64_t cost);

Reference
Using this, I think you can model your requirement.
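The same method appears to be exposed in the Python wrapper as RoutingModel.AddSoftSameVehicleConstraint (verify against your ortools version). Here is a hedged sketch of wiring it into the node-duplication setup from the question; routing, manager and the destination_copies mapping are assumed to come from a standard pywrapcp setup and are not defined here:
same_vehicle_cost = 1000  # assumed penalty added per extra vehicle used

# destination_copies: hypothetical dict mapping each original destination
# node to the list of its duplicated nodes for later trips
for original, copies in destination_copies.items():
    indices = [manager.NodeToIndex(n) for n in [original] + copies]
    routing.AddSoftSameVehicleConstraint(indices, same_vehicle_cost)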
penalty in multitrip vrp if different vehicle visits a destination in 2nd trip ORTOOLS
I have implemented a multi-trip VRP (allowing vehicles to visit a destination more than once) using ortools. This has been done by duplicating nodes for destinations and introducing virtual depots with negative loads. I want the same vehicle that visited a destination on the 1st trip to be the one that visits it again on the 2nd trip. This is a soft constraint, and a penalty should be added to the objective function if it is not followed. How can we implement this?
[ " /// Adds a soft constraint to force a set of variable indices to be on the\n /// same vehicle. If all nodes are not on the same vehicle, each extra vehicle\n /// used adds 'cost' to the cost function.\n void AddSoftSameVehicleConstraint(const std::vector<int64_t>& indices,\n int64_t cost);\n\nReference\nUsing this, I think you can model your requirement.\n" ]
[ 1 ]
[]
[]
[ "or_tools", "python", "vehicle_routing" ]
stackoverflow_0074531066_or_tools_python_vehicle_routing.txt
Q: Change default constructor argument value (inherited from parent class) in subclass I have a Parent class with a default value for the attribute arg2. I want to create a subclass Child which has a different default value for the same attribute. I need to use *args and **kwargs in Child. I tried the following, but it is not working: class Parent(object): def __init__(self, arg1='something', arg2='old default value'): self.arg1 = arg1 self.arg2 = arg2 print('arg1:', self.arg1) print('arg2:', self.arg2) class Child(Parent): def __init__(self, *args, **kwargs): super(Child, self).__init__(*args, **kwargs) self.arg2 = kwargs.pop('arg2', 'new value') This is not working. In fact, I get: >>> c = Child() arg1: something arg2: default value # This is still the old value >>> c.arg2 'new value' # Seems more or less ok >>> c = Child('one', 'two') arg1: one arg2: two >>> c.arg2 'new value' # This is wrong, it has overridden the specified argument 'two' A: You need to set the default in kwargs before passing it on to super(); this is tricky as you need to ensure that the same value is not already in args too: class Child(Parent): def __init__(self, *args, **kwargs): if len(args) < 2 and 'arg2' not in kwargs: kwargs['arg2'] = 'new value' super(Child, self).__init__(*args, **kwargs) This relies on knowing how many arguments are there to fill however. You'd have to use introspection of super().__init__ for this to work in the general case: from inspect import getargspec class Child(Parent): def __init__(self, *args, **kwargs): super_init = super().__init__ argspec = getargspec(super_init) arg2_index = argspec.args.index('arg2') - 1 # account for self if len(args) < arg2_index and 'arg2' not in kwargs: kwargs['arg2'] = 'new value' super(Child, self).__init__(*args, **kwargs) You'd be much better off specifying all defaults instead: class Child(Parent): def __init__(self, arg1='something', arg2='new value'): super(Child, self).__init__(arg1=arg1, arg2=arg2) A: You've actually changed the signature of the class. Basically, with: def foo(a=1, b=2): ... you can call by position, or by keyword: foo(2, 3) foo(a=2, b=3) With: def bar(**kwargs): ... you can't call with positional arguments any more: bar(2, 3) # TypeError! Your actual code has additional complications because you have *args in there which eat up all of your positional arguments. The most robust advice I can give you is to preserve the signature when you override the method: class Child(Parent): def __init__(self, arg1='something', arg2='new value'): super(Child, self).__init__(arg1=arg1, arg2=arg2) This (unfortunately) isn't a DRY (Don't Repeat Yourself) as you'd probably like -- You have to specify 'something' twice. You could turn it into a global constant, or change the signature of Parent.__init__. Alternatively, you could do a bunch of introspection to work with the signature of the parent class to make sure that you're passing the correct arguments in the right ways -- but I doubt very much that it's worth it. A: I wasn't satisfied with either solution and came up with the one below. 
It introduces the defaults as class attributes which are loaded if the default is None: #!/usr/bin/env python3 class Parent: _arg1_default = 'something' _arg2_default = 'old default value' def __init__(self, arg1=None, arg2=None): if arg1 is None: arg1 = self._arg1_default if arg2 is None: arg2 = self._arg2_default self.arg1 = arg1 self.arg2 = arg2 print('arg1:', self.arg1) print('arg2:', self.arg2) class Child(Parent): _arg2_default = 'new value' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if __name__ == '__main__': print('Call Parent without arguments (use defaults)') parent = Parent() print('Call Child without arguments (use defaults)') child = Child() print('Call 2nd Child with custom arguments ("one", "two")') child2 = Child('one', 'two') print('Query arg2 of 2nd child') print(child2.arg2) Yields: Call Parent without arguments (use defaults) arg1: something arg2: old default value Call Child without arguments (use defaults) arg1: something arg2: new value Call 2nd Child with custom arguments ("one", "two") arg1: one arg2: two Query arg2 of 2nd child two
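If only the defaults differ between parent and child, a dataclass-based sketch is another option (an alternative, not a rewrite of the answers above): redeclaring a field in the subclass replaces just its default.
from dataclasses import dataclass

@dataclass
class Parent:
    arg1: str = 'something'
    arg2: str = 'old default value'

@dataclass
class Child(Parent):
    arg2: str = 'new value'  # only the changed default is restated

print(Parent())             # Parent(arg1='something', arg2='old default value')
print(Child())              # Child(arg1='something', arg2='new value')
print(Child('one', 'two'))  # Child(arg1='one', arg2='two')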
Change default constructor argument value (inherited from parent class) in subclass
I have a Parent class with a default value for the attribute arg2. I want to create a subclass Child which has a different default value for the same attribute. I need to use *args and **kwargs in Child. I tried the following, but it is not working: class Parent(object): def __init__(self, arg1='something', arg2='old default value'): self.arg1 = arg1 self.arg2 = arg2 print('arg1:', self.arg1) print('arg2:', self.arg2) class Child(Parent): def __init__(self, *args, **kwargs): super(Child, self).__init__(*args, **kwargs) self.arg2 = kwargs.pop('arg2', 'new value') This is not working. In fact, I get: >>> c = Child() arg1: something arg2: default value # This is still the old value >>> c.arg2 'new value' # Seems more or less ok >>> c = Child('one', 'two') arg1: one arg2: two >>> c.arg2 'new value' # This is wrong, it has overridden the specified argument 'two'
[ "You need to set the default in kwargs before passing it on to super(); this is tricky as you need to ensure that the same value is not already in args too:\nclass Child(Parent):\n def __init__(self, *args, **kwargs):\n if len(args) < 2 and 'arg2' not in kwargs:\n kwargs['arg2'] = 'new value'\n super(Child, self).__init__(*args, **kwargs)\n\nThis relies on knowing how many arguments are there to fill however. You'd have to use introspection of super().__init__ for this to work in the general case:\nfrom inspect import getargspec\n\nclass Child(Parent):\n def __init__(self, *args, **kwargs):\n super_init = super().__init__\n argspec = getargspec(super_init)\n arg2_index = argspec.args.index('arg2') - 1 # account for self\n if len(args) < arg2_index and 'arg2' not in kwargs:\n kwargs['arg2'] = 'new value'\n super(Child, self).__init__(*args, **kwargs)\n\nYou'd be much better off specifying all defaults instead:\nclass Child(Parent):\n def __init__(self, arg1='something', arg2='new value'):\n super(Child, self).__init__(arg1=arg1, arg2=arg2)\n\n", "You've actually changed the signature of the class. Basically, with:\ndef foo(a=1, b=2):\n ...\n\nyou can call by position, or by keyword:\nfoo(2, 3)\nfoo(a=2, b=3)\n\nWith:\ndef bar(**kwargs):\n ...\n\nyou can't call with positional arguments any more:\nbar(2, 3) # TypeError!\n\nYour actual code has additional complications because you have *args in there which eat up all of your positional arguments.\n\nThe most robust advice I can give you is to preserve the signature when you override the method:\nclass Child(Parent):\n def __init__(self, arg1='something', arg2='new value'):\n super(Child, self).__init__(arg1=arg1, arg2=arg2)\n\nThis (unfortunately) isn't a DRY (Don't Repeat Yourself) as you'd probably like -- You have to specify 'something' twice. You could turn it into a global constant, or change the signature of Parent.__init__.\nAlternatively, you could do a bunch of introspection to work with the signature of the parent class to make sure that you're passing the correct arguments in the right ways -- but I doubt very much that it's worth it.\n", "I wasn't satisfied with either solution and came up with the one below. It introduces the defaults as class attributes which are loaded if the default is None:\n#!/usr/bin/env python3\n\nclass Parent:\n _arg1_default = 'something'\n _arg2_default = 'old default value'\n\n def __init__(self, arg1=None, arg2=None):\n if arg1 is None:\n arg1 = self._arg1_default\n if arg2 is None:\n arg2 = self._arg2_default\n\n self.arg1 = arg1\n self.arg2 = arg2\n\n print('arg1:', self.arg1)\n print('arg2:', self.arg2)\n\n\nclass Child(Parent):\n _arg2_default = 'new value'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\nif __name__ == '__main__':\n print('Call Parent without arguments (use defaults)')\n parent = Parent()\n print('Call Child without arguments (use defaults)')\n child = Child()\n print('Call 2nd Child with custom arguments (\"one\", \"two\")')\n child2 = Child('one', 'two')\n print('Query arg2 of 2nd child')\n print(child2.arg2)\n\nYields:\nCall Parent without arguments (use defaults)\narg1: something\narg2: old default value\nCall Child without arguments (use defaults)\narg1: something\narg2: new value\nCall 2nd Child with custom arguments (\"one\", \"two\")\narg1: one\narg2: two\nQuery arg2 of 2nd child\ntwo \n\n" ]
[ 3, 2, 0 ]
[]
[]
[ "arguments", "constructor", "python", "subclass" ]
stackoverflow_0041623464_arguments_constructor_python_subclass.txt
Q: How to select a subset of rows based on a specific range in Python I have a dataset that contains information about commits. The dataset is quite similar to this: commit bug sha_1 Stable sha_2 Stable sha_3 Stable sha_4 Increase sha_5 Stable sha_6 Stable sha_7 Decrease sha_8 Stable sha_9 Decrease sha_10 Decrease sha_11 Increase sha_12 Stable I need to select the range of rows that contains "Increase" and "Decrease" (or vice versa) and are contained inside two commits, "Stable". For instance, according to the previous example, the output should be the following: commit bug sha_3 Stable sha_4 Increase sha_5 Stable sha_6 Stable sha_7 Decrease sha_8 Stable sha_8 Stable sha_9 Decrease sha_10 Decrease sha_11 Increase sha_12 Stable Any solution? A: Let's suppose I read the data from my csv file: import pandas as pd df = pd.read_excel('C:\\Users\\...\\Desktop\\Workbook1.xlsx') keep = [] for i in range(1, len(df) - 1): previous = df.loc[i-1, "bug"] current = df.loc[i, "bug"] next = df.loc[i+1, "bug"] if previous != "Stable" or current != "Stable" or next != "Stable": keep.append(i) keep.append(i + 1) df = df.filter(items=keep, axis=0) print(df) Is this what you expect to have?
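The same windowing can be done without an explicit loop. Here is a vectorised sketch, with df read as in the answer above; note that unlike the expected output it returns a boundary row such as sha_8 only once:
# True for every Increase/Decrease row
s = df['bug'].ne('Stable')
# keep a row if it, or one of its direct neighbours, is non-Stable
mask = s | s.shift(1, fill_value=False) | s.shift(-1, fill_value=False)
print(df[mask])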
How to select a subset of rows based on a specific range in Python
I have a dataset that contains information about commits. The dataset is quite similar to this: commit bug sha_1 Stable sha_2 Stable sha_3 Stable sha_4 Increase sha_5 Stable sha_6 Stable sha_7 Decrease sha_8 Stable sha_9 Decrease sha_10 Decrease sha_11 Increase sha_12 Stable I need to select the range of rows that contains "Increase" and "Decrease" (or vice versa) and are contained inside two commits, "Stable". For instance, according to the previous example, the output should be the following: commit bug sha_3 Stable sha_4 Increase sha_5 Stable sha_6 Stable sha_7 Decrease sha_8 Stable sha_8 Stable sha_9 Decrease sha_10 Decrease sha_11 Increase sha_12 Stable Any solution?
[ "Let's suppose I read the data from my csv file:\nimport pandas as pd\n\ndf = pd.read_excel('C:\\\\Users\\\\...\\\\Desktop\\\\Workbook1.xlsx')\n\nkeep = []\nfor i in range(1, len(df) - 1):\n previous = df.loc[i-1, \"bug\"]\n current = df.loc[i, \"bug\"]\n next = df.loc[i+1, \"bug\"]\n\n if previous != \"Stable\" or current != \"Stable\" or next != \"Stable\":\n keep.append(i)\n\nkeep.append(i + 1)\n\ndf = df.filter(items=keep, axis=0)\n\nprint(df)\n\n\nIs this what you expect to have?\n" ]
[ 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074535224_dataframe_pandas_python.txt
Q: Invalid Syntax jose.py I was trying to use the jose library for authentication in one of my flask apps, using the following import statement:
from jose import jwt

But it throws the following error:
Traceback (most recent call last):
  File "F:/XXX_XXX/xxxx-services-web/src/auth.py", line 6, in <module>
    from jose import jwt
  File "F:\Users\XXXX_XXXXX\AppData\Local\Programs\Python\Python37\lib\site-packages\jose.py", line 546
    print decrypt(deserialize_compact(jwt), {'k':key},
        ^
SyntaxError: invalid syntax

Is this library outdated?
A: installing python-jose instead of jose fixed my problem.
https://pypi.org/project/python-jose/
A: One solution is to install python-jose instead of installing jose.
Apart from that you can use import python_jwt as jwt instead of from jose import jwt and install the package via pip install python-jwt
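After installing python-jose, a quick smoke test confirms the import works (a sketch; 'secret' and HS256 are placeholder choices):
# pip uninstall jose && pip install python-jose
from jose import jwt

token = jwt.encode({'user': 'alice'}, 'secret', algorithm='HS256')
print(jwt.decode(token, 'secret', algorithms=['HS256']))  # {'user': 'alice'}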
Invalid Syntax jose.py
I was trying to use the jose library for authentication in one of my flask apps, using the following import statement:
from jose import jwt

But it throws the following error:
Traceback (most recent call last):
  File "F:/XXX_XXX/xxxx-services-web/src/auth.py", line 6, in <module>
    from jose import jwt
  File "F:\Users\XXXX_XXXXX\AppData\Local\Programs\Python\Python37\lib\site-packages\jose.py", line 546
    print decrypt(deserialize_compact(jwt), {'k':key},
        ^
SyntaxError: invalid syntax

Is this library outdated?
[ "installing python-jose instead of jose fixed my problem.\nhttps://pypi.org/project/python-jose/\n", "One solution is to install python-jose instead of installing jose.\nApart from that you can use import python_jwt as jwt instead of from jose import jwt and install the package via pip install python-jwt\n" ]
[ 20, 0 ]
[]
[]
[ "jose", "python" ]
stackoverflow_0065102969_jose_python.txt
Q: How to start another thread without waiting for function to finish? Hey I am making a telegram bot and I need it to be able to run the same command multiple times at once. dispatcher.add_handler(CommandHandler("send", send)) This is the command ^ And inside the command it starts a function: sendmail(email, amount, update, context) This function takes around 5seconds to finish. I want it so I can run it multiple times at once without needing to wait for it to finish. I tried the following: Thread(target=sendmail(email, amount, update, context)).start() This would give me no errors but It waits for function to finish then proceeds. I also tried this with ThreadPoolExecutor(max_workers=100) as executor: executor.submit(sendmail, email, amount, update, context).result() but it gave me the following error: No error handlers are registered, logging exception. Traceback (most recent call last): File "C:\Users\seal\AppData\Local\Programs\Python\Python310\lib\site-packages\telegram\ext\dispatcher.py", line 557, in process_update handler.handle_update(update, self, check, context) File "C:\Users\seal\AppData\Local\Programs\Python\Python310\lib\site-packages\telegram\ext\handler.py", line 199, in handle_update return self.callback(update, context) File "c:\Users\seal\Downloads\telegrambot\main.py", line 382, in sendmailcmd executor.submit(sendmail, email, amount, update, context).result() File "C:\Users\main\AppData\Local\Programs\Python\Python310\lib\concurrent\futures\thread.py", line 169, in submit raise RuntimeError('cannot schedule new futures after ' RuntimeError: cannot schedule new futures after interpreter shutdown A: This is my first attempt at threading, but maybe try this: import threading x1 = threading.Thread(target=sendmail, args=(email, amount, update, context)) x1.start() You can just put the x1 = threading... and x1.start() in a loop to have it run multiple times Hope this helps A: It's not waiting for one function to finish, to start another, but in python GIL (Global Interpreter Lock) executes only one thread at a given time. Since thread use multiple cores, time between two functions are negligible in most cases. Following is the way to start threads with the ThreadPoolExecutor, please adjust it to your usecase. def async_send_email(emails_to_send): with ThreadPoolExecutor(max_workers=32) as executor: futures = [ executor.submit( send_email, email=email_to_send.email, amount=email_to_send.amount, update=email_to_send.update, context=email_to_send.context ) for email_to_send in emails_to_send ] for future, email_to_send in zip(futures, emails_to_send): try: future.result() except Exception as e: # Handle the exceptions. continue def send_email(email, amount, update, context): # do what you want here.
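It is worth spelling out why the first attempt blocked (using the names from the question, assumed to be in scope): Thread(target=sendmail(email, amount, update, context)) calls sendmail immediately in the current thread and passes its return value as target, so the 5 seconds elapse before the thread even starts. Pass the function and its arguments separately:
from threading import Thread

Thread(target=sendmail, args=(email, amount, update, context)).start()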
How to start another thread without waiting for function to finish?
Hey I am making a telegram bot and I need it to be able to run the same command multiple times at once. dispatcher.add_handler(CommandHandler("send", send)) This is the command ^ And inside the command it starts a function: sendmail(email, amount, update, context) This function takes around 5seconds to finish. I want it so I can run it multiple times at once without needing to wait for it to finish. I tried the following: Thread(target=sendmail(email, amount, update, context)).start() This would give me no errors but It waits for function to finish then proceeds. I also tried this with ThreadPoolExecutor(max_workers=100) as executor: executor.submit(sendmail, email, amount, update, context).result() but it gave me the following error: No error handlers are registered, logging exception. Traceback (most recent call last): File "C:\Users\seal\AppData\Local\Programs\Python\Python310\lib\site-packages\telegram\ext\dispatcher.py", line 557, in process_update handler.handle_update(update, self, check, context) File "C:\Users\seal\AppData\Local\Programs\Python\Python310\lib\site-packages\telegram\ext\handler.py", line 199, in handle_update return self.callback(update, context) File "c:\Users\seal\Downloads\telegrambot\main.py", line 382, in sendmailcmd executor.submit(sendmail, email, amount, update, context).result() File "C:\Users\main\AppData\Local\Programs\Python\Python310\lib\concurrent\futures\thread.py", line 169, in submit raise RuntimeError('cannot schedule new futures after ' RuntimeError: cannot schedule new futures after interpreter shutdown
[ "This is my first attempt at threading, but maybe try this:\nimport threading\nx1 = threading.Thread(target=sendmail, args=(email, amount, update, context))\nx1.start()\n\nYou can just put the x1 = threading... and x1.start() in a loop to have it run multiple times\nHope this helps\n", "It's not waiting for one function to finish, to start another, but in python GIL (Global Interpreter Lock) executes only one thread at a given time. Since thread use multiple cores, time between two functions are negligible in most cases.\nFollowing is the way to start threads with the ThreadPoolExecutor, please adjust it to your usecase.\ndef async_send_email(emails_to_send):\n with ThreadPoolExecutor(max_workers=32) as executor:\n futures = [\n executor.submit(\n send_email,\n email=email_to_send.email,\n amount=email_to_send.amount,\n update=email_to_send.update,\n context=email_to_send.context\n )\n for email_to_send in emails_to_send\n ]\n\n for future, email_to_send in zip(futures, emails_to_send):\n try:\n future.result()\n except Exception as e:\n # Handle the exceptions.\n continue\n\ndef send_email(email, amount, update, context):\n # do what you want here.\n\n" ]
[ 1, 0 ]
[]
[]
[ "multithreading", "python", "telegram" ]
stackoverflow_0074535815_multithreading_python_telegram.txt
Q: How to enable autoscrolling to bottom for wx.html.HtmlWindow I am using wxPython and want my HtmlWindow to scroll down automatically after adding new content. I am using it as a log window inside my app. Unfortunately, I am struggling to get it working. Here is my sample with lacks the functionality: import wx import wx.html class GUI(wx.Frame): def __init__(self, parent): super().__init__(parent) self.html = wx.html.HtmlWindow(self, -1, pos=(0, 0), size=(50, 50)) msg = '<pre>FOO</pre>' for i in range(10): self.html.AppendToPage(msg) if __name__ == '__main__': app = wx.App() frame = GUI(parent=None) frame.Show() app.MainLoop() I want the scrollbar showing the stack of "Foos" to be at the bottom instead of staying on top so that the latest logging content is shown to the user. A: Arguably wx.html.HtmlWindow is the wrong tool to use. You'd have to insert Anchors and then leap to each Anchor. For a log, it's better to use a wx.TextCtrl e.g. import wx import time class GUI(wx.Frame): def __init__(self, parent): super().__init__(parent) self.log = wx.TextCtrl(self, wx.ID_ANY, size=(600, 480), style=wx.TE_MULTILINE| wx.TE_READONLY| wx.VSCROLL) # set initial position at the start self.log.SetInsertionPoint(0) self.Show() msg = 'FOO\n' for i in range(50): self.log.write(msg) wx.GetApp().Yield() time.sleep(0.5) if __name__ == '__main__': app = wx.App() frame = GUI(parent=None) app.MainLoop()
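If the HtmlWindow itself has to stay, here is a sketch that relies on wx.html.HtmlWindow deriving from wx.ScrolledWindow (treat this as an assumption to verify on your wxPython version): jump to the maximum vertical scroll position after appending.
import wx
import wx.html

class GUI(wx.Frame):
    def __init__(self, parent):
        super().__init__(parent)
        self.html = wx.html.HtmlWindow(self, -1, size=(300, 100))
        for i in range(30):
            self.html.AppendToPage('<pre>FOO %d</pre>' % i)
        # x = -1 keeps the horizontal position; y jumps to the end of the range
        self.html.Scroll(-1, self.html.GetScrollRange(wx.VERTICAL))

if __name__ == '__main__':
    app = wx.App()
    GUI(parent=None).Show()
    app.MainLoop()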
How to enable autoscrolling to bottom for wx.html.HtmlWindow
I am using wxPython and want my HtmlWindow to scroll down automatically after adding new content. I am using it as a log window inside my app. Unfortunately, I am struggling to get it working. Here is my sample with lacks the functionality: import wx import wx.html class GUI(wx.Frame): def __init__(self, parent): super().__init__(parent) self.html = wx.html.HtmlWindow(self, -1, pos=(0, 0), size=(50, 50)) msg = '<pre>FOO</pre>' for i in range(10): self.html.AppendToPage(msg) if __name__ == '__main__': app = wx.App() frame = GUI(parent=None) frame.Show() app.MainLoop() I want the scrollbar showing the stack of "Foos" to be at the bottom instead of staying on top so that the latest logging content is shown to the user.
[ "Arguably wx.html.HtmlWindow is the wrong tool to use.\nYou'd have to insert Anchors and then leap to each Anchor.\nFor a log, it's better to use a wx.TextCtrl e.g.\nimport wx\nimport time\n\nclass GUI(wx.Frame):\n\n def __init__(self, parent):\n super().__init__(parent)\n self.log = wx.TextCtrl(self, wx.ID_ANY, size=(600, 480),\n style=wx.TE_MULTILINE| wx.TE_READONLY| wx.VSCROLL)\n # set initial position at the start\n self.log.SetInsertionPoint(0)\n self.Show()\n \n msg = 'FOO\\n'\n for i in range(50):\n self.log.write(msg)\n wx.GetApp().Yield()\n time.sleep(0.5)\n\nif __name__ == '__main__':\n app = wx.App()\n frame = GUI(parent=None)\n app.MainLoop()\n\n" ]
[ 0 ]
[]
[]
[ "python", "wxhtmlwindow", "wxpython" ]
stackoverflow_0074534631_python_wxhtmlwindow_wxpython.txt
Q: When trying to position elements in tkinter all elements are moved I'm using tkinter to create a very simple GUI just to start learning how to use the module. I'm trying to position two elements (a button and a text box). To position the elements I'm using the grid function; however, although I call grid with a separate set of parameters for the button and for the textbox, both elements seem to be affected by just one set of parameters. This is the code:
from tkinter import *
import tkinter as tk
from tkinter import ttk
gui = Tk()
gui.geometry("600x800")
button = ttk.Button(text="button")
button.grid(row=1, column=1,ipady=30, ipadx=30)
lowerframe = tk.Frame(gui)
lowerframe.grid(row=2, column=1, padx = 100)
tb = ttk.Entry(lowerframe, width= 20)
tb.grid(ipady=30, ipadx= 0)
gui.mainloop()

As you can see, there are two different sets of parameters for the positioning of the button and the textbox, yet for some reason the button and the textbox end up in the same position. So how can I make the parameters affect the elements they're intended to affect?
A: Does this help? You were importing tkinter under too many names. I gave the Button an explicit gui parent, and I also added row and column to tb.grid.
Code:
import tkinter as tk
from tkinter import ttk
gui = tk.Tk()
gui.geometry("600x800")
button = ttk.Button(gui, text="button")
button.grid(row=1, column=1,ipady=30, ipadx=30)
lowerframe = ttk.Frame(gui)
lowerframe.grid(row=2, column=1, padx = 100)
tb = ttk.Entry(lowerframe, width= 20)
tb.grid(row=3, column=1, ipady=30, ipadx= 30)
gui.mainloop()
Result:
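The underlying rule, in a stripped-down sketch: grid coordinates are always relative to the widget's master, and a widget created without a master lands in the root window.
import tkinter as tk
from tkinter import ttk

gui = tk.Tk()

button = ttk.Button(gui, text="button")   # master: gui
button.grid(row=0, column=0, padx=10, pady=10)

lowerframe = tk.Frame(gui)                # master: gui
lowerframe.grid(row=1, column=0, padx=100)

tb = ttk.Entry(lowerframe, width=20)      # master: lowerframe
tb.grid(row=0, column=0, pady=30)         # row 0 *inside* lowerframe, not row 0 of gui

gui.mainloop()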
When trying to position elements in tkinter all elements are moved
I'm using tkinter to create a very simple GUI just to start learning how to use the module. I'm trying to position two elements (a button and a text box). To position the elements I'm using the grid function; however, although I call grid with a separate set of parameters for the button and for the textbox, both elements seem to be affected by just one set of parameters. This is the code:
from tkinter import *
import tkinter as tk
from tkinter import ttk
gui = Tk()
gui.geometry("600x800")
button = ttk.Button(text="button")
button.grid(row=1, column=1,ipady=30, ipadx=30)
lowerframe = tk.Frame(gui)
lowerframe.grid(row=2, column=1, padx = 100)
tb = ttk.Entry(lowerframe, width= 20)
tb.grid(ipady=30, ipadx= 0)
gui.mainloop()

As you can see, there are two different sets of parameters for the positioning of the button and the textbox, yet for some reason the button and the textbox end up in the same position. So how can I make the parameters affect the elements they're intended to affect?
[ "Does this help? You used too many namespace tkinter. I added gui widget for Button I also added tb.grid for row and column.\nCode:\nimport tkinter as tk\nfrom tkinter import ttk\ngui = tk.Tk()\ngui.geometry(\"600x800\")\nbutton = ttk.Button(gui, text=\"button\")\nbutton.grid(row=1, column=1,ipady=30, ipadx=30)\nlowerframe = ttk.Frame(gui)\nlowerframe.grid(row=2, column=1, padx = 100)\ntb = ttk.Entry(lowerframe, width= 20)\ntb.grid(row=3, column=1, ipady=30, ipadx= 30)\ngui.mainloop()\nResult:\n\n" ]
[ 0 ]
[]
[]
[ "python", "tkinter" ]
stackoverflow_0072139023_python_tkinter.txt