Dataset columns:
  content: string (length 85 to 101k)
  title: string (length 0 to 150)
  question: string (length 15 to 48k)
  answers: list
  answers_scores: list
  non_answers: list
  non_answers_scores: list
  tags: list
  name: string (length 35 to 137)
How can I convert my "price" column from string to number format?
# Import required modules
import requests
from bs4 import BeautifulSoup
import time
import pandas as pd

# Get data from webpage
mystocks = ['GOOG', 'META', 'MSFT', 'PLTR', 'TSLA', 'ZS', 'PYPL', 'SHOP', 'TTCF']

def getData(symbol):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36'}
    url = f'https://finance.yahoo.com/quote/{symbol}'
    r = requests.get(url, headers=headers)
    soup = BeautifulSoup(r.text, 'html.parser')
    stock = {
        'symbol': symbol,
        'price': soup.find('div', {'class': 'D(ib) Mend(20px)'}).find_all('fin-streamer')[0].text,
    }
    return stock

def export_data(stockdata):
    df = pd.DataFrame(stockdata)
    df.to_excel("LETS GO2.xlsx")
    df = df.apply(pd.to_numeric)
    df.apply(pd.to_numeric, errors='ignore')

if __name__ == '__main__':
    while True:
        stockdata = []
        for item in mystocks:
            print(item)
            stockdata.append(getData(item))
        export_data(stockdata)
        time_wait = 10
        print(f'Waiting {time_wait} minutes...')
        time.sleep(time_wait * 60)

I need to convert the "price" column into number format, but the solution df["A"] = pd.to_numeric(df["A"]) does not work. No errors are appearing anymore (that was probably an issue with my code), but the exported Excel file is not returning the number data type as requested. Appreciate all the help, thanks!
[ "Do it like this.\nimport pandas as pd \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy.optimize as sco\nimport datetime as dt\nimport math\nfrom datetime import datetime, timedelta\nfrom pandas_datareader import data as wb\nfrom sklearn.cluster import KMeans\nnp.random.seed(777)\n\n\nstart = '2018-06-30'\nend = '2020-06-30'\n# N = 90\n# start = datetime.now() - timedelta(days=N)\n# end = dt.datetime.today()\n\n\n\ntickers = ['AXP','AAPL','BA','CAT','CSCO','CVX','XOM','GS','HD','IBM','INTC','JNJ','KO','JPM','MCD','MMM','MRK','MSFT','NKE','PFE','PG','TRV','UNH','RTX','VZ','V','WBA','WMT','DIS','DOW']\n\nthelen = len(tickers)\n\nprice_data = []\nfor ticker in tickers:\n prices = wb.DataReader(ticker, start = start, end = end, data_source='yahoo')[['Adj Close']]\n price_data.append(prices.assign(ticker=ticker)[['ticker', 'Adj Close']])\n\ndf = pd.concat(price_data)\ndf.dtypes\ndf.head()\ndf.shape\n\npd.set_option('display.max_columns', 500)\n\ndf = df.reset_index()\ndf = df.set_index('Date')\ntable = df.pivot(columns='ticker')\n# By specifying col[1] in below list comprehension\n# You can select the stock names under multi-level column\ntable.columns = [col[1] for col in table.columns]\ntable.head()\n\nThis is what you get.\n\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074133034_python.txt
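A likely fix for the question above, sketched under the assumption that the scraped price strings contain thousands separators (e.g. "1,234.56"): strip the separators and convert before calling to_excel. The question's export_data writes the file first and only converts afterwards, so the spreadsheet still receives strings.

def export_data(stockdata):
    df = pd.DataFrame(stockdata)
    # Remove commas, then convert; to_numeric raises on anything unparseable
    df['price'] = pd.to_numeric(df['price'].str.replace(',', '', regex=False))
    df.to_excel("LETS GO2.xlsx", index=False)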
Object Detection with YOLOV7 on custom dataset
I am trying to predict bounding boxes on a custom dataset using transfer learning on a YOLOv7 pretrained model.

My dataset contains 34 scenes for training, 2 validation scenes and 5 test scenes. Nothing much happens in a scene: the camera just moves 60-70 degrees around the objects on a table/flat surface and scales/tilts a bit. So even though I have around 20k training images (extracted from the 34 scenes), the images I get from each scene are almost the same, with a kind of augmentation effect (scaling, rotation, occlusion and tilting coming from the camera movement). Here is an example of a scene (first frame and last frame).

Now, I tried different things:

transfer learning with the pretrained YOLOv7 P5 model
transfer learning with the pretrained YOLOv7 P5 model (with the extractor frozen, 50 layers)
transfer learning with the pretrained YOLOv7 tiny model
transfer learning with the pretrained YOLOv7 tiny model (with the extractor frozen, 28 layers)
full training of the YOLOv7 P5 network
full training of the YOLOv7 tiny network

Some of them kind of work (the bounding boxes are predicted with 100% precision, but lower recall, and sometimes with a wrong class label), but the biggest problem I am facing is that the validation object loss never goes down, no matter which approach I try. It happens even from the start, so I am not sure whether I am overfitting or not. The graph below is from transfer learning on the tiny model with a frozen backbone.

Any suggestions on how to solve the problem and get a better result?
[ "I would suggest you thoroughly review your dataset, to start.\n\nCheck the class distributions.\n\nHow many classes do you have, and what are the counts of the objects of these classes in the training set?\nWhat are the counts in the validation set? Are the ratios approximately similar or different?\nIs any class lacking examples (i.e. is too few by proportion)?\nDo you have enough background samples? (Images where no desired object is present)\n\n\nCheck your dataset's annotations. Are your objects labelled correctly? If you have time, take a random 1000 images and plot the bounding boxes on them and manually check the labels. This is a sort of sanity check, and sometimes you can find wrongly drawn boxes and incorrect labels.\n\nAnother issue could be the lack of variety, as you have mentioned. You have 20K images in your training set, but possibly there are at most just ~34 unique mugs inside (assuming mug is a class). Maybe all those mugs are white, blue, or brown in color, but in your validation the mug is a bright red. (I hope you get the idea).\n\nTry playing around with the hyperparameters a bit. Explore a slightly lower or slightly longer learning rate, longer warmup, stronger weight decay. I assume these are the settings you are using; try increasing the mosaic, copy paste, flip up etc. probabilities as well. If stronger augmentation params are having positive results, it could be a hint that the problem is that the dataset is redundant and lacks variety.\n\n\n" ]
[ 1 ]
[]
[]
[ "computer_vision", "deep_learning", "machine_learning", "python", "pytorch" ]
stackoverflow_0074507437_computer_vision_deep_learning_machine_learning_python_pytorch.txt
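A minimal sketch of the label sanity check suggested in the answer above: draw the ground-truth boxes on a random sample of training images and inspect them by eye. It assumes the common YOLO layout (images/*.jpg with a matching labels/*.txt of normalized "class cx cy w h" rows) and a pre-existing image_paths list; both are assumptions, not something stated in the question.

import random
import cv2

sample = random.sample(image_paths, 1000)
for img_path in sample:
    img = cv2.imread(img_path)
    h, w = img.shape[:2]
    label_path = img_path.replace('images', 'labels').rsplit('.', 1)[0] + '.txt'
    with open(label_path) as f:
        for line in f:
            cls, cx, cy, bw, bh = map(float, line.split())
            # Convert normalized center/size to pixel corner coordinates
            x1, y1 = int((cx - bw / 2) * w), int((cy - bh / 2) * h)
            x2, y2 = int((cx + bw / 2) * w), int((cy + bh / 2) * h)
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    cv2.imwrite('checked_' + img_path.split('/')[-1], img)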
How to render emojis in matplotlib with 'natural' colors?
I want to use emoji in a plot, which works with the correct font (Segoe UI Emoji on Windows); however, I cannot figure out how to use the 'natural' colors. When rendered in a browser or MS Word, the glyphs have their own colors defined (I presume) by the font. In this example, they are grey and yellow. However, they become monochrome when used in matplotlib, and I can't seem to use 'auto' as a font color.

fig, ax = plt.subplots()
ax.text(.2, .2, '', fontname='Segoe UI Emoji', fontsize=20)
ax.text(.4, .4, '', fontname='Segoe UI Emoji', fontsize=20)
ax.text(.6, .6, '', fontname='Segoe UI Emoji', fontsize=20)
ax.text(.8, .8, '', fontname='Segoe UI Emoji', fontsize=20)
plt.show()
[ "i have created a small library (imojify) to deal with colored emoji issue\nfrom imojify import imojify\nfrom matplotlib import pyplot as plt \nfrom matplotlib.offsetbox import OffsetImage,AnnotationBbox\n\ndef offset_image(cords, emoji, ax):\n\n img = plt.imread(imojify.get_img_path(emoji))\n im = OffsetImage(img, zoom=0.08)\n im.image.axes = ax\n ab = AnnotationBbox(im, (cords[0], cords[1]), frameon=False, pad=0)\n ax.add_artist(ab)\n\n\nemjis = ['', '', '', '']\npos =[0.2,0.4,0.6,0.8]\n\nfig, ax = plt.subplots(figsize=(12,8))\n\nfor i, e in enumerate(emjis):\n offset_image([pos[i],pos[i]], e, ax)\n\nthe library contains images for all emojis,\nimojify.get_img_path(emoji) simply returns the path of the emoji image then you can use OffsetImage to add these images as labels, text, or whatever you want\n" ]
[ 0 ]
[]
[]
[ "emoji", "matplotlib", "python" ]
stackoverflow_0071038093_emoji_matplotlib_python.txt
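For completeness, the answer's approach does not depend on imojify specifically: any emoji bitmap on disk can be placed the same way. A minimal sketch, assuming only that emoji.png exists locally (a hypothetical file name):

import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox

fig, ax = plt.subplots()
img = plt.imread('emoji.png')                    # any RGBA emoji bitmap
ab = AnnotationBbox(OffsetImage(img, zoom=0.1),  # zoom scales the bitmap
                    (0.5, 0.5), frameon=False)
ax.add_artist(ab)
plt.show()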
after 13th api call yfinance will not give any earnings data
I have the following script to populate my database with Yahoo Finance information:

from multiprocessing import Pool
import json, time, yfinance
import django
django.setup()
from dividends_info.functions.stock_info import save_stock_info_data
from dividends_info.models import StockInfo

with open('tickers/nyse_tickers.json') as tickers_file:
    TICKERS = json.load(tickers_file)
TICKERS = TICKERS[0:100]

# https://www.digitalocean.com/community/tutorials/python-multiprocessing-example
def update_a_stock(ticker):
    stock, created = StockInfo.objects.get_or_create(ticker=ticker)
    yahoo_stock_obj = yfinance.Ticker(ticker.upper())
    earnings_history = yahoo_stock_obj.earnings_history
    save_stock_info_data(yahoo_stock_obj, ticker, stock, earnings_history)
    if not stock.earnings:
        print(f"No earnings for {ticker}")

def pool_handler():
    start = time.time()
    p = Pool(2)
    p.map(update_a_stock, TICKERS)
    with open("time_taken_to_populate.txt", "w") as time_file:
        time_taken = round((time.time() - start), 2)
        time_file.write(f"Time taken = {time_taken:.10f}")

if __name__ == '__main__':
    pool_handler()

The important thing here is:

yahoo_stock_obj = yfinance.Ticker(ticker.upper())
earnings_history = yahoo_stock_obj.earnings_history

whereas my save_stock_info_data prints out the type of the earnings history:

ticker in save stock info func: ABEV
<class 'pandas.core.frame.DataFrame'>
ticker in save stock info func: A
<class 'pandas.core.frame.DataFrame'>
Saved new information for stock ABEV
Saved new information for stock A
ticker in save stock info func: ABG
<class 'pandas.core.frame.DataFrame'>
ticker in save stock info func: AA
<class 'pandas.core.frame.DataFrame'>
Saved new information for stock ABG
Saved new information for stock AA
ticker in save stock info func: ABM
<class 'pandas.core.frame.DataFrame'>

As you can see, the script starts off fine, and for the first 13 calls it will save earnings data:

python3 count_db_items.py
21 many stocks
name: 18, summary: 18, dividends: 16, earnings: 13

After the first 13 earnings are saved, earnings_history is no longer available from the API:

ticker in save stock info func: AB
<class 'NoneType'>
Could not find data for ACCO.
ticker in save stock info func: ACCO
<class 'NoneType'>
Saved new information for stock AB
No earnings for AB
Saved new information for stock ACCO
No earnings for ACCO
Could not find data for ABB.
ticker in save stock info func: ABB
<class 'NoneType'>

Each earnings_history object gathered from the yfinance Ticker object is None. This happens each time the script is run after the first 13 are saved. All the other data, including dividends, is available. I think dividends is sent as an array whereas earnings_history is sent as a pandas DataFrame. When I ran the script for all tickers I had 1800 stocks, 1700 dividends and only 25 results for earnings.

Running the script synchronously does not help; in fact it is worse: NO earnings are saved at all.

import json, os, sys, time, yfinance
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dividends_project.settings")
sys.path.append('../..')
import django
django.setup()
from dividends_info.functions.stock_info import save_stock_info_data
from dividends_info.models import StockInfo

# https://stackoverflow.com/questions/59159991/modulenotfounderror-no-module-named-foo-how-can-i-import-a-model-into-a-djang
# https://pythonspeed.com/articles/python-multiprocessing/
# https://github.com/pytorch/pytorch/issues/3492

f = open('tickers/nyse_tickers.json')
TICKERS = json.load(f)
f.close()
print(len(TICKERS))
TICKERS = TICKERS[:50]

def update_a_stock(ticker):
    stock, created = StockInfo.objects.get_or_create(ticker=ticker)
    yahoo_stock_obj = yfinance.Ticker(ticker.upper())
    earnings_history = yahoo_stock_obj.earnings_history
    # stock, yahoo_obj = save_stock_info_data(ticker, stock)
    save_stock_info_data(yahoo_stock_obj, ticker, stock, earnings_history)
    if not stock.earnings:
        print("earnings didn't save for this stock...the earnings are:")
        print(earnings_history)

def run():
    start = time.time()
    for ticker in TICKERS:
        update_a_stock(ticker)
    with open("time_taken_to_populate.txt", "w") as time_file:
        time_taken = round((time.time() - start), 2)
        time_file.write(f"Time taken = {time_taken:.10f}")

if __name__ == "__main__":
    run()

Without the earnings data my website is pretty much useless. It doesn't seem to be a rate-limiting issue, since running a synchronous version of the script doesn't work either. If I access an individual stock it has earnings:

import sys
ticker = sys.argv[1]

def update_a_stock(ticker):
    stock, created = StockInfo.objects.get_or_create(ticker=ticker)
    yahoo_stock_obj = yfinance.Ticker(ticker.upper())
    earnings_history = yahoo_stock_obj.earnings_history
    earnings = gather_earnings_object

In ipdb:

ipdb> earnings
[{'date': datetime.date(2022, 7, 29), 'expected': 3.31, 'actual': 3.37, 'surprise': '+1.69'},
 {'date': datetime.date(2022, 4, 29), 'expected': 3.14, 'actual': 3.16, 'surprise': '+0.57'},
 {'date': datetime.date(2022, 2, 2), 'expected': 3.29, 'actual': 3.31, 'surprise': '+0.73'},
 {'date': datetime.date(2021, 10, 29), 'expected': 3.22, 'actual': 3.33, 'surprise': '+3.32'},
 ....

I would have to manually run my script for the single API call 10000 times to populate the data with all American stocks. How can I run an asynchronous script calling yfinance to populate my db?
[ "Here you go.\nimport pandas_datareader as web\nimport pandas as pd\n \ndf = web.DataReader('AAPL', data_source='yahoo', start='2011-01-01', end='2021-01-12')\ndf.head()\n\nimport yfinance as yf\naapl = yf.Ticker(\"AAPL\")\naapl\n \n\n\n# show earnings\naapl.earnings\naapl.quarterly_earnings\n \n\nResult:\n Revenue Earnings\nQuarter \n4Q2021 123945000000 34630000000\n1Q2022 97278000000 25010000000\n2Q2022 82959000000 19442000000\n3Q2022 90146000000 20721000000\n\nThere's all kinds of stuff available in the pandas-datareader library.\nhttps://pandas-datareader.readthedocs.io/en/latest/readers/yahoo.html\n" ]
[ 0 ]
[]
[]
[ "multithreading", "python", "python_multiprocessing", "yfinance" ]
stackoverflow_0073724165_multithreading_python_python_multiprocessing_yfinance.txt
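One hedged mitigation for the question above (a sketch, not a confirmed diagnosis): wrap the earnings_history lookup in a retry loop with back-off, so transient None responses from Yahoo, whatever their cause, get a second chance instead of silently leaving the row empty.

import time
import yfinance

def fetch_earnings(ticker, retries=3, pause=2.0):
    for attempt in range(retries):
        hist = yfinance.Ticker(ticker.upper()).earnings_history
        if hist is not None:
            return hist
        time.sleep(pause * (attempt + 1))  # back off before retrying
    return None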
Python Pandas calculate standard deviation excluding current group, with vectorization solution
So I want to calculate the standard deviation excluding the current group, using groupby. Here is an example of the data:

import pandas as pd

df = pd.DataFrame({
    'group': ['A','A','A','A','A','A','B','B','B','B','B','B'],
    'team': ['1','1','2','2','3','3','1','1','2','2','3','3'],
    'value': [1,2,5,7,2,3,7,8,8,9,6,4]
})

For example, for group A team 1, I want to calculate the std dev of teams 2 and 3; for group A team 2, I want to calculate the std dev of teams 1 and 3, and so on. I managed to do it using groupby and apply, but when using it on real data with literally millions of rows, it takes too long. So I am looking for a solution with vectorization.

def std(row, data):
    data = data.loc[data['group'] == row['group']]
    return data.groupby(['team']).filter(lambda x: (x['team'] != row['team']).all())['value'].std()

df['std_exclude'] = df.apply(lambda x: std(x, data=df), axis=1)
[ "You can use transform after combining group and team as a list:\ndf['std'] = (df.assign(new=df[['group', 'team']].values.tolist())['new'].transform(\n lambda x: df[df['group'].eq(x[0]) & df['team'].ne(x[1])]['value'].std())) \n\nOutput:\ngroup team value std\n0 A 1 1 2.217356\n1 A 1 2 2.217356\n2 A 2 5 0.816497\n3 A 2 7 0.816497\n4 A 3 2 2.753785\n5 A 3 3 2.753785\n6 B 1 7 2.217356\n7 B 1 8 2.217356\n8 B 2 8 1.707825\n9 B 2 9 1.707825\n10 B 3 6 0.816497\n11 B 3 4 0.816497\n\nThere are some equal std values across different groups but you can verify that their std values are indeed equal.\n" ]
[ 1 ]
[]
[]
[ "pandas", "python", "standard_deviation", "vectorization" ]
stackoverflow_0074508088_pandas_python_standard_deviation_vectorization.txt
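A further speed-up, sketched here as an alternative to the accepted answer (which still scans the frame once per row via its lambda): compute the leave-one-team-out standard deviation purely from per-team sums, sums of squares and counts. This assumes the sample definition of std (ddof=1, the pandas default) and reproduces the values in the answer's output.

import numpy as np
import pandas as pd

tmp = df.assign(sq=df['value'] ** 2)
team_stats = tmp.groupby(['group', 'team'])[['value', 'sq']].sum()
team_stats['n'] = tmp.groupby(['group', 'team']).size()
grp_totals = team_stats.groupby(level='group').transform('sum')

m = grp_totals['n'] - team_stats['n']          # count excluding current team
s = grp_totals['value'] - team_stats['value']  # sum excluding current team
q = grp_totals['sq'] - team_stats['sq']        # sum of squares excluding current team
team_stats['std_exclude'] = np.sqrt((q - s**2 / m) / (m - 1))

df = df.merge(team_stats['std_exclude'].reset_index(), on=['group', 'team'])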
Copy previous value row if not nan based on another column if value
I have a DataFrame d1 where I would like to copy over the values of all rows in all columns when column 'C' is lower than 10k, obtaining the result indicated in d2, without overwriting the value of a row in case it is different from NaN. In my example I have all values equal to '1', but in my real dataframe some of them are also different.
[ "If my understanding was correct, df2 would be seen as an unexpected result, and the expected result would be\n\nTo achieve the result shown by the above screenshot, we only need to run the below code\ndf_result = df1[df1.C < 1000]\n\nIn addition, it would be better (closer to convention) to use np.nan than nan, with import numpy as np.\nHope it helps.\n" ]
[ 0 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074508053_dataframe_pandas_python.txt
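A tiny runnable illustration of the answer's one-liner, using made-up data (the question's actual frames were only shown as screenshots, so the column values here are hypothetical):

import numpy as np
import pandas as pd

df1 = pd.DataFrame({'A': [1, np.nan, 1],
                    'B': [np.nan, 1, 1],
                    'C': [500, 20000, 900]})
df_result = df1[df1.C < 10000]  # keeps only the rows where C is below 10k
print(df_result)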
Importing pickle file into mysql gives an error "ProgrammingError: not enough arguments for format string"
I'm trying to insert data from a .pickle file into MySQL. I'm getting the error "ProgrammingError: not enough arguments for format string". As I understand it, this error happens when the count of placeholders (%s) is greater than the count of values for formatting/templating. But in my case they are equal.

cursor.execute("""INSERT INTO 'directors'('id', 'first_name', 'last_name') VALUES (%s,%s,%s)""", (result, ))

import _pickle as cPickle
import pandas as pd
import MySQLdb

csv_file = 'NEW_DIRECTORS.csv'
filename = "myfile.pickle"

df = pd.read_csv(csv_file)
data_to_save = df

with open(filename, 'wb') as file_handle:
    cPickle.dump(data_to_save, file_handle)

with open(filename, 'rb') as file_handle:
    result = cPickle.load(file_handle)

print(result)

***
   id first_name last_name
0  31       Paul     Aaron
1  32       Evan  Aaronson
2  33     Reuben  Aaronson
3  34     Heikki     Aarva

connection = MySQLdb.connect('localhost', 'root', '', 'movies')
cursor = connection.cursor()
cursor.execute("""INSERT INTO `directors`(`id`, `first_name`, `last_name`) VALUES (%s,%s,%s)""", (result, ))

Tried also without quotes:

cursor.execute("""INSERT INTO directors(id, first_name, last_name) VALUES (%s,%s,%s)""", (result, ))

Same error.

Reference: http://blog.cameronleger.com/2011/05/31/python-example-pickling-things-into-mysql-databases/

repr(result) gives me:

'    id first_name last_name\n0   31       Paul     Aaron\n1   32       Evan  Aaronson\n2   33     Reuben  Aaronson\n3   34     Heikki     Aarva'
[ "result is a DataFrame, so you could convert each row to a tuple and write the tuple to the database:\nfor t in result.itertuples():\n # We don't want the index, which is the first element.\n values = t[1:]\n cursor.execute(\"\"\"INSERT INTO directors(id, first_name, last_name) VALUES (%s,%s,%s)\"\"\", values)\nconn.commit()\n\nThis more compact code is equivalent:\nrow_data = [t[1:] for t in result.itertuples()]\ncursor.executemany(\"\"\"INSERT INTO directors(id, first_name, last_name) VALUES (%s,%s,%s)\"\"\", row_data)\nconn.commit()\n\nHowever note that you would usually just use the dataframe's to_sql method, as described in the answers here.\n" ]
[ 0 ]
[]
[]
[ "mysql", "pandas", "pickle", "python", "sql" ]
stackoverflow_0074503135_mysql_pandas_pickle_python_sql.txt
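For reference, the to_sql route the answer mentions can look like this; the connection string is a guess matching the question's credentials (root, empty password, database "movies") and assumes SQLAlchemy plus the mysqlclient driver are installed:

from sqlalchemy import create_engine

engine = create_engine("mysql+mysqldb://root:@localhost/movies")
# Appends the DataFrame rows to the existing table; index=False skips the
# DataFrame index, since the table already has its own id column.
result.to_sql("directors", engine, if_exists="append", index=False)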
Is there any way to plot emojis in matplotlib?
Does anybody know how to plot emojis in matplotlib while using Windows? I've been struggling to find a solution, as most out there seem to be specific to macOS. Below is my current graph showing emojis plotted in a vector space, but as usual most do not show up. Are there perhaps any fonts already installed with matplotlib that provide emoji support, or will I need to install some backend solutions?

Code:

def display_pca_scatterplot(model, words=None, sample=0):
    if words == None:
        if sample > 0:
            words = np.random.choice(list(model.vocab.keys()), sample)
        else:
            words = [word for word in model.vocab]

    prop = FontProperties(fname='/usr/share/fonts/truetype/noto/Apple Color Emoji.ttc')
    word_vectors = np.array([model[w] for w in words])
    twodim = PCA().fit_transform(word_vectors)[:, :2]

    sb.set_style("darkgrid")
    plt.figure(figsize=(10, 10))
    plt.scatter(twodim[:, 0], twodim[:, 1])  # , edgecolors='w', color='w')
    for word, (x, y) in zip(words, twodim):
        plt.text(x + 0.0, y + 0.0, word, fontsize=20)  # fontproperties=prop)
[ "This seems to work for me , but apparently depends on default fonts (eg \"Segoe UI Emoji\") being installed:\nplt.text(0,.5,' ☺️ ',fontsize=20)\n\n\n", "i have created a small library (imojify) to deal with that issue\nfrom imojify import imojify\nfrom matplotlib import pyplot as plt \nfrom matplotlib.offsetbox import OffsetImage,AnnotationBbox\ndef offset_image(cords, emoji, ax):\n img = plt.imread(imojify.get_img_path(emoji))\n im = OffsetImage(img, zoom=0.08)\n im.image.axes = ax\n ab = AnnotationBbox(im, (cords[0], cords[1]), frameon=False, pad=0)\n ax.add_artist(ab)\n\n\n\nemjis = ['', '', '', '','', '', '']\nvalues =[30, 50, 15, 29, 15, 50, 12]\n\nfig, ax = plt.subplots(figsize=(12,8))\nax.bar(range(len(emjis)), values, width=0.5,align=\"center\")\nax.set_xticks(range(len(emjis)))\nax.set_xticklabels([])\nax.tick_params(axis='x', which='major', pad=26)\nax.set_ylim((0, ax.get_ylim()[1]+10))\n\nfor i, e in enumerate(emjis):\n offset_image([i,values[i]+5], e, ax)\n\nthe library contains images for all emojis,\nimojify.get_img_path(emoji) simply returns the path of the emoji image then you can use OffsetImage to add these images as labels\n" ]
[ 1, 0 ]
[]
[]
[ "emoji", "matplotlib", "python", "seaborn", "windows" ]
stackoverflow_0061701600_emoji_matplotlib_python_seaborn_windows.txt
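Applying the second answer's offset_image helper to the asker's scatter plot is a one-loop change; a sketch, assuming words holds the emoji strings and twodim the PCA coordinates as in the question:

for word, (x, y) in zip(words, twodim):
    offset_image([x, y], word, plt.gca())  # draws the emoji image at each point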
How to save the value entered by the user in the calculated field?
I made the computed field editable using the inverse field, but when I enter the value manually, on saving it is replaced with the value from _compute_test. How can I save the value entered manually?

My .py file:

class SaleOrderInherited(models.Model):
    _inherit = 'sale.order'

    custom_field = fields.Char(string='Test', compute="_compute_test", inverse="_inverse_compute_test")
    # warning = fields.Boolean(default=False)

    @api.depends('tax_totals_json', 'date_order')
    def _compute_test(self):
        for record in self:
            if int(json.loads(record.tax_totals_json)['amount_total']) == 0:
                record.custom_field = randint(1, 1000)
            else:
                record.custom_field = f"{json.loads(record.tax_totals_json)['amount_total']} - {record.date_order}"

    def _inverse_compute_test(self):
        pass

My .xml file:

<odoo>
    <data>
        <!-- Inherit the sale order form view -->
        <record id="view_sale_order_custom" model="ir.ui.view">
            <field name="name">sale.order.custom.form.inherited</field>
            <field name="model">sale.order</field>
            <field name="inherit_id" ref="sale.view_order_form"/>
            <field name="arch" type="xml">
                <xpath expr="//field[@name='partner_id']" position="after">
                    <field name="custom_field"/>
                </xpath>
            </field>
        </record>
    </data>
</odoo>

I tried using force_save, but it doesn't help, and I didn't find another way.
[ "Use store=True attribute in your field.\nYou also need to enforce your compute method logic so it doesn't always override field's value.\nforce_save attribute is used for read-only fields, which would be ignored from create and write methods instead.\n" ]
[ 0 ]
[]
[]
[ "odoo", "odoo_15", "python", "python_3.x" ]
stackoverflow_0074507731_odoo_odoo_15_python_python_3.x.txt
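Put together, the answer's two suggestions could look like the following sketch (hedged: the skip-if-set guard is one way to "not always override", and whether it fits depends on when the field should be recomputed):

custom_field = fields.Char(
    string='Test',
    compute='_compute_test',
    inverse='_inverse_compute_test',
    store=True,  # persist the value, including manual edits via the inverse
)

@api.depends('tax_totals_json', 'date_order')
def _compute_test(self):
    for record in self:
        if record.custom_field:
            continue  # keep a manually entered value
        ...  # original compute logic from the question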
Unable to understand dictionaries behavior when simulating a linked list
I am trying to simulate linked lists in Python using dictionaries - h (stands for head) and t (stands for tail):

t = {"value": 5, "next": None}
h = t

I add a new node n1 as the value of the key "next" in t:

n1 = {"value": 10, "next": None}
t["next"] = n1

print(t)  # {'value': 5, 'next': {'value': 10, 'next': None}}
print(h)  # {'value': 5, 'next': {'value': 10, 'next': None}}

I understand that is because both h and t are referring to the same memory address at this moment. This is also confirmed by:

print(id(h))  # 2429471179008
print(id(t))  # 2429471179008

I now changed the value of t to the node n1:

t = n1

print(t)  # {'value': 10, 'next': None}
print(h)  # {'value': 5, 'next': {'value': 10, 'next': None}}

My understanding is that at this point h and t will start referring to different memory addresses. This is also confirmed by:

print(id(h))  # 2429471179008
print(id(t))  # 2429470939776

Now I add one more new node n2 as the value of the key "next" in t:

n2 = {"value": 16, "next": None}
t["next"] = n2

print(t)  # {'value': 10, 'next': {'value': 16, 'next': None}}
print(h)  # {'value': 5, 'next': {'value': 10, 'next': {'value': 16, 'next': None}}}

Why is that so? How did the change to t impact h, and that too in this fashion? I was expecting the output of print(h) to still show up as {'value': 5, 'next': {'value': 10, 'next': None}}.
[ "\nMy understanding is that at this point h and t will start referring to different memory addresses.\n\nTrue, but h[\"next\"] and t reference the same.\nHere is a visualisation of all the actions you performed:\nt = {\"value\": 5, \"next\": None}\nh = t\n\nThe resulting state can be pictured like this:\n t h\n ↓ ↓\n┌────────────┐\n│ value: 5 │\n│ next: None │\n└────────────┘\n\nn1 = {\"value\": 10, \"next\": None}\nt[\"next\"] = n1\n\nThe resulting state can be pictured like this:\n t h n1\n ↓ ↓ ↓\n┌────────────┐ ┌────────────┐\n│ value: 5 │ │ value: 10 │\n│ next: ────────►│ next: None │\n└────────────┘ └────────────┘\n\nThen you prepare a next operation with:\nt = n1\n\n h n1 t\n ↓ ↓ ↓\n┌────────────┐ ┌────────────┐\n│ value: 5 │ │ value: 10 │\n│ next: ────────►│ next: None │\n└────────────┘ └────────────┘\n\nHere you can already see that although h and t reference different nodes, they are not independent. h[\"next\"] references the same node as t.\nThen the script continues with:\nn2 = {\"value\": 16, \"next\": None}\n\nThe resulting state can be pictured like this:\n h n1 t n2\n ↓ ↓ ↓ ↓\n┌────────────┐ ┌────────────┐ ┌────────────┐\n│ value: 5 │ │ value: 10 │ │ value: 16 │\n│ next: ────────►│ next: None │ │ next: None │\n└────────────┘ └────────────┘ └────────────┘\n\nThe following statement:\nt[\"next\"] = n2\n\n...leads to this:\n h n1 t n2\n ↓ ↓ ↓ ↓\n┌────────────┐ ┌────────────┐ ┌────────────┐\n│ value: 5 │ │ value: 10 │ │ value: 16 │\n│ next: ────────►│ next: ────────►│ next: None │\n└────────────┘ └────────────┘ └────────────┘\n\nHere we have t still referencing a sublist of h.\nI hope this clarifies it.\n" ]
[ 0 ]
[]
[]
[ "dictionary", "linked_list", "python" ]
stackoverflow_0074507361_dictionary_linked_list_python.txt
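To make the pattern in the answer concrete, here is a small sketch of the usual tail-append idiom: link the new node, then advance the tail reference, so h keeps the whole list while t always points at the last node.

def append(tail, value):
    node = {"value": value, "next": None}
    tail["next"] = node  # link the new node after the current tail
    return node          # the new node becomes the tail

h = t = {"value": 5, "next": None}
t = append(t, 10)
t = append(t, 16)
print(h)  # {'value': 5, 'next': {'value': 10, 'next': {'value': 16, 'next': None}}}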
Is there an easy way to use DBSCAN in python with dimensions higher than 2?
I've been working on a machine learning project using clustering algorithms, and I'm looking into using scikit-learn's DBSCAN implementation based on the data that I'm working with. However, whenever I try to run it with my feature arrays, it throws the following error:

ValueError: Found array with dim 3. Estimator expected <= 2.

This gives me the impression that scikit's DBSCAN only supports two-dimensional features. Am I wrong in thinking this? If not, is there an implementation of DBSCAN that supports higher-dimensional feature arrays? Thanks for any help you can offer.

Edit

Here's the code that I'm using for my DBSCAN script. The idea is to read data from a number of different CSVs, save them into an array, and then dump them into a pickle file so that the model can load them in the future and run DBSCAN.

def get_clusters(fileList, arraySavePath):
    # Create empty array
    fitting = []

    # Get values from all files, save to singular array
    for filePath in fileList:
        df = pd.read_csv(filePath, usecols=use_cols)
        fitting.append(df.values.tolist())

    # Save array to its own csv file
    with open(arraySavePath, "wb") as fp:
        pickle.dump(fitting, fp)

def predict_cluster(modelPath, predictInput):
    # Load the cluster data
    with open(modelPath, "rb") as fp:
        fitting = pickle.load(fp)

    # DBSCAN fit
    clustering = DBSCAN(eps=3, min_samples=2)
    clustering.fit(fitting)

    # Predict the label
    return clustering.predict_fit(predictInput)
[ "I believe the issue is with the \"min_samples\" parameter. The data you're fitting contains 3 features/dimensions but you've set \"min_samples=2\". Min_samples has to be equal to or greater than the number of features in your dataset.\n", "I have an example of DBSCAN on my blog.\nimport statsmodels.api as sm\nimport numpy as np\nimport pandas as pd\n\nmtcars = sm.datasets.get_rdataset(\"mtcars\", \"datasets\", cache=True).data\ndf_cars = pd.DataFrame(mtcars)\ndf_cars.head()\n\nfrom numpy import unique\nfrom numpy import where\nfrom sklearn.datasets import make_classification\nfrom sklearn.cluster import KMeans\nfrom matplotlib import pyplot\n\n# define dataset\nX = df_cars[['mpg','hp']]\n\n\n# define the model\nmodel = KMeans(n_clusters=8)\n# fit the model\nmodel.fit(X)\n\n# assign a cluster to each example\nyhat = model.predict(X)\n\nX['kmeans']=yhat\n\npyplot.scatter(X['mpg'], X['hp'], c=X['kmeans'], cmap='rainbow', s=50, alpha=0.8)\n\n\nfrom sklearn.cluster import DBSCAN\nmodel = DBSCAN(eps=0.30, min_samples=9)\n\n#predict the labels of clusters.\nlabel = model.fit_predict(df_cars)\nlabel\n\ndf_cars['dbscan'] = label\ndf_cars\n\n\nhttps://github.com/ASH-WICUS/Notebooks/blob/master/Clustering%20Algorithms%20Compared.ipynb\n" ]
[ 1, 0 ]
[]
[]
[ "cluster_analysis", "dbscan", "python", "scikit_learn" ]
stackoverflow_0061277791_cluster_analysis_dbscan_python_scikit_learn.txt
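For what it's worth, the dim-3 error in the question comes from appending one list of rows per CSV, which produces a 3-D structure; scikit-learn estimators want a single 2-D (n_samples, n_features) array. A hedged sketch of the reshaping (fileList and use_cols as in the question):

import pandas as pd
from sklearn.cluster import DBSCAN

frames = [pd.read_csv(path, usecols=use_cols) for path in fileList]
X = pd.concat(frames, ignore_index=True).to_numpy()  # shape (n_samples, n_features)

clustering = DBSCAN(eps=3, min_samples=2).fit(X)
labels = clustering.labels_  # DBSCAN has no predict(); fitted labels live here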
How to write a conditional statement based on combination of two columns and a dictionary, using the dictionary for a mapping in a new column?
I am working with a pandas dataframe (the dataframe is called market_info_df): And I have the following Python code: market_info_df['is_and_mp'] = market_info_df['issue_status'] + market_info_df['market_phase'] no_collision_issue_status = ['000', '200', '203', '204', '300'] MARKET_STATES_DICT = { ('000', ' '): MARKET_STATES.CLOSED, ('100', ' ', 'F'): MARKET_STATES.OPENING_AUCTION, ('200', ' '): MARKET_STATES.CONTINUOUS_TRADING, ('203', ' '): MARKET_STATES.UNSCHEDULED_AUCTION, ('204', ' '): MARKET_STATES.UNSCHEDULED_AUCTION, ('100', 'B0'): MARKET_STATES.UNSCHEDULED_AUCTION, ('200', 'B1'): MARKET_STATES.CONTINUOUS_TRADING, ('400', 'C0'): MARKET_STATES.HALTED, ('400', 'C1'): MARKET_STATES.CONTINUOUS_TRADING, ('400', 'D0'): MARKET_STATES.HALTED, ('400', 'D1'): MARKET_STATES.POST_TRADE} I am trying to write a condition such that if the is_and_mp is in the no_collision_issue_status list, OR the trading_status is not , then use the MARKET_STATES_DICT to map a new column called market_state. Here is what I have written, but I get an error TypeError: unhashable type: 'Series': market_info_df.loc[(market_info_df['is_and_mp'] in no_collision_issue_status) | (~market_info_df['trading_state'] == ' '), 'market_state'] = MARKET_STATES_DICT[(market_info_df['issue_status'], market_info_df['trading_state'])] I understand what is wrong and why I am getting the error, but I am not sure how to fix it!
[ "Use apply function on dataframe. Check for the desired condition as you have written. If true then return the value from dict else return None:\nmarket_info_df[\"market_state\"] = market_info_df.apply(lambda row: MARKET_STATES_DICT[(row[\"is_and_mp\"],row[\"trading_status\"])] if row[\"is_and_mp\"] in no_collision_issue_status or row[\"trading_status\"] != \" \" else None, axis=1)\n\nFull example with dummy data:\nmarket_info_df = pd.DataFrame(data=[[\"10\",\"0\",\"B0\"],[\"20\",\"0\",\" \"],[\"40\",\"0\",\"D1\"]], columns=[\"issue_status\", \"market_phase\", \"trading_status\"])\n\nmarket_info_df['is_and_mp'] = market_info_df['issue_status'] + market_info_df['market_phase']\n\nno_collision_issue_status = ['000', '200', '203', '204', '300']\n\nMARKET_STATES_DICT = {\n('000', ' '): \"CLOSED\",\n('100', ' ', 'F'): \"OPENING_AUCTION\",\n('200', ' '): \"CONTINUOUS_TRADING\",\n('203', ' '): \"UNSCHEDULED_AUCTION\",\n('204', ' '): \"UNSCHEDULED_AUCTION\",\n('100', 'B0'): \"UNSCHEDULED_AUCTION\",\n('200', 'B1'): \"CONTINUOUS_TRADING\",\n('400', 'C0'): \"HALTED\",\n('400', 'C1'): \"CONTINUOUS_TRADING\",\n('400', 'D0'): \"HALTED\",\n('400', 'D1'): \"POST_TRADE\"}\n\nmarket_info_df[\"market_state\"] = market_info_df.apply(lambda row: MARKET_STATES_DICT[(row[\"is_and_mp\"],row[\"trading_status\"])] if row[\"is_and_mp\"] in no_collision_issue_status or row[\"trading_status\"] != \" \" else None, axis=1)\n\n[Out]:\n issue_status market_phase trading_status is_and_mp market_state\n0 10 0 B0 100 UNSCHEDULED_AUCTION\n1 20 0 200 CONTINUOUS_TRADING\n2 40 0 D1 400 POST_TRADE\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074508231_dataframe_pandas_python.txt
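For larger frames, the same lookup can be done without a per-row apply by mapping a Series of (is_and_mp, trading_status) tuples through the dictionary. A sketch using the dummy data from the answer above (the dict is trimmed to the three entries those rows hit):

import pandas as pd

market_info_df = pd.DataFrame(
    data=[["10", "0", "B0"], ["20", "0", " "], ["40", "0", "D1"]],
    columns=["issue_status", "market_phase", "trading_status"])
market_info_df["is_and_mp"] = market_info_df["issue_status"] + market_info_df["market_phase"]

no_collision_issue_status = ["000", "200", "203", "204", "300"]
MARKET_STATES_DICT = {("100", "B0"): "UNSCHEDULED_AUCTION",
                      ("200", " "): "CONTINUOUS_TRADING",
                      ("400", "D1"): "POST_TRADE"}

# One tuple key per row, mapped through the dict only where the condition holds.
keys = pd.Series(list(zip(market_info_df["is_and_mp"], market_info_df["trading_status"])),
                 index=market_info_df.index)
mask = (market_info_df["is_and_mp"].isin(no_collision_issue_status)
        | (market_info_df["trading_status"] != " "))
market_info_df.loc[mask, "market_state"] = keys[mask].map(MARKET_STATES_DICT)
print(market_info_df)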
Q: How can I tell what filter size to use for a certain size image? I was developing a GAN to generate 48x48 images of faces. However, the generator makes strange images no matter how much training is done, and no matter how much the discriminator thinks it's fake. This leads me to believe that it is an architectural problem. untrained output After 25 epochs The problem is obvious. Squares generating in patterns instead of random pixels, as would be expected from an untrained GAN. This problem appears to be related to the filter size of the deconvolution layers in the generator, but I'm not sure how. This is an image from a 5x5 kernel size My question is: Why is this happening? What effect is the filter size having on the images that causes this sort of pattern How can I tell what size filter to use in relation to the image size or other parameter? Models: generator = keras.Sequential([ keras.layers.Dense(4*4*32, input_shape=(100,), use_bias=False), keras.layers.LeakyReLU(), keras.layers.Reshape((4, 4, 32)), SpectralNormalization(keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding="same", use_bias=False)), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding="same", use_bias=False)), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2DTranspose(3, (2, 2), strides=(3, 3), padding="same", use_bias=False)), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2DTranspose(3, (2, 2), strides=(2, 2), padding="same", use_bias=False)), ]) discriminator = keras.Sequential([ keras.layers.Conv2D(128, (2, 2), strides=(2, 2), input_shape=(96, 96, 3), padding="same"), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2D(64, (2, 2), strides=(2, 2), padding="same")), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2D(32, (2, 2), strides=(2, 2), padding="same")), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2D(16, (2, 2), strides=(2, 2), padding="same")), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2D(8, (2, 2), strides=(2, 2), padding="same")), keras.layers.LeakyReLU(), keras.layers.Flatten(), keras.layers.Dense(1, activation="sigmoid") ]) The training loop was taken from the Tensorflow GAN tutorial cross_entropy = tf.keras.losses.BinaryCrossentropy() def discriminator_loss(real_output, fake_output): real_loss = cross_entropy(tf.ones_like(real_output), real_output) fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output) total_loss = real_loss + fake_loss return total_loss def generator_loss(fake_output): return cross_entropy(tf.ones_like(fake_output), fake_output) generator_optimizer = tf.keras.optimizers.Adam(1e-4) discriminator_optimizer = tf.keras.optimizers.Adam(1e-4) @tf.function def train_step(images): noise = tf.random.normal([32, 100]) with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape: generated_images = generator(noise, training=True) real_output = discriminator(images, training=True) fake_output = discriminator(generated_images, training=True) gen_loss = generator_loss(fake_output) disc_loss = discriminator_loss(real_output, fake_output) gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables) gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables) generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables)) discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, 
discriminator.trainable_variables)) A: Issue is caused when larger strides are introduced at the final layers Consider a 1d case: input values = i1 | i2 | i3 transposeConv1(k=2,s=3) weights (k=2) = w1 | w2 | initialization = 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 output =i1w1|i1w2| 0 |i2w1|i2w2| 0 |i3w1|i3w2 = o1 | o2 | 0 | o4 | o5 | 0 | o7 | o8 It introduces blockiness with zeros in between Consider the last layer together with the previous layer transposeConv1(k=2,s=2) weights (k=2) = w1 | w2 | initialization = 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ... output =o1w1|o1w2|o2w1|o2w2| 0 | 0 |o4w1|o4w2| ... Now more zeros are introduced A: I don't think you are supposed to use SpectralNormalization in the generator. It is meant to smooth out the learning for WGAN and should only be used in the discriminator. SNGAN Paper You keep getting checkered patterns because ConvTranspose generates them. To counter them, your kernel size should be divisible by stride in ConvTranspose. A: I believe the issue is your weight init. Can you print your input layer weights before and after training? So generator.get_weights(), then train, then generator.get_weights() again. Could you also do the same with your output layer of the generator? A: Here is the code for WGAN with Gradient Penalty BATCH_SIZE = 32 latent_dim = 128 c_lambda = 10 # Will reuse this seed overtime to visualize generated image at each epoch seed = tf.random.normal([4, latent_dim]) def get_generator_model(): ... return model generator = get_generator_model() noise = tf.random.normal([1, 128]) generated_image = generator(noise, training=False) def get_discriminator_model(): ... return model discriminator = get_discriminator_model() decision = discriminator(generated_image) class WGAN(tf.keras.Model):   def __init__(self, discriminator, generator, latent_dim, discriminator_extra_steps=3):     super(WGAN, self).__init__()          self.discriminator = discriminator     self.generator = generator     self.latent_dim = latent_dim     self.d_steps = discriminator_extra_steps   def compile(self, d_optimizer, g_optimizer, d_loss_fn, g_loss_fn):     super(WGAN, self).compile()        self.d_optimizer = d_optimizer     self.g_optimizer = g_optimizer     self.d_loss_fn = d_loss_fn     self.g_loss_fn = g_loss_fn   def gradient_penalty(self, batch_size, real_images, fake_images):     epsilon = tf.random.normal(real_images[0].shape, 1, 1, 1)     mixed_images = real_images * epsilon + fake_images * (1 - epsilon)     with tf.GradientTape() as tape:       tape.watch(mixed_images)       mixed_scores = self.discriminator(mixed_images)     grads = tape.gradient(mixed_scores, mixed_images)     # norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))     norm = tf.norm(grads, axis=0)     gp = tf.reduce_mean((norm - 1.0) ** 2)     return gp        def train_step(self, real_images):     if isinstance(real_images, tuple):       real_images = real_images[0]     batch_size = tf.shape(real_images)[0]     # Train the discriminator     for i in range(self.d_steps):       random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))       with tf.GradientTape() as tape:         fake_images = self.generator(random_latent_vectors, training=True)         fake_logits = self.discriminator(fake_images, training=True)         real_logits = self.discriminator(real_images, training=True)                  gp = self.gradient_penalty(batch_size, real_images, fake_images)                  d_loss = self.d_loss_fn(real_img=real_logits, fake_img=fake_logits, gp=gp)       
d_gradient = tape.gradient(d_loss, self.discriminator.trainable_variables)       self.d_optimizer.apply_gradients(zip(d_gradient, self.discriminator.trainable_variables))     # Train the generator     random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))     with tf.GradientTape() as tape:       generated_images = self.generator(random_latent_vectors, training=True)       gen_img_logits = self.discriminator(generated_images, training=True)       g_loss = self.g_loss_fn(gen_img_logits)     gen_gradient = tape.gradient(g_loss, self.generator.trainable_variables)     self.g_optimizer.apply_gradients(zip(gen_gradient, self.generator.trainable_variables))          return {"d_loss": d_loss, "g_loss": g_loss} class GANMonitor(tf.keras.callbacks.Callback):   def __init__(self, num_img=4, latent_dim=128):     self.num_img = num_img     self.latent_dim = latent_dim   def on_epoch_end(self, epoch, logs=None):     generated_images = self.model.generator(seed)     generated_images = (generated_images * 127.5) + 127.5     fig = plt.figure(figsize=(8, 8))     for i in range(self.num_img):       plt.subplot(2, 2, i+1)       plt.imshow(generated_images[i, :, :, 0], cmap='gray')       plt.axis('off')     plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))     plt.show() generator_optimizer = tf.keras.optimizers.Adam(     learning_rate=0.0002, beta_1=0.5, beta_2=0.999) discriminator_optimizer = tf.keras.optimizers.Adam(     learning_rate=0.0002, beta_1=0.5, beta_2=0.999) def discriminator_loss(real_img, fake_img, gp):     real_loss = tf.reduce_mean(real_img)     fake_loss = tf.reduce_mean(fake_img)     return fake_loss - real_loss + c_lambda * gp def generator_loss(fake_img):     return -tf.reduce_mean(fake_img) epochs = 250 # callback cbk = GANMonitor(num_img=4, latent_dim=latent_dim) wgan = WGAN(discriminator,                   generator,             latent_dim=latent_dim,             discriminator_extra_steps=3) wgan.compile(d_optimizer=discriminator_optimizer,              g_optimizer=generator_optimizer,              g_loss_fn=generator_loss,              d_loss_fn=discriminator_loss) history = wgan.fit(dataset, batch_size=BATCH_SIZE, epochs=epochs, callbacks=[cbk])
How can I tell what filter size to use for a certain size image?
I was developing a GAN to generate 48x48 images of faces. However, the generator makes strange images no matter how much training is done, and no matter how much the discriminator thinks it's fake. This leads me to believe that it is an architectural problem. untrained output After 25 epochs The problem is obvious. Squares generating in patterns instead of random pixels, as would be expected from an untrained GAN. This problem appears to be related to the filter size of the deconvolution layers in the generator, but I'm not sure how. This is an image from a 5x5 kernel size My question is: Why is this happening? What effect is the filter size having on the images that causes this sort of pattern How can I tell what size filter to use in relation to the image size or other parameter? Models: generator = keras.Sequential([ keras.layers.Dense(4*4*32, input_shape=(100,), use_bias=False), keras.layers.LeakyReLU(), keras.layers.Reshape((4, 4, 32)), SpectralNormalization(keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding="same", use_bias=False)), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding="same", use_bias=False)), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2DTranspose(3, (2, 2), strides=(3, 3), padding="same", use_bias=False)), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2DTranspose(3, (2, 2), strides=(2, 2), padding="same", use_bias=False)), ]) discriminator = keras.Sequential([ keras.layers.Conv2D(128, (2, 2), strides=(2, 2), input_shape=(96, 96, 3), padding="same"), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2D(64, (2, 2), strides=(2, 2), padding="same")), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2D(32, (2, 2), strides=(2, 2), padding="same")), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2D(16, (2, 2), strides=(2, 2), padding="same")), keras.layers.LeakyReLU(), SpectralNormalization(keras.layers.Conv2D(8, (2, 2), strides=(2, 2), padding="same")), keras.layers.LeakyReLU(), keras.layers.Flatten(), keras.layers.Dense(1, activation="sigmoid") ]) The training loop was taken from the Tensorflow GAN tutorial cross_entropy = tf.keras.losses.BinaryCrossentropy() def discriminator_loss(real_output, fake_output): real_loss = cross_entropy(tf.ones_like(real_output), real_output) fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output) total_loss = real_loss + fake_loss return total_loss def generator_loss(fake_output): return cross_entropy(tf.ones_like(fake_output), fake_output) generator_optimizer = tf.keras.optimizers.Adam(1e-4) discriminator_optimizer = tf.keras.optimizers.Adam(1e-4) @tf.function def train_step(images): noise = tf.random.normal([32, 100]) with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape: generated_images = generator(noise, training=True) real_output = discriminator(images, training=True) fake_output = discriminator(generated_images, training=True) gen_loss = generator_loss(fake_output) disc_loss = discriminator_loss(real_output, fake_output) gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables) gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables) generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables)) discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
[ "Issue is caused when larger strides are introduced at the final layers\n\nConsider a 1d case:\n\ninput values = i1 | i2 | i3 \n\ntransposeConv1(k=2,s=3) \nweights (k=2) = w1 | w2 |\ninitialization = 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0\noutput =i1w1|i1w2| 0 |i2w1|i2w2| 0 |i3w1|i3w2\n = o1 | o2 | 0 | o4 | o5 | 0 | o7 | o8 \nIt introduces blockiness with zeros in between\n\nConsider the last layer together with the previous layer\ntransposeConv1(k=2,s=2) \nweights (k=2) = w1 | w2 |\ninitialization = 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ...\noutput =o1w1|o1w2|o2w1|o2w2| 0 | 0 |o4w1|o4w2| ...\nNow more zeros are introduced\n\n", "I don't think you are supposed to use SpectralNormalization in the generator. It is meant to smooth out the learning for WGAN and should only be used in the discriminator. SNGAN Paper\nYou keep getting checkered patterns because ConvTranspose generates them. To counter them, your kernel size should be divisible by stride in ConvTranspose.\n", "I believe the issue is your weight init. Can you print your input layer weights before and after training? So generator.get_weights(), then train, then generator.get_weights() again.\nCould you also do the same with your output layer of the generator?\n", "Here is the code for WGAN with Gradient Penalty\nBATCH_SIZE = 32\nlatent_dim = 128\nc_lambda = 10\n\n# Will reuse this seed overtime to visualize generated image at each epoch\nseed = tf.random.normal([4, latent_dim])\n\ndef get_generator_model():\n...\nreturn model\n\ngenerator = get_generator_model()\n\nnoise = tf.random.normal([1, 128])\ngenerated_image = generator(noise, training=False)\n\ndef get_discriminator_model():\n...\nreturn model\n\ndiscriminator = get_discriminator_model()\ndecision = discriminator(generated_image)\n\nclass WGAN(tf.keras.Model):\n  def __init__(self, discriminator, generator, latent_dim, discriminator_extra_steps=3):\n    super(WGAN, self).__init__()\n    \n    self.discriminator = discriminator\n    self.generator = generator\n    self.latent_dim = latent_dim\n    self.d_steps = discriminator_extra_steps\n\n  def compile(self, d_optimizer, g_optimizer, d_loss_fn, g_loss_fn):\n    super(WGAN, self).compile()\n  \n    self.d_optimizer = d_optimizer\n    self.g_optimizer = g_optimizer\n    self.d_loss_fn = d_loss_fn\n    self.g_loss_fn = g_loss_fn\n\n  def gradient_penalty(self, batch_size, real_images, fake_images):\n    epsilon = tf.random.normal(real_images[0].shape, 1, 1, 1)\n    mixed_images = real_images * epsilon + fake_images * (1 - epsilon)\n\n    with tf.GradientTape() as tape:\n      tape.watch(mixed_images)\n      mixed_scores = self.discriminator(mixed_images)\n\n    grads = tape.gradient(mixed_scores, mixed_images)\n    # norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))\n    norm = tf.norm(grads, axis=0)\n    gp = tf.reduce_mean((norm - 1.0) ** 2)\n    return gp\n    \n  def train_step(self, real_images):\n    if isinstance(real_images, tuple):\n      real_images = real_images[0]\n\n    batch_size = tf.shape(real_images)[0]\n\n    # Train the discriminator\n    for i in range(self.d_steps):\n      random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))\n      with tf.GradientTape() as tape:\n        fake_images = self.generator(random_latent_vectors, training=True)\n        fake_logits = self.discriminator(fake_images, training=True)\n        real_logits = self.discriminator(real_images, training=True)\n        \n        gp = self.gradient_penalty(batch_size, real_images, fake_images)\n        \n        
d_loss = self.d_loss_fn(real_img=real_logits, fake_img=fake_logits, gp=gp)\n\n      d_gradient = tape.gradient(d_loss, self.discriminator.trainable_variables)\n      self.d_optimizer.apply_gradients(zip(d_gradient, self.discriminator.trainable_variables))\n\n    # Train the generator\n    random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))\n    with tf.GradientTape() as tape:\n      generated_images = self.generator(random_latent_vectors, training=True)\n      gen_img_logits = self.discriminator(generated_images, training=True)\n      g_loss = self.g_loss_fn(gen_img_logits)\n\n    gen_gradient = tape.gradient(g_loss, self.generator.trainable_variables)\n    self.g_optimizer.apply_gradients(zip(gen_gradient, self.generator.trainable_variables))\n    \n    return {\"d_loss\": d_loss, \"g_loss\": g_loss}\n\nclass GANMonitor(tf.keras.callbacks.Callback):\n  def __init__(self, num_img=4, latent_dim=128):\n    self.num_img = num_img\n    self.latent_dim = latent_dim\n\n  def on_epoch_end(self, epoch, logs=None):\n    generated_images = self.model.generator(seed)\n    generated_images = (generated_images * 127.5) + 127.5\n\n    fig = plt.figure(figsize=(8, 8))\n\n    for i in range(self.num_img):\n      plt.subplot(2, 2, i+1)\n      plt.imshow(generated_images[i, :, :, 0], cmap='gray')\n      plt.axis('off')\n    plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n    plt.show()\n\ngenerator_optimizer = tf.keras.optimizers.Adam(\n    learning_rate=0.0002, beta_1=0.5, beta_2=0.999)\n\ndiscriminator_optimizer = tf.keras.optimizers.Adam(\n    learning_rate=0.0002, beta_1=0.5, beta_2=0.999)\n\ndef discriminator_loss(real_img, fake_img, gp):\n    real_loss = tf.reduce_mean(real_img)\n    fake_loss = tf.reduce_mean(fake_img)\n    return fake_loss - real_loss + c_lambda * gp\n\ndef generator_loss(fake_img):\n    return -tf.reduce_mean(fake_img)\n\nepochs = 250\n\n# callback\ncbk = GANMonitor(num_img=4, latent_dim=latent_dim)\n\nwgan = WGAN(discriminator,\n                  generator,\n            latent_dim=latent_dim,\n            discriminator_extra_steps=3)\n\nwgan.compile(d_optimizer=discriminator_optimizer,\n             g_optimizer=generator_optimizer,\n             g_loss_fn=generator_loss,\n             d_loss_fn=discriminator_loss)\n\nhistory = wgan.fit(dataset, batch_size=BATCH_SIZE, epochs=epochs, callbacks=[cbk])\n\n" ]
[ 1, 0, 0, 0 ]
[]
[]
[ "generative_adversarial_network", "machine_learning", "python", "tensorflow" ]
stackoverflow_0074257567_generative_adversarial_network_machine_learning_python_tensorflow.txt
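The checkerboard pattern in the question is the artifact the first two answers describe: strided Conv2DTranspose layers whose kernel size is not divisible by the stride leave periodic gaps. A common remedy, sketched below for the 48x48 target stated in the question, is to upsample with UpSampling2D and follow with a plain Conv2D; the layer widths are illustrative only, not tuned:

from tensorflow import keras

generator = keras.Sequential([
    keras.layers.Dense(6 * 6 * 128, input_shape=(100,), use_bias=False),
    keras.layers.LeakyReLU(),
    keras.layers.Reshape((6, 6, 128)),
    keras.layers.UpSampling2D(size=(2, 2)),              # 6x6  -> 12x12
    keras.layers.Conv2D(64, (3, 3), padding="same"),
    keras.layers.LeakyReLU(),
    keras.layers.UpSampling2D(size=(2, 2)),              # 12x12 -> 24x24
    keras.layers.Conv2D(32, (3, 3), padding="same"),
    keras.layers.LeakyReLU(),
    keras.layers.UpSampling2D(size=(2, 2)),              # 24x24 -> 48x48
    keras.layers.Conv2D(3, (3, 3), padding="same", activation="tanh"),
])
generator.summary()  # confirms a (None, 48, 48, 3) output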
Q: Appending hashes to a list for reference I'm using the imagehash library in Python and i'm trying to compare one image to a set of images to determine if it's similar to any of them. To avoid having to fetch the hashes every time I run the program I generated each hash and appended it to a list (one time operation) however when running the program to actually compare, it appears that the Hash is it's own type of value and thus can't be easily subtracted/added (Mainly because generating the hashes and adding to a list gives me a HUGE list of True/Falses instead of a clean alphanumeric value like 003c7e7e7e7e0000 Kindly ignore syntax errors as I have edited the actual code for posting this question Basically, can I convert a string to an imagehash? If not, any recommendations? Below is a simplified version of the code i'm using Part 1 (Fetching the hashes) import imagehash import os from PIL import Image def hashit(a): return((imagehash.average_hash(Image.open(a)))) directory=r"C:\Users\65903\Desktop\Images" hashes=[] for filename in os.listdir(directory): f = os.path.join(directory, filename) # checking if it is a file if os.path.isfile(f): has=hashit(f) hashes.append(has) The above generates a ~40 line list of hashes of True/Falses (which isn't really practical to have in my case) Part 2 (Comparing with a list of hashes) from PIL import Image import imagehash import os import time def hashit(a): return((imagehash.average_hash(Image.open(a)))) def ifinlist(p): hashes=['003c7e7e7e7e0000','f8f8d80878f8f801','fad20000ff7f7f0f','f0000028e0f0f7ff','f8f7e8089d3208dd'] for i in hashes: if abs(i-p)<25: return("Similar image found") print(ifinlist(hashit(r"c/users/filenamestufftestidk"))) The above gives an error that imagehash and string cannot be subtracted (which I know to be true) A: The hash value returned by the average_hash method can be converted to a hex value using the str builtin. E.g. str(hashit(img)). To reverse this (for Part 2) you can use the function imagehash.hex_to_hash. from PIL import Image import imagehash import os import time def hashit(a): return((imagehash.average_hash(Image.open(a)))) def ifinlist(p): hashes=['003c7e7e7e7e0000','f8f8d80878f8f801','fad20000ff7f7f0f','f0000028e0f0f7ff','f8f7e8089d3208dd'] for i in hashes: if abs(imagehash.hex_to_hash(i) - p) < 25: return("Similar image found") print(ifinlist(hashit(r"c/users/filenamestufftestidk")))
Appending hashes to a list for reference
I'm using the imagehash library in Python and i'm trying to compare one image to a set of images to determine if it's similar to any of them. To avoid having to fetch the hashes every time I run the program I generated each hash and appended it to a list (one time operation) however when running the program to actually compare, it appears that the Hash is it's own type of value and thus can't be easily subtracted/added (Mainly because generating the hashes and adding to a list gives me a HUGE list of True/Falses instead of a clean alphanumeric value like 003c7e7e7e7e0000 Kindly ignore syntax errors as I have edited the actual code for posting this question Basically, can I convert a string to an imagehash? If not, any recommendations? Below is a simplified version of the code i'm using Part 1 (Fetching the hashes) import imagehash import os from PIL import Image def hashit(a): return((imagehash.average_hash(Image.open(a)))) directory=r"C:\Users\65903\Desktop\Images" hashes=[] for filename in os.listdir(directory): f = os.path.join(directory, filename) # checking if it is a file if os.path.isfile(f): has=hashit(f) hashes.append(has) The above generates a ~40 line list of hashes of True/Falses (which isn't really practical to have in my case) Part 2 (Comparing with a list of hashes) from PIL import Image import imagehash import os import time def hashit(a): return((imagehash.average_hash(Image.open(a)))) def ifinlist(p): hashes=['003c7e7e7e7e0000','f8f8d80878f8f801','fad20000ff7f7f0f','f0000028e0f0f7ff','f8f7e8089d3208dd'] for i in hashes: if abs(i-p)<25: return("Similar image found") print(ifinlist(hashit(r"c/users/filenamestufftestidk"))) The above gives an error that imagehash and string cannot be subtracted (which I know to be true)
[ "The hash value returned by the average_hash method can be converted to a hex value using the str builtin. E.g. str(hashit(img)).\nTo reverse this (for Part 2) you can use the function imagehash.hex_to_hash.\nfrom PIL import Image\nimport imagehash\nimport os\nimport time\n\ndef hashit(a):\n return((imagehash.average_hash(Image.open(a))))\n\ndef ifinlist(p):\n hashes=['003c7e7e7e7e0000','f8f8d80878f8f801','fad20000ff7f7f0f','f0000028e0f0f7ff','f8f7e8089d3208dd']\n for i in hashes:\n if abs(imagehash.hex_to_hash(i) - p) < 25:\n return(\"Similar image found\")\n\nprint(ifinlist(hashit(r\"c/users/filenamestufftestidk\")))\n\n" ]
[ 1 ]
[]
[]
[ "hash", "imagehash", "python" ]
stackoverflow_0074508395_hash_imagehash_python.txt
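Tying the two parts of the answer above together: hashes can be persisted as hex strings with str() and restored with imagehash.hex_to_hash. A minimal end-to-end sketch — the directory and the query image path are placeholders:

import os
import imagehash
from PIL import Image

directory = r"C:\Users\65903\Desktop\Images"   # placeholder path

# Part 1: write one compact hex digest per line.
with open("hashes.txt", "w") as out:
    for filename in os.listdir(directory):
        f = os.path.join(directory, filename)
        if os.path.isfile(f):
            out.write(str(imagehash.average_hash(Image.open(f))) + "\n")

# Part 2: restore the hashes and compare against a query image.
with open("hashes.txt") as src:
    stored = [imagehash.hex_to_hash(line.strip()) for line in src]

query = imagehash.average_hash(Image.open("query.jpg"))  # placeholder image
if any(abs(h - query) < 25 for h in stored):
    print("Similar image found")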
Q: VideoPlayer error loading video only plays sound - Warning: Removing channel layout 0x3, redundant with 2 channels I am trying to use VideoPlayer to load a local video. The program runs fine when it is standalone (in its own file). But, when I bring it into my main program, it loads the video but only plays the sound. I get an error (warning, to be more precise) message: [WARNING] [ffpyplayer ] [ffpyplayer_abuffersink @ 000001e84fb55580] Removing channel layout 0x3, redundant with 2 channels Here's the standalone version: import cv2 from kivy.uix.videoplayer import VideoPlayer from kivymd.app import MDApp class PlayVid(MDApp): def build(self): player = VideoPlayer(source="Roadhouse.mp4") player.state = "play" player.options = {"eos": "stop"} player.allow_stretch = True return player if __name__ == '__main__': PlayVid().run() And here is the same thing split in functions and class in my main program: class PlayVid(MDApp): def playnow(self): # player = VideoPlayer(source='Roadhouse.mp4') # player.state = "play" video = VideoPlayer(source='Roadhouse.mp4') video.state = "play" # player.options = {"eos": "stop"} # player.allow_stretch = True return video class SecondWindow(Screen): def build (self): sm = ScreenManager() self.sec_screen = SecondWindow() sm.add_widget(self.sec_screen) return sm def start_play(self): PlayVid.playnow(self) A kv button in SecondWindow triggers start_play and then PlayVid.playnow(self). That's all. It runs, loads the file, and then just plays the sound. No video. I can't understand what I'm doing wrong. Help? Thanks! I created a standalone program for it, and it works. I just can't understand why it drops the video and plays the sound when brought into the main program. A: Your playnow() method returns the VideoPlayer widget, but that return is ignored. You must add that widget to your GUI in order to see it. Try using this version of start_play(): def start_play(self): v = MDApp.get_running_app().playnow() self.add_widget(v)
VideoPlayer error loading video only plays sound - Warning: Removing channel layout 0x3, redundant with 2 channels
I am trying to use VideoPlayer to load a local video. The program runs fine when it is standalone (in its own file). But, when I bring it into my main program, it loads the video but only plays the sound. I get an error (warning, to be more precise) message: [WARNING] [ffpyplayer ] [ffpyplayer_abuffersink @ 000001e84fb55580] Removing channel layout 0x3, redundant with 2 channels Here's the standalone version: import cv2 from kivy.uix.videoplayer import VideoPlayer from kivymd.app import MDApp class PlayVid(MDApp): def build(self): player = VideoPlayer(source="Roadhouse.mp4") player.state = "play" player.options = {"eos": "stop"} player.allow_stretch = True return player if __name__ == '__main__': PlayVid().run() And here is the same thing split in functions and class in my main program: class PlayVid(MDApp): def playnow(self): # player = VideoPlayer(source='Roadhouse.mp4') # player.state = "play" video = VideoPlayer(source='Roadhouse.mp4') video.state = "play" # player.options = {"eos": "stop"} # player.allow_stretch = True return video class SecondWindow(Screen): def build (self): sm = ScreenManager() self.sec_screen = SecondWindow() sm.add_widget(self.sec_screen) return sm def start_play(self): PlayVid.playnow(self) A kv button in SecondWindow triggers start_play and then PlayVid.playnow(self). That's all. It runs, loads the file, and then just plays the sound. No video. I can't understand what I'm doing wrong. Help? Thanks! I created a standalone program for it, and it works. I just can't understand why it drops the video and plays the sound when brought into the main program.
[ "Your playnow() method returns the VideoPlayer widget, but that return is ignored. You must add that widget to your GUI in order to see it. Try using this version of start_play():\ndef start_play(self):\n\n v = MDApp.get_running_app().playnow()\n self.add_widget(v)\n\n" ]
[ 0 ]
[]
[]
[ "kivy", "kivy_language", "python", "video_player" ]
stackoverflow_0074507339_kivy_kivy_language_python_video_player.txt
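A minimal self-contained sketch of the fix described in the answer above: the VideoPlayer has to be attached to the widget tree, e.g. by the Screen that owns the button. The video file name is a placeholder, and a kv button would call start_play() as in the question:

from kivy.uix.screenmanager import Screen, ScreenManager
from kivy.uix.videoplayer import VideoPlayer
from kivymd.app import MDApp


class SecondWindow(Screen):
    def start_play(self):
        video = VideoPlayer(source="Roadhouse.mp4", state="play",
                            options={"eos": "stop"}, allow_stretch=True)
        self.add_widget(video)  # without this, only the audio track plays


class DemoApp(MDApp):
    def build(self):
        sm = ScreenManager()
        sm.add_widget(SecondWindow(name="second"))
        return sm


if __name__ == "__main__":
    DemoApp().run()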
Q: How to get the highest value per category in a dataframe? I have a dataframe called movie_df that has more than 3000 values of title, score, and rating. Titles are unique. Scores are 0.0 - 10.0. Ratings are either PG-13, G, R, or X. They are sorted by their rating, then ascending score. I want to find the highest rated title per rating. The highest rated title doesn't have an equal rating with another title. title score rating avengers 5.4 PG-13 captain america 6.7 PG-13 iron man 8.6 PG-13 ... ... ... spiderman 7 R daredevil 8.2 R deadpool 10 R Expected output: PG-13 : Iron Man, R : Deadpool I don't want to use a loop to find the highest rated title. I tried: movie_df.sort_values(by=['rating', 'score'], inplace=True) # sort by rating, score print(movie_df.to_string()) # to show dataframe movie_df.groupby('rating').max() It shows me the correct highest score, but the title is wrong. It shows me the max title too, but I don't want that. I want to know the title associated with the highest score. Here is the actual data I'm using with its highest rated titles: Rated G Actual, NC-17 Actual, PG Actual, PG-13 Actual, R Actual And the output: Rated G output, PG Output, PG-13 Output, R Output A: Let's try: movie_df.reset_index(drop=True, inplace=True) m=max(movie_df['score']) print(movie_df['rating'][list(movie_df['score']).index(m)]) I think you can also use groupby() and agg() A: I think your data isn't actually sorted right, that's why you're getting the wrong title but the right score. Try movie_df.groupby('rating').idxmax() and check if you're getting the right index. A: Actually what you want is highest score per rating, you can group each rating by highest score in this way: data = [['avengers', 5.4 ,'PG-13'], ['captain america', 6.7, 'PG-13'], ['spiderman', 7, 'R'], ['daredevil', 8.2, 'R'], ['iron man', 8.6, 'PG-13'], ['deadpool', 10, 'R']] df = pd.DataFrame(data, columns=['title', 'score', 'rating']) # Method 1 using lambda function df = df.groupby('rating').apply(lambda x: x.sort_values('score', ascending = False).head(1)) print(df.reset_index(drop=True)) # Method 2 df = df.sort_values('score', ascending = False).groupby('rating').head(1) print(df.reset_index(drop=True)) Output1: title score rating 0 iron man 8.6 PG-13 1 deadpool 10.0 R Output2: title score rating 0 deadpool 10.0 R 1 iron man 8.6 PG-13 A: I got it. This code outputs the highest scored title per rating. movie_df["rank"] = movie_df.groupby("rating")["score"].rank("dense", ascending=False) movie_df[movie_df["rank"]==1.0][['title','score']] It uses the rank function from pandas. https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.rank.html
How to get the highest value per category in a dataframe?
I have a dataframe called movie_df that has more than 3000 values of title, score, and rating. Titles are unique. Scores are 0.0 - 10.0. Ratings are either PG-13, G, R, or X. They are sorted by their rating, then ascending score. I want to find the highest rated title per rating. The highest rated title doesn't have an equal rating with another title. title score rating avengers 5.4 PG-13 captain america 6.7 PG-13 iron man 8.6 PG-13 ... ... ... spiderman 7 R daredevil 8.2 R deadpool 10 R Expected output: PG-13 : Iron Man, R : Deadpool I don't want to use a loop to find the highest rated title. I tried: movie_df.sort_values(by=['rating', 'score'], inplace=True) # sort by rating, score print(movie_df.to_string()) # to show dataframe movie_df.groupby('rating').max() It shows me the correct highest score, but the title is wrong. It shows me the max title too, but I don't want that. I want to know the title associated with the highest score. Here is the actual data I'm using with its highest rated titles: Rated G Actual, NC-17 Actual, PG Actual, PG-13 Actual, R Actual And the output: Rated G output, PG Output, PG-13 Output, R Output
[ "Let's try:\nmovie_df.reset_index(drop=True, inplace=True)\n\nm=max(movie_df['score'])\n\nprint(movie_df['rating'][list(movie_df['score']).index(m)])\n\n\nI think you can also use groupby() and agg()\n", "I think your data isn't actually sorted right, that's why you're getting the wrong title but the right score.\nTry movie_df.groupby('rating').idxmax() and check if you're getting the right index.\n", "Actually what you want is highest score per rating, you can group each rating by highest score in this way:\ndata = [['avengers', 5.4 ,'PG-13'],\n['captain america', 6.7, 'PG-13'],\n['spiderman', 7, 'R'],\n['daredevil', 8.2, 'R'],\n['iron man', 8.6, 'PG-13'],\n['deadpool', 10, 'R']]\n \ndf = pd.DataFrame(data, columns=['title', 'score', 'rating']) \n# Method 1 using lambda function\ndf = df.groupby('rating').apply(lambda x: x.sort_values('score', ascending = False).head(1))\nprint(df.reset_index(drop=True))\n\n# Method 2 \ndf = df.sort_values('score', ascending = False).groupby('rating').head(1)\nprint(df.reset_index(drop=True))\n\nOutput1:\n title score rating\n0 iron man 8.6 PG-13\n1 deadpool 10.0 R\n\nOutput2:\n title score rating\n0 deadpool 10.0 R\n1 iron man 8.6 PG-13\n\n", "I got it. This code outputs the highest scored title per rating.\nmovie_df[\"rank\"] = movie_df.groupby(\"rating\")[\"score\"].rank(\"dense\", ascending=False)\nmovie_df[movie_df[\"rank\"]==1.0][['title','score']]\n\nIt uses the rank function from pandas.\nhttps://pandas.pydata.org/docs/reference/api/pandas.DataFrame.rank.html\n" ]
[ 1, 0, 0, 0 ]
[]
[]
[ "dataframe", "jupyter_notebook", "pandas", "python" ]
stackoverflow_0074507915_dataframe_jupyter_notebook_pandas_python.txt
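A compact alternative to the rank-based answer above: groupby(...)["score"].idxmax() returns, per rating, the index label of the row holding the maximum score, and .loc pulls those rows out with their titles intact. A sketch with a few of the question's rows:

import pandas as pd

movie_df = pd.DataFrame({"title": ["avengers", "iron man", "spiderman", "deadpool"],
                         "score": [5.4, 8.6, 7.0, 10.0],
                         "rating": ["PG-13", "PG-13", "R", "R"]})

best = movie_df.loc[movie_df.groupby("rating")["score"].idxmax()]
print(best[["rating", "title", "score"]])  # one row per rating: iron man (PG-13), deadpool (R)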
Q: checking user file if user has admin access I am writing a very simple program that checks if a user is logged in with the correct username and password and also if a user has admin access. The password file is a simple text file with the columns separated by tabs. The login part works, but I can't get the code that checks if a user has admin access to work. If a user is an admin, then they get access to a separate menu from a normal user. This is the definition code to check the file for the user type: def admin_user(): with open("password.txt", "r") as f: is_admin = False for line in f: loginInfo = line.split("\t") if loginInfo[2] == "admin": is_admin = True return is_admin The menu should be something like this: if admin_user(): print("do something ") In the program both admin users and regular users see the same menu even if in the username file they only have regular access. A: If I understand correctly what you're trying to achieve, I think your admin_user() is missing a parameter. As is, you're only checking if the admin string is present once or more in your password.txt file, but you don't check which user it is associated with, so the result is the same for all users. What about something like: def admin_user(username: str): with open("password.txt", "r") as f: for line in f: loginInfo = line.split("\t") if loginInfo[0] == username and loginInfo[2] == "admin": return True return False And you would have to call it with a username: if admin_user("donieD"): # you probably want to use a variable here instead of a static string print("do something") This call should succeed for donieD given your password.txt example.
checking user file if user has admin access
I am writing a very simple program that checks if a user is logged in with the correct username and password and also if a user has admin access. The password file is a simple text file with the columns separated by tabs. The login part works, but I can't get the code that checks if a user has admin access to work. If a user is an admin, then they get access to a separate menu from a normal user. This is the definition code to check the file for the user type: def admin_user(): with open("password.txt", "r") as f: is_admin = False for line in f: loginInfo = line.split("\t") if loginInfo[2] == "admin": is_admin = True return is_admin The menu should be something like this: if admin_user(): print("do something ") In the program both admin users and regular users see the same menu even if in the username file they only have regular access.
[ "If I understand correctly what you're trying to achieve, I think your admin_user() is missing a parameter.\nAs is, you're only checking if the admin string is present once or more in your password.txt file but you don't check to which user it is associated to so the result is the same for all users.\nWhat about something like:\ndef admin_user(username: str):\n with open(\"password.txt\", \"r\") as f:\n for line in f:\n loginInfo = line.split(\"\\t\")\n if loginInfo[0] == username and loginInfo[2] == \"admin\":\n return True\n \n return False\n\nAnd you would have to call it with a username:\nif admin_user(\"donieD\"): # you probably want to use a variable here instead of a static string\n print(\"do something\") \n\nThis call should succeeds for donieD given your password.txt example.\n" ]
[ 0 ]
[]
[]
[ "if_statement", "python", "return" ]
stackoverflow_0074507589_if_statement_python_return.txt
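Since the password file is tab-separated, the csv module expresses the same check a little more explicitly; the column layout (username, password, role) is an assumption based on the question's description:

import csv

def admin_user(username):
    with open("password.txt", newline="") as f:
        for row in csv.reader(f, delimiter="\t"):
            # Guard against short or blank lines before indexing column 2.
            if len(row) >= 3 and row[0] == username and row[2] == "admin":
                return True
    return False

if admin_user("donieD"):  # username reused from the answer's example
    print("show admin menu")
else:
    print("show regular menu")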
Q: JSON File Parsing In Python Brings Different Line In Each Execution I am trying to analyze a large dataset from Yelp. The data is in json file format, but it is too large, so the script crashes when it tries to read all the data at the same time. So I decided to read line by line and concat the lines into a dataframe to have a proper sample of the data. f = open('./yelp_academic_dataset_review.json', encoding='utf-8') I tried without encoding utf-8 but it creates an error. I created a function that reads the file line by line and makes a pandas dataframe up to a given number of lines. Anyway, some lines are lists, and the script iterates over each list too and adds it to the dataframe. def json_parser(file, max_chunk): f = open(file) df = pd.DataFrame([]) for i in range(2, max_chunk + 2): try: type(f.readlines(i)) == list for j in range(len(f.readlines(i))): part = json.loads(f.readlines(i)[j]) df2 = pd.DataFrame(part.items()).T df2.columns = df2.iloc[0] df2 = df2.drop(0) datas = [df2, df] df2 = pd.concat(datas) df = df2 except: f = open(file, encoding = "utf-8") for j in range(len(f.readlines(i))): try: part = json.loads(f.readlines(i)[j-1]) except: print(i,j) df2 = pd.DataFrame(part.items()).T df2.columns = df2.iloc[0] df2 = df2.drop(0) datas = [df2, df] df2 = pd.concat(datas) df = df2 df2.reset_index(inplace=True, drop=True) return df2 But I am still getting an error that the list index is out of range. (Yes, I used print to debug.) So I looked closer at the lines which cause this error. But very interestingly, when I try to look at those lines, the script gives me a different list. Here is what I mean: I ran the cells repeatedly and got a different length of the list each time. So I looked at the lists: It seems they are completely different lists. In each run it brings a different list although the line number is the same. And the readlines documentation is not helping. What am I missing? Thanks in advance. A: You are using the expression f.readlines(i) several times as if it was referring to the same set of lines each time. But as a side effect of evaluating the expression, more lines are actually read from the file. At one point you are basing the indices j on more lines than are actually available, because they came from a different invocation of f.readlines. You should use f.readlines(i) only once in each iteration of the for i in ... loop and store its result in a variable instead.
JSON File Parsing In Python Brings Different Line In Each Execution
I am trying to analyze a large dataset from Yelp. The data is in json file format, but it is too large, so the script crashes when it tries to read all the data at the same time. So I decided to read line by line and concat the lines into a dataframe to have a proper sample of the data. f = open('./yelp_academic_dataset_review.json', encoding='utf-8') I tried without encoding utf-8 but it creates an error. I created a function that reads the file line by line and makes a pandas dataframe up to a given number of lines. Anyway, some lines are lists, and the script iterates over each list too and adds it to the dataframe. def json_parser(file, max_chunk): f = open(file) df = pd.DataFrame([]) for i in range(2, max_chunk + 2): try: type(f.readlines(i)) == list for j in range(len(f.readlines(i))): part = json.loads(f.readlines(i)[j]) df2 = pd.DataFrame(part.items()).T df2.columns = df2.iloc[0] df2 = df2.drop(0) datas = [df2, df] df2 = pd.concat(datas) df = df2 except: f = open(file, encoding = "utf-8") for j in range(len(f.readlines(i))): try: part = json.loads(f.readlines(i)[j-1]) except: print(i,j) df2 = pd.DataFrame(part.items()).T df2.columns = df2.iloc[0] df2 = df2.drop(0) datas = [df2, df] df2 = pd.concat(datas) df = df2 df2.reset_index(inplace=True, drop=True) return df2 But I am still getting an error that the list index is out of range. (Yes, I used print to debug.) So I looked closer at the lines which cause this error. But very interestingly, when I try to look at those lines, the script gives me a different list. Here is what I mean: I ran the cells repeatedly and got a different length of the list each time. So I looked at the lists: It seems they are completely different lists. In each run it brings a different list although the line number is the same. And the readlines documentation is not helping. What am I missing? Thanks in advance.
[ "You are using the expression f.readlines(i) several times as if it was referring to the same set of lines each time.\nBut as as side effect of evaluating the expression, more lines are actually read from the file. At one point you are basing the indices j on more lines than are actually available, because they came from a different invocation of f.readlines.\nYou should use f.readlines(i) only once in each iteration of the for i in ... loop and store its result in a variable instead.\n" ]
[ 1 ]
[]
[]
[ "json", "python" ]
stackoverflow_0074508470_json_python.txt
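A sketch of the fix the answer prescribes — iterate the file object itself so each line is read exactly once, parse it with json.loads, and build the DataFrame in a single pass instead of concatenating inside the loop:

import json
import pandas as pd

def json_parser(path, max_rows):
    records = []
    with open(path, encoding="utf-8") as f:
        for i, line in enumerate(f):
            if i >= max_rows:
                break
            records.append(json.loads(line))  # one JSON object per line
    return pd.DataFrame(records)

# df = json_parser("./yelp_academic_dataset_review.json", 1000)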
Q: A way in Python to check previous values based on a condition and save them in a separate array? I have a list of numbers saved in an array in Python. What I want to do is to continuously check if a value in that array is lower than all the previous ones, and then generate a number based on how many there are. Is there a way to do this? I can't figure out how to do it. Alternatively, check the numbers and save all the higher ones in a separate array and then use a basic command to count the numbers in that one. For example, I have the values: 1 3 6 5 9 5 2 8 1 10 4 Let's say I choose the last number, number four. Then my goal is to generate the number "6", since that is the count of previous numbers that are higher than four. I haven't really tried anything since I don't know where to start. I have an assignment due in a few days and would really appreciate some help with this!
A way in Python to check previous values based on a condition and save them in a separate array?
I have a list of numbers saved in an array in Python. What I want to do is to continuously check if a value in that array is lower than all the previous ones, and then generate a number based on how many there are. Is there a way to do this? I can't figure out how to do it. Alternatively, check the numbers and save all the higher ones in a separate array and then use a basic command to count the numbers in that one. For example, I have the values: 1 3 6 5 9 5 2 8 1 10 4 Let's say I choose the last number, number four. Then my goal is to generate the number "6", since that is the count of previous numbers that are higher than four. I haven't really tried anything since I don't know where to start. I have an assignment due in a few days and would really appreciate some help with this!
[]
[]
[ "create a list:\nal = [1,3,6,5,9,5,2,8,1,10,4]\n\nyour selected item(4) is at the index of : 11\nselected_item = 4\n\nindex = al.index(4) # will result as 11\n\n\nlen([al[a] for a in range(index) if al[a] > selected_item])\n\nwill give you the result you want.\n" ]
[ -2 ]
[ "arrays", "conditional_statements", "counting", "python" ]
stackoverflow_0074508559_arrays_conditional_statements_counting_python.txt
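For the record above, the count the poster wants is a one-liner over a slice; working from the chosen position rather than list.index avoids picking up an earlier duplicate of the same value, which is the weakness of the index-based attempt shown:

values = [1, 3, 6, 5, 9, 5, 2, 8, 1, 10, 4]
pos = len(values) - 1                 # position of the chosen number (4)

# Count the earlier values that are higher than the chosen one.
higher_before = sum(1 for v in values[:pos] if v > values[pos])
print(higher_before)                  # 6, matching the example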
Q: How to rotate letters in a python string Create a scrolling_text function that accepts a string as a parameter, sequentially rearranges all the characters in the string from the zero index to the last one, and returns a list with all the received combinations in upper case. ` def scrolling_text(string: str) -> list: pass ` Example` scrolling_text("robot") returns: [ "ROBOT", "OBOTR", "BOTRO", "OTROB", "TROBO" ] ` I know only I return the list in uppercase A: The easiest way is to use slices of the string, which is an easy way of getting a subset of an sequence. In Python, str can be treated as a sequence of characters. The following function would do it: def scrolling_text(text: str) -> list[str]: ret = [] for i in range(len(text)): ret.append(text[i:] + text[:i]) return ret So this goes through every offset starting at zero and going up to the number of characters in the string. The expression text[i:] represents the substring from offset i onwards, and text[:i] represents the substring up to (but not including) I. If you wanted more advanced Python, you could use a list comprehension: def scrolling_text(text: str) -> list[str]: return [text[i:] + text[:i] for i in range(len(text))] Or you could use a generator to lazily evaluate the list: from typing import Iterator def scrolling_text(text: str) -> Iterator[str]: for i in range(len(text)): yield text[i:] + text[:i] A: This is a pretty basic python code: def scrolling_text(string: str) -> list: out = [] string = string.upper() for i in range(len(string)): out.append(string[i:] + string[:i]) return out res = scrolling_text("ROBOT") print(res) # ['ROBOT', 'OBOTR', 'BOTRO', 'OTROB', 'TROBO']
How to rotate letters in a python string
Create a scrolling_text function that accepts a string as a parameter, sequentially rearranges all the characters in the string from the zero index to the last one, and returns a list with all the resulting combinations in upper case. def scrolling_text(string: str) -> list: pass Example: scrolling_text("robot") returns: [ "ROBOT", "OBOTR", "BOTRO", "OTROB", "TROBO" ] So far I only know how to return the list in uppercase.
[ "The easiest way is to use slices of the string, which is an easy way of getting a subset of an sequence. In Python, str can be treated as a sequence of characters.\nThe following function would do it:\ndef scrolling_text(text: str) -> list[str]:\n ret = []\n for i in range(len(text)):\n ret.append(text[i:] + text[:i])\n return ret\n\nSo this goes through every offset starting at zero and going up to the number of characters in the string. The expression text[i:] represents the substring from offset i onwards, and text[:i] represents the substring up to (but not including) I.\nIf you wanted more advanced Python, you could use a list comprehension:\ndef scrolling_text(text: str) -> list[str]:\n return [text[i:] + text[:i] for i in range(len(text))]\n\nOr you could use a generator to lazily evaluate the list:\nfrom typing import Iterator\n\ndef scrolling_text(text: str) -> Iterator[str]:\n for i in range(len(text)):\n yield text[i:] + text[:i]\n\n", "This is a pretty basic python code:\ndef scrolling_text(string: str) -> list:\n out = []\n string = string.upper()\n for i in range(len(string)):\n out.append(string[i:] + string[:i])\n return out\n \nres = scrolling_text(\"ROBOT\")\nprint(res) # ['ROBOT', 'OBOTR', 'BOTRO', 'OTROB', 'TROBO']\n\n" ]
[ 1, 0 ]
[]
[]
[ "function", "list", "python", "python_3.x", "string" ]
stackoverflow_0074508580_function_list_python_python_3.x_string.txt
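An equivalent rotation without slicing, using collections.deque, whose rotate method shifts elements in place; the output matches both answers above:

from collections import deque

def scrolling_text(string: str) -> list:
    d = deque(string.upper())
    out = []
    for _ in range(len(d)):
        out.append("".join(d))
        d.rotate(-1)          # move the first character to the end
    return out

print(scrolling_text("robot"))  # ['ROBOT', 'OBOTR', 'BOTRO', 'OTROB', 'TROBO']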
Q: read from a txt file with encoding= cp1256 I am trying to read and extract data from a file with encoding=cp1256. I can read the file and print all the information from it, but when I try to search for something using line.startswith it is not working. printing = False with open(SourceFile,"r") as file: for line in file: if line.startswith("NODes\n"): # search for a keyword printing = True continue # go to next line elif line.startswith(";CON"): printing = False break #quit file reading if printing: print(line, file=PointsFile) PointsFile.close() It only works if I save the file using Notepad and change the encoding to utf-8; then the same code works fine. What should I do to make it work without changing the encoding? A: open has an optional argument encoding, codecs - Standard Encodings shows a table of encodings, as cp1256 is one of them it should suffice to replace with open(SourceFile,"r") as file: using with open(SourceFile,"r",encoding="cp1256") as file:
read from a txt file with encoding= cp1256
I am trying to read and extract data from a file with encoding=cp1256. I can read the file and print all the information from it, but when I try to search for something using line.startswith it is not working. printing = False with open(SourceFile,"r") as file: for line in file: if line.startswith("NODes\n"): # search for a keyword printing = True continue # go to next line elif line.startswith(";CON"): printing = False break #quit file reading if printing: print(line, file=PointsFile) PointsFile.close() It only works if I save the file using Notepad and change the encoding to utf-8; then the same code works fine. What should I do to make it work without changing the encoding?
[ "open has optional argument encoding, codecs - Standard Encodings shows table of encodings, as cp1256 is one of them it should suffice to replace\nwith open(SourceFile,\"r\") as file:\n\nusing\nwith open(SourceFile,\"r\",encoding=\"cp1256\") as file:\n\n" ]
[ 2 ]
[]
[]
[ "file_handling", "python" ]
stackoverflow_0074508631_file_handling_python.txt
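If startswith still misses after setting the encoding, inspecting the first line's repr usually reveals why — for example, a file re-saved as UTF-8 by Notepad starts with a byte-order mark, which encoding="utf-8-sig" strips. A small diagnostic sketch (the path is a placeholder):

SourceFile = "source.txt"  # placeholder for the real file path

with open(SourceFile, "r", encoding="cp1256") as file:
    first_line = next(file)
print(repr(first_line))  # shows invisible characters that defeat startswith

# For a Notepad-saved UTF-8 file, this variant removes a leading BOM:
# with open(SourceFile, "r", encoding="utf-8-sig") as file:
#     ...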
Q: combining dataframes that have the same 'country name' and same 'year' I m trying to merge these dataframes in a way that the final data frame would have matched the country year gdp from first dataframe with its corresponding values from second data frame. [] [] first data frame : Country Country code year rgdpe country1 Code1 year1 rgdpe1 country1 Code1 yearn rgdpen country2 Code2 year1 rgdpe1' second dataframe : countries value year country1 value1 year1 country1 valuen yearn country2 Code2 year1 combined dataframe: | Country | Country code | year |rgdpe |value| |:--------|:------------:|:----:|:-----:|:---:| |country1 | Code1 | year1|rgdpe1 |value| |country1 | Code1 | yearn|rgdpen |Value| |country2 | Code2 | year1|rgdpe1'|Value| combined=pd.merge(left=df_biofuel_prod, right=df_GDP[['rgdpe']], left_on='Value', right_on='country', how='right') combined.to_csv('../../combined_test.csv') the results of this code gives me just the rgdpe column while the other column are empty. What would be the most efficient way to merge and match these dataframes ? A: First, from the data screen cap, it looks like the "country" column in your first dataset "df_GDP" is set as index. Reset it using "reset_index()". Then merge on multiple columns like left_on=["countries","year"] and right_on=["country","year"]. And since you want to retain all records from your main dataframe "df_biofuel_prod", so it should be "left" join: combined_df = df_biofuel_prod.merge(df_GDP.reset_index(), left_on=["countries","year"], right_on=["country","year"], how="left") Full example with dummy data: df_GDP = pd.DataFrame(data=[["USA",2001,400],["USA",2002,450],["CAN",2001,150],["CAN",2002,170]], columns=["country","year","rgdpe"]).set_index("country") df_biofuel_prod = pd.DataFrame(data=[["USA",400,2001],["USA",450,2003],["CAN",150,2001],["CAN",170,2003]], columns=["countries","Value","year"]) combined_df = df_biofuel_prod.merge(df_GDP.reset_index(), left_on=["countries","year"], right_on=["country","year"], how="left") [Out]: countries Value year country rgdpe 0 USA 400 2001 USA 400.0 1 USA 450 2003 NaN NaN 2 CAN 150 2001 CAN 150.0 3 CAN 170 2003 NaN NaN You see "NaN" where matching data is not available in "df_GDP".
combining dataframes that have the same 'country name' and same 'year'
I'm trying to merge these dataframes in a way that the final data frame matches the country-year GDP from the first dataframe with its corresponding values from the second data frame. First data frame: Country Country code year rgdpe country1 Code1 year1 rgdpe1 country1 Code1 yearn rgdpen country2 Code2 year1 rgdpe1' Second dataframe: countries value year country1 value1 year1 country1 valuen yearn country2 Code2 year1 Combined dataframe: | Country | Country code | year |rgdpe |value| |:--------|:------------:|:----:|:-----:|:---:| |country1 | Code1 | year1|rgdpe1 |value| |country1 | Code1 | yearn|rgdpen |Value| |country2 | Code2 | year1|rgdpe1'|Value| combined=pd.merge(left=df_biofuel_prod, right=df_GDP[['rgdpe']], left_on='Value', right_on='country', how='right') combined.to_csv('../../combined_test.csv') The result of this code gives me just the rgdpe column while the other columns are empty. What would be the most efficient way to merge and match these dataframes?
[ "First, from the data screen cap, it looks like the \"country\" column in your first dataset \"df_GDP\" is set as index. Reset it using \"reset_index()\". Then merge on multiple columns like left_on=[\"countries\",\"year\"] and right_on=[\"country\",\"year\"]. And since you want to retain all records from your main dataframe \"df_biofuel_prod\", so it should be \"left\" join:\ncombined_df = df_biofuel_prod.merge(df_GDP.reset_index(), left_on=[\"countries\",\"year\"], right_on=[\"country\",\"year\"], how=\"left\")\n\nFull example with dummy data:\ndf_GDP = pd.DataFrame(data=[[\"USA\",2001,400],[\"USA\",2002,450],[\"CAN\",2001,150],[\"CAN\",2002,170]], columns=[\"country\",\"year\",\"rgdpe\"]).set_index(\"country\")\n\ndf_biofuel_prod = pd.DataFrame(data=[[\"USA\",400,2001],[\"USA\",450,2003],[\"CAN\",150,2001],[\"CAN\",170,2003]], columns=[\"countries\",\"Value\",\"year\"])\n\ncombined_df = df_biofuel_prod.merge(df_GDP.reset_index(), left_on=[\"countries\",\"year\"], right_on=[\"country\",\"year\"], how=\"left\")\n\n[Out]:\n countries Value year country rgdpe\n0 USA 400 2001 USA 400.0\n1 USA 450 2003 NaN NaN\n2 CAN 150 2001 CAN 150.0\n3 CAN 170 2003 NaN NaN\n\nYou see \"NaN\" where matching data is not available in \"df_GDP\".\n" ]
[ 0 ]
[]
[]
[ "data_analysis", "dataframe", "pandas", "python", "python_3.x" ]
stackoverflow_0074508510_data_analysis_dataframe_pandas_python_python_3.x.txt
Q: moving a file that contains specific element to another directory python

I have the following code that prints the element I need in all the XML files I have in the directory. I'm trying to move the files that contain the element "drone" to another directory, but I can't make it work. Maybe someone can help me with that?

import os
import shutil
from xml.etree import ElementTree as ET

# files are in a sub folder where this script is being ran
path = "D:\\TomProject\\Done"

for filename in os.listdir(path):
    # Only get xml files
    if not filename.endswith('.xml'):
        continue  # I haven't been able to get it to work by just saying 'if filename.endswith('.xml')' only if not..
    fullname = os.path.join(path, filename)  # This joins the path for each file it finds so that python knows the full path / filename to trigger parser
    tree = ET.parse(fullname)  # Parse the files..
    print(filename)
    # Get the root of the XML tree structure
    root = tree.getroot()
    # Print the tags it finds from all the child elements from root
    for object in root.findall('object'):
        rank = object.find('name').text
        print(rank)
        name = 'drone'
        if rank == 'Drone':
            shutil.move("D:\\TomProject\\Images", "D:\\TomProject\\Done")

A: The current code appears to be calling shutil.move with 2 hard-coded paths, but you should be passing the full path (fullname in your code) as the src argument instead.
I recommend using pathlib instead of os.path functions.

import shutil
from pathlib import Path
from xml.etree import ElementTree as ET


def contains_drone(path):
    tree = ET.parse(path.as_posix())
    root = tree.getroot()
    for obj in root.findall('object'):
        rank = obj.find('name').text
        if rank == 'Drone':
            return True
    return False


def move_drone_files(src, dst):
    src, dst = Path(src), Path(dst)
    for path in src.iterdir():
        if path.suffix == '.xml' and contains_drone(path):
            print(f'Moving {path.as_posix()} to {dst.as_posix()}')
            shutil.move(path, dst)

Usage, assuming the Images directory is where the files are, and the Done directory is where you want to move them:

move_drone_files('D:\\TomProject\\Images', 'D:\\TomProject\\Done')
moving a file that contains specific element to another directory python
I have the following code that prints the element I need in all xml files i have in the directory, im trying to move the files that contains the element "drone" to another directory but i cant make it, maybe someone can help me with that? import os import shutil from xml.etree import ElementTree as ET # files are in a sub folder where this script is being ran path = "D:\\TomProject\\Done" for filename in os.listdir(path): # Only get xml files if not filename.endswith('.xml'): continue # I haven't been able to get it to work by just saying 'if filename.endswith('.xml')' only if not.. fullname = os.path.join(path, filename) # This joins the path for each file it files so that python knows the full path / filename to trigger parser tree = ET.parse(fullname) # Parse the files.. print(filename) # Get the root of the XML tree structure root = tree.getroot() # Print the tags it finds from all the child elements from root for object in root.findall('object'): rank = object.find('name').text print(rank) name = 'drone' if rank == 'Drone': shutil.move("D:\\TomProject\\Images", "D:\\TomProject\\Done")
[ "The current code appears to be calling shutil.move with 2 hard-coded paths, but you should be passing the full path (fullname in your code) as the src argument instead.\nI recommend using pathlib instead of os.path functions.\nimport shutil\nfrom pathlib import Path\nfrom xml.etree import ElementTree as ET\n\n\ndef contains_drone(path):\n tree = ET.parse(path.as_posix())\n root = tree.getroot()\n for obj in root.findall('object'):\n rank = obj.find('name').text\n if rank == 'Drone':\n return True\n return False\n\n\ndef move_drone_files(src, dst):\n src, dst = Path(src), Path(dst)\n for path in src.iterdir():\n if path.suffix == '.xml' and contains_drone(path):\n print(f'Moving {path.as_posix()} to {dst.as_posix()}')\n shutil.move(path, dst)\n\nUsage, assuming the Images directory is where the files are, and the Done directory is where you want to move them:\nmove_drone_files('D:\\\\TomProject\\\\Images', 'D:\\\\TomProject\\\\Done')\n\n" ]
[ 0 ]
[]
[]
[ "elementtree", "python" ]
stackoverflow_0074508501_elementtree_python.txt
Q: ValueError: Number of labels=34866 does not match number of samples=2

I am trying to run a Decision Tree Classifier but I face this problem. Can you please explain how I can fix this error? My English isn't very good, but I will try to understand! I'm just starting to learn programming, so please point out anything that isn't good enough. Thank you!

import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
import pandas as pd

sale = pd.read_csv('Online_Sale.csv')
plt.rcParams['font.sans-serif'] = ['Microsoft JhengHei']
sale['回購'] = sale['回購'].apply(lambda x: 1 if x == 'Y' else 0)
sale['單位售價'] = sale['單位售價'].str.replace(',', '').astype(float)
x = sale['年紀'], sale['單位售價']
y = sale['回購']
print(x)
print(y)
clf = DecisionTreeClassifier(random_state=0)
model = clf.fit(x, y)
text_representation = tree.export_text(clf)
print(text_representation)
fig = plt.figure(figsize=(15, 12))
tree.plot_tree(clf, filled=True)
fig.savefig("decistion_tree.png")

data: [screenshot of the data omitted]

I looked for a lot of different approaches, but I didn't have a way to fully understand what the problem is...

A: There is just a small error with:

x = sale['年紀'], sale['單位售價']

Rather than selecting the columns you want, this creates a tuple of the columns, hence the end of the error message ... does not match number of samples=2
One way to create a new pd.DataFrame with your selected columns:

x = sale[['年紀', '單位售價']]
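A minimal runnable sketch of that fix, with a dummy frame standing in for Online_Sale.csv (the values are invented):

import pandas as pd
from sklearn.tree import DecisionTreeClassifier

# Dummy stand-in for the CSV; column names follow the question.
sale = pd.DataFrame({"年紀": [25, 40, 31, 52],
                     "單位售價": [100.0, 250.0, 80.0, 300.0],
                     "回購": [1, 0, 1, 0]})

X = sale[["年紀", "單位售價"]]  # double brackets give a DataFrame, not a tuple
y = sale["回購"]

clf = DecisionTreeClassifier(random_state=0).fit(X, y)
print(clf.predict(X))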
ValueError: Number of labels=34866 does not match number of samples=2
I am trying to run Decision Tree Classifier but I face this problem.Please can you explain me how do I fix this Error?My English isn’t very good but I will try to understand!I'm just starting to learn the program, so please point me out if there's anything that isn't good enough.thank you! import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeClassifier from sklearn import tree import pandas as pd sale=pd.read_csv('Online_Sale.csv') plt.rcParams['font.sans-serif'] = ['Microsoft JhengHei'] sale['回購'] = sale['回購'].apply(lambda x: 1 if x == 'Y' else 0) sale['單位售價'] = sale['單位售價'].str.replace(',', '').astype(float) x=sale['年紀'],sale['單位售價'] y=sale['回購'] print(x) print(y) clf = DecisionTreeClassifier(random_state=0) model = clf.fit(x, y) text_representation = tree.export_text(clf) print(text_representation) fig = plt.figure(figsize=(15,12)) tree.plot_tree(clf, filled=True) fig.savefig("decistion_tree.png") data: I looked for a lot of different approaches, but I didn't have a way to fully understand what the problem is...
[ "There is just a small error with:\nx=sale['年紀'],sale['單位售價']\n\nRather than selecting the columns you want, this creates a tuple of the columns, hence the end of the error message ... does not match number of samples=2\nOne way to create a new pd.DataFrame with your selected columns:\nx=sale[['年紀', '單位售價']]\n\n" ]
[ 1 ]
[]
[]
[ "decision_tree", "python", "scikit_learn" ]
stackoverflow_0074499590_decision_tree_python_scikit_learn.txt
Q: How do I scroll the comments in a Youtube Video? I have tried

I'm trying to build a YouTube scraper. I've scraped all the data I wanted from the video, but I am not able to scroll all the way to the end of the comments. I have tried the following code:

from selenium import webdriver
import time

url = "https://www.youtube.com/watch?v=L8jN69GEBSw"
driver = webdriver.Chrome()
driver.get(url)
driver.execute_script('window.scrollBy(0, 1000)')
time.sleep(0.5)

I've tried "Keys" as well but it is not working either. I think the problem is this code returning 0:

driver.execute_script("return document.body.scrollHeight")

Could someone help?

A: Try to use the selenium .scroll_by_amount function. You need to do something like this:

from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
import time

driver = webdriver.Chrome()
url = "https://www.youtube.com/watch?v=L8jN69GEBSw"
driver.get(url)
time.sleep(5)
ActionChains(driver).scroll_by_amount(0, 5000).perform()

You need to find the right amount for the second parameter. There are some other options like .scroll_from_origin or .scroll_to_element, if that is not what you want. I recommend adding Options and passing the argument "--start-maximized".
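Note that on YouTube the scrolling element is document.documentElement rather than document.body, which is why the question's scrollHeight check returns 0. A common pattern (a sketch, untested here; the sleep of 2 seconds is an assumption) is to keep scrolling until the page height stops growing:

from selenium import webdriver
import time

driver = webdriver.Chrome()
driver.get("https://www.youtube.com/watch?v=L8jN69GEBSw")
time.sleep(5)

last_height = 0
while True:
    # Scroll the documentElement, since body reports height 0 on YouTube.
    driver.execute_script(
        "window.scrollTo(0, document.documentElement.scrollHeight);")
    time.sleep(2)  # give the next batch of comments time to load
    new_height = driver.execute_script(
        "return document.documentElement.scrollHeight")
    if new_height == last_height:
        break  # nothing new loaded, so we reached the end of the comments
    last_height = new_height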
How do I scroll the comments in a Youtube Video? I have tried
I'm trying to build a Youtube Scraper. I've scrapped all the data I wanted from the video but I am not able to scroll all the way to the end of the comments. I have tried the following code: from selenium import webdriver import time url = "https://www.youtube.com/watch?v=L8jN69GEBSw" driver = webdriver.Chrome() driver.get(url) driver.execute_script('window.scrollBy(0, 1000)') time.sleep(0.5) I've tried "Keys" as well but it is not working either I think the problem is this code returning 0. driver.execute_script("return document.body.scrollHeight") Could you someone help?
[ "Try to use selenium .scroll_by_amount function. You need to do something like this:\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\n\ndriver=webdriver.Chrome()\nurl = \"https://www.youtube.com/watch?v=L8jN69GEBSw\"\ndriver.get(url)\ntime.sleep(5)\nActionChains(driver).scroll_by_amount(0,5000).perform()\n\nYou need to find right amount for second parameter. There are some other options like .scroll_from_origin or .scroll_to_element. If that will not be what you want. Try one of these. I recommend to add options and add argument to options, which is \"--start-maximized\"\n" ]
[ 0 ]
[]
[]
[ "automation", "python", "selenium", "selenium_webdriver", "web_scraping" ]
stackoverflow_0074508552_automation_python_selenium_selenium_webdriver_web_scraping.txt
Q: 3 Patterns in one for x in range(y) loop without list comprehension

Instead of using 3 loops separately, I'd like to use only one loop and speed up the code. There are 3 different patterns over range(0, 150), each increasing by 3 per iteration:

0, 3, 6, 9...
1, 4, 7, 10...
2, 5, 8, 11...

My code:

fromlist = [1,2,3,4,5]
req1list = ['z','t','y']
req2list = [21,39,52]
req3list = [100,200,300]

for i in range(0,150,3):
    req1list.append(fromlist[i])
for j in range(1,150,3):
    req2list.append(fromlist[j])
for x in range(2,151,3):
    req3list.append(fromlist[x])

Note that the lists are already created and there is data inside the file; thus, I thought that a list comprehension would be impossible. Another note: please ignore the list lengths; in my file the lists are far longer and don't cause index errors. Is there any way to unite these 3 loops into one and speed up the code?

A: Instead of trying to perform three appends in each iteration (in one loop), you'll get faster results if you call extend instead of append. You could also use slicing to avoid comprehension:

req1list.extend(fromlist[::3])
req2list.extend(fromlist[1::3])
req3list.extend(fromlist[2::3])

And if it is important to stop at 150, add it in the slice operation:

req1list.extend(fromlist[:150:3])
# ...etc

A: A slightly different approach would be to use the grouper function defined in the itertools documentation.

def grouper(iterable, n, *, incomplete='fill', fillvalue=None):
    args = [iter(iterable)] * n
    if incomplete == 'fill':
        return zip_longest(*args, fillvalue=fillvalue)
    if incomplete == 'strict':
        return zip(*args, strict=True)
    if incomplete == 'ignore':
        return zip(*args)
    else:
        raise ValueError('Expected fill, strict, or ignore')

With this, you can write

for x, y, z in grouper(fromlist, 3):
    req1list.append(x)
    req2list.append(y)
    req3list.append(z)

(You can copy the definition of grouper from the documentation to your own code, or use the third-party more-itertools package which includes it and many more useful functions for dealing with iterators.)
3 Patterns in one for x in range(y) loop without list comprehension
Instead of using 3 loops separately, I'd like to use only one loop and speed up the code. There are 3 different patterns of range(0,150), increasing 3 per loop: 0,3,6,9... 1,4,7,10... 2,5,8,11.... My code: fromlist = [1,2,3,4,5] req1list = ['z','t','y'] req2list = [21,39,52] req3list = [100,200,300] for i in range(0,150,3): req1list.append(fromlist[i]) for j in range(1,150,3): req2list.append(fromlist[j]) for x in range(2,151,3): req3list.append(fromlist[x]) Note that lists are already created and there is data inside the file. Thus, I thought that list comprehension would be impossible. Another note: please ignore the list lengths, in my file the lists are far longer and don't cause errors in []. Is there any way that unites these 3 loops in one, and speed up the code?
[ "Instead of trying to perform three appends in each iteration (in one loop), you'll get faster results if you call extend instead of append. You could also use slicing to avoid comprehension:\nreq1list.extend(fromlist[::3])\nreq2list.extend(fromlist[1::3])\nreq3list.extend(fromlist[2::3])\n\nAnd if it is important to stop at 150, add it in the slice operation:\nreq1list.extend(fromlist[:150:3])\n# ...etc\n\n", "A slightly different approach would be to use the grouper function defined in the itertools documentation.\ndef grouper(iterable, n, *, incomplete='fill', fillvalue=None):\n args = [iter(iterable)] * n\n if incomplete == 'fill':\n return zip_longest(*args, fillvalue=fillvalue)\n if incomplete == 'strict':\n return zip(*args, strict=True)\n if incomplete == 'ignore':\n return zip(*args)\n else:\n raise ValueError('Expected fill, strict, or ignore')\n\nWith this, you can write\nfor x, y, z in grouper(fromlist, 3):\n req1list.append(x)\n req2list.append(y)\n req3list.append(z)\n\n(You can copy the definition of grouper from the documentation to your own code, or use the third-party more-itertools package which includes it and many more useful functions for dealing with iterators.)\n" ]
[ 2, 0 ]
[]
[]
[ "loops", "python", "range" ]
stackoverflow_0074508464_loops_python_range.txt
Q: What does this error TypeError: 'Button' object is not callable mean?

This is my first time coding in tkinter. When I try to create a new button in the function 'Registering' I keep getting the same error: 'Button' object is not callable. I don't understand what this error is suggesting about the simple code I have written. Can anyone clarify this for me in the context of the code below?

from tkinter import *
root = Tk()

def Registering():
    window = Toplevel(root)
    login_button = Button(window, width = 120, height = 42)

Button = Button(root,text= "Enter",command=Registering)
Button.pack()

root.mainloop()

A: Button = Button(root,text= "Enter",command=Registering)
Button.pack()

By doing Button = Button(... you override tkinter's definition of Button.
Use a different (hopefully more meaningful) name:

register_button = Button(root,text= "Enter",command=Registering)
register_button.pack()

A: The reason it's showing the error is because you're using Button as your variable name:

from tkinter import *
root = Tk()

def Registering():
    window = Toplevel(root)
    login_button = Button(window, width = 120, height = 42)

btn = Button(root,text= "Enter",command=Registering)
btn.pack()

root.mainloop()
What does this error TypeError: 'Button' object is not callable mean?
This is my first time coding in tkinter. When I try to create a new button in the function 'Registering' i keep getting the same error 'Button' object is not callable. I don't understand what this error is suggesting about the simple code I have written. Can anyone clarify this for me in the context of the code below? from tkinter import * root = Tk() def Registering(): window = Toplevel(root) login_button = Button(window, width = 120, height = 42) Button = Button(root,text= "Enter",command=Registering) Button.pack() root.mainloop()
[ "Button = Button(root,text= \"Enter\",command=Registering)\nButton.pack()\n\nBy doing Button = Button (... you override tkinter's definition of Button.\nUse a different (hopefully more meaningful) name:\nregister_button = Button(root,text= \"Enter\",command=Registering)\nregister_button.pack()\n\n", "the reason its showing the error is cause ur using Button as ur variable name\nfrom tkinter import *\nroot = Tk()\n\ndef Registering():\n window = Toplevel(root)\n login_button = Button(window, width = 120, height = 42)\n\n\n\nbtn= Button(root,text= \"Enter\",command=Registering)\nbtn.pack()\n\nroot.mainloop()\n\n" ]
[ 11, 0 ]
[]
[]
[ "python", "tkinter" ]
stackoverflow_0052739334_python_tkinter.txt
Q: How to ignore certain rows while looping over pandas dataframe using iterrows

I am trying to loop over a pandas dataframe using iterrows. However, if I reach a certain predetermined row, I want to just skip over that row, not perform the calculations, and continue to the next row. However, I am very unsure how to do so. This is what I've tried so far.

dish_one = unimp_features.iloc[235]
dish_two = unimp_features.iloc[621]
dish_three = unimp_features.iloc[831]

for index, row in unimp_features.iterrows():
    if row == dish_one or row == dish_two or row == dish_three:
        continue
    else:
        df_unimportant.loc[index, 'cos_one'] = 1 - spatial.distance.cosine(dish_one, row)
        df_unimportant.loc[index, 'cos_two'] = 1 - spatial.distance.cosine(dish_two, row)
        df_unimportant.loc[index, 'cos_three'] = 1 - spatial.distance.cosine(dish_three, row)

The goal is to ignore the rows where dish_one, dish_two & dish_three are present, and just go to the next row and continue the next calculations further down in the loop.

A: I had to use a Series function called Series.equals(Series). So the end result is:

for index, row in unimp_features.iterrows():
    if row.equals(dish_one) | row.equals(dish_two) | row.equals(dish_three):
        continue
    else:
        df_unimportant.loc[index, 'cos_one'] = 1 - spatial.distance.cosine(dish_one, row)
        df_unimportant.loc[index, 'cos_two'] = 1 - spatial.distance.cosine(dish_two, row)
        df_unimportant.loc[index, 'cos_three'] = 1 - spatial.distance.cosine(dish_three, row)
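An alternative sketch that reuses the question's variables: since the three dishes were selected by position with .iloc, their index labels can be dropped up front instead of comparing every row. This assumes those labels are unique in the index.

# Map the known positions to their index labels, then drop them once.
dish_idx = [unimp_features.index[i] for i in (235, 621, 831)]

for index, row in unimp_features.drop(index=dish_idx).iterrows():
    df_unimportant.loc[index, 'cos_one'] = 1 - spatial.distance.cosine(dish_one, row)
    df_unimportant.loc[index, 'cos_two'] = 1 - spatial.distance.cosine(dish_two, row)
    df_unimportant.loc[index, 'cos_three'] = 1 - spatial.distance.cosine(dish_three, row)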
How to ignore certain rows while looping over pandas dataframe using iterrows
i am trying to loop over a pandas dataframe using iterrows. However, if i reach a certain predetermined row, i was to just skip over that row and now perform the next calculations and just continue to the next row. However, i am very unsure on how to do so. This is what i've trie so far. dish_one = unimp_features.iloc[235] dish_two = unimp_features.iloc[621] dish_three = unimp_features.iloc[831] for index, row in unimp_features.iterrows(): if row == dish_one or row == dish_two or row == dish_three: continue else: df_unimportant.loc[index, 'cos_one'] = 1 - spatial.distance.cosine(dish_one, row) df_unimportant.loc[index, 'cos_two'] = 1 - spatial.distance.cosine(dish_two, row) df_unimportant.loc[index, 'cos_three'] = 1 - spatial.distance.cosine(dish_three, row) The goal is to ignore the row where dish_one, dish_two & dish_three is present, and just go to the next row and continue the next calculations further down in the loop.
[ "I had to use a Series function called Series.equals(Series)\nSo end result is:\nfor index, row in unimp_features.iterrows():\n if row.equals(dish_one) | row.equals(dish_two) | row.equals(dish_three):\n continue\n else:\n df_unimportant.loc[index, 'cos_one'] = 1 - spatial.distance.cosine(dish_one, row)\n df_unimportant.loc[index, 'cos_two'] = 1 - spatial.distance.cosine(dish_two, row)\n df_unimportant.loc[index, 'cos_three'] = 1 - spatial.distance.cosine(dish_three, row)\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074508664_pandas_python.txt
Q: How to get all combinations from array python

How to create all possible combinations from the elements of an array, of a certain length? For instance:

N = 6 (length)
arr = ['11'] (meaning the two 1s are adjacent)

Output:
110000
011000
001100
000110
000011
100001

If arr = ['1','1'] (meaning the 1s cannot be adjacent) and N = 6 (length):

Output:
101000
100100
100010
010100
010010
010001
001010
001001
000101

I have the following function, but I don't know how to make the combination 100001 count as adjacent.

Code:

def f(arr, N, start=''):
    arr1 = arr[1:]
    alen = sum(map(len, arr1)) + len(arr1) - 1
    if (alen):
        alen += 1
    for i in range(N - alen - len(arr[0]) + 1):
        add = '0' * i + arr[0]
        if (arr1):
            f(arr1, N - len(add) - 1, start + add + '0')
        else:
            print(start + add + '0' * (N - len(add)))

arr = ['11']
N = 6
f(arr, N)

A: Please test:

check = []
check2 = []
for x in range(5):
    arr = [0, 0, 0, 0, 0, 0]
    list_of_one_poz = []
    arr[x] = 1
    for y in range(x, 5):
        list_of_one_poz.append(y+1)

    for i in list_of_one_poz:
        arr[i] = 1
        txt = ''.join(str(e) for e in arr)
        r_index = txt.rindex('1')
        l_index = txt.index('1')
        l = list(range(l_index+1, r_index))
        for p in l:
            arr[p]=0
        if r_index - l_index <=1:
            check.append(''.join(str(e) for e in arr))
        elif r_index - l_index == 5:
            check.append(''.join(str(e) for e in arr))
        else:
            check2.append(''.join(str(e) for e in arr))

print(check)
print("")
print(check2)
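For this specific two-ones case, a shorter route is to place the two 1s with itertools.combinations. This sketch treats adjacency as circular, which is what makes 100001 count as adjacent for N = 6:

from itertools import combinations

def patterns(n, adjacent, wrap=True):
    """Yield n-bit strings with two 1s that are (non-)adjacent.

    Adjacency is circular when wrap=True, so positions 0 and n-1
    also count as neighbours.
    """
    for i, j in combinations(range(n), 2):
        dist = j - i
        is_adj = dist == 1 or (wrap and dist == n - 1)
        if is_adj == adjacent:
            bits = ['0'] * n
            bits[i] = bits[j] = '1'
            yield ''.join(bits)

print(list(patterns(6, adjacent=True)))   # 110000 ... 100001 (6 strings)
print(list(patterns(6, adjacent=False)))  # 101000 ... 000101 (9 strings)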
How to get all combinations from array python
How to create all possible combinations from the elements of the array of certain length For instance N = 6 (length) arr = ['11'] (mean 11 are adjacent) Output: 110000 011000 001100 000110 000011 100001 If arr = ['1','1'] (mean, 11 couldn't be adjacent) N = 6 (length) Output: 101000 100100 100010 010100 010010 010001 001010 001001 000101 I have the following function, but I don't know how to do the combination to be considered adjacent 100001 Code def f(arr, N, start=''): arr1 = arr[1:] alen = sum(map(len, arr1)) + len(arr1) - 1 if (alen): alen += 1 for i in range(N - alen - len(arr[0]) + 1): add = '0' * i + arr[0] if (arr1): f(arr1, N - len(add) - 1, start + add + '0') else: print(start + add + '0' * (N - len(add))) arr = ['11'] N = 6 f(arr, N)
[ "please test\n\ncheck = []\ncheck2 = []\nfor x in range(5):\n arr = [0, 0, 0, 0, 0, 0]\n list_of_one_poz = []\n arr[x] = 1\n for y in range(x, 5):\n list_of_one_poz.append(y+1)\n\n for i in list_of_one_poz:\n arr[i] = 1\n txt = ''.join(str(e) for e in arr)\n r_index = txt.rindex('1')\n l_index = txt.index('1')\n l = list(range(l_index+1, r_index))\n for p in l:\n arr[p]=0\n if r_index - l_index <=1:\n check.append(''.join(str(e) for e in arr))\n elif r_index - l_index == 5:\n check.append(''.join(str(e) for e in arr))\n else:\n check2.append(''.join(str(e) for e in arr))\n\nprint(check)\nprint(\"\")\nprint(check2)\n\n\n\n" ]
[ 0 ]
[]
[]
[ "python" ]
stackoverflow_0074508300_python.txt
Q: Tkinter: how to set a buttons position relative to the screen

from tkinter import *

Window = Tk()
Window.attributes('-fullscreen', True)

b1 = Button(Window, text="1", activeforeground="black", activebackground="gray", pady=2, font='secular_one', relief=GROOVE)
b1.place(x=1100, y=50)
b2 = Button(Window, text="2", activeforeground="black", activebackground="gray", pady=2, font='secular_one', relief=GROOVE)
b2.place(x=1100, y=220)
b3 = Button(Window, text="3", activeforeground="black", activebackground="gray", pady=2, font='secular_one', relief=GROOVE)
b3.place(x=1100, y=380)

Window.mainloop()

I'm making an app that uses Tkinter and I want to place some buttons in a certain position, so I did that using .place. But when I went to work at another computer, the buttons were in the middle of the screen instead of where I placed them originally.

A: The kwargs x and y for place define the widget's absolute position in pixels. So if you run the program on a display with a different resolution, it won't look the same.
Try to define relative positions instead:

b1.place(relx=0.3, rely=0.1)
Tkinter: how to set a buttons position relative to the screen
from tkinter import * Window = Tk() Window.attributes('-fullscreen', True) b1 = Button(Window, text="1", activeforeground="black", activebackground="gray", pady=2, font='secular_one', relief=GROOVE) b1.place(x=1100, y=50) b2 = Button(Window, text="2", activeforeground="black", activebackground="gray", pady=2, font='secular_one', relief=GROOVE) b2.place(x=1100, y=220) b3 = Button(Window, text="3", activeforeground="black", activebackground="gray", pady=2, font='secular_one', relief=GROOVE) b3.place(x=1100, y=380) Window.mainloop() I`m making an app that uses Tkinter and I want to place some buttons in a certain position so after I did that using .place I went to work at another computer but the buttons were in the middle of the screen instead of where I placed them originally
[ "The kwargs xand y for place define the widget absolute position in pixels. So if you run the program on a display with a different resolution, it won't look the same.\nTry to define relative positions instead:\nb1.place(relx=0.3, rely=0.1)\n\n" ]
[ 0 ]
[]
[]
[ "python", "tkinter" ]
stackoverflow_0074508853_python_tkinter.txt
Q: How to delete property?

class C():
    @property
    def x(self):
        return 0

delattr(C(), 'x')
>>> AttributeError: can't delete attribute

I'm aware del C.x works, but this deletes the class's property; can a class instance's property be deleted?

A: Refer to this answer; TL;DR, it's not about properties, but bound attributes, and x is bound to the class, not the instance, so it cannot be deleted from an instance when an instance doesn't have it in the first place. Demo:

class C():
    pass

@property
def y(self):
    return 1

c = C()
c.y = y
del c.y  # works
c.y

>>> AttributeError: 'C' object has no attribute 'y'

A: "I'm aware del C.x works, but this deletes the class's property; can a class instance's property be deleted?"

There's no such thing. Properties are defined on the class; there is nothing on the instance in the example you provide. It's like a method: a method is an attribute of the class which Python executes in the context of the instance.

A: I got the same error below:

AttributeError: can't delete attribute

when trying to delete the instance variable name with del as shown below:

class Person:
    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

obj = Person("John")

print(hasattr(obj, "name"))

del obj.name  # Here

print(hasattr(obj, "name"))

So, I added a @name.deleter method as shown below:

class Person:
    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @name.deleter  # Here
    def name(self):
        del self._name

obj = Person("John")

print(hasattr(obj, "name"))

del obj.name  # Here

print(hasattr(obj, "name"))

Then, I could delete the instance variable name with del as shown below:

True
False
How to delete property?
class C(): @property def x(self): return 0 delattr(C(), 'x') >>> AttributeError: can't delete attribute I'm aware del C.x works, but this deletes the class's property; can a class instance's property be deleted?
[ "Refer to this answer; TL;DR, it's not about properties, but bound attributes, and x is bound to the class, not the instance, so it cannot be deleted from an instance when an instance doesn't have it in the first place. Demo:\nclass C():\n pass\n\n@property\ndef y(self):\n return 1\n\nc = C()\nc.y = y\ndel c.y # works\nc.y\n\n>>> AttributeError: 'C' object has no attribute 'y'\n\n", "\nI'm aware del C.x works, but this deletes the class's property; can a class instance's property be deleted? \n\nThere's no such thing. Properties are defined on the class, there is nothing on the instance in the example you provide. It's like a method, a method is an attribute of the class which Python execute in the context of the instance.\n", "I got the same error below:\n\nAttributeError: can't delete attribute\n\nWhen trying to delete the instance variable name with del as shwon below:\nclass Person:\n def __init__(self, name):\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n self._name = name\n\nobj = Person(\"John\")\n\nprint(hasattr(obj, \"name\"))\n\ndel obj.name # Here\n\nprint(hasattr(obj, \"name\"))\n\nSo, I added @name.deleter method as shown below:\nclass Person:\n def __init__(self, name):\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n self._name = name\n\n @name.deleter # Here\n def name(self):\n del self._name\n\nobj = Person(\"John\")\n\nprint(hasattr(obj, \"name\"))\n\ndel obj.name # Here\n\nprint(hasattr(obj, \"name\"))\n\nThen, I could delete the instance variable name with del as shown below:\nTrue\nFalse\n\n" ]
[ 2, 1, 0 ]
[ "You can do something like this to delete attr from instance.\nhttps://stackoverflow.com/a/36931502/12789671\nclass C:\n def __init__(self):\n self._x: int = 0\n @property\n def x(self):\n return self._x\n @x.deleter\n def x(self):\n delattr(self, \"_x\")\n\nobj = C()\ndelattr(obj, \"x\")\ntry:\n print(obj.x)\nexcept AttributeError:\n print(\"failed to print x\")\n\nprint(C().x)\n\nfailed to print x\n0\n\n" ]
[ -1 ]
[ "python", "python_3.x" ]
stackoverflow_0062384952_python_python_3.x.txt
Q: Pycharm: import Serial is NOT working but i already did "pip3 install pyserial"

I am quite puzzled about why my PyCharm does not recognize import serial. I am writing Python code, but I need to use Serial. From what I found, I need to go to CMD, then enter "pip install pyserial" or "pip3 install pyserial" (this is what I did). The installation seems successful; I didn't see any errors after that. I went back to my PyCharm and typed import serial or import pyserial. Despite that, both are not working... I wonder what the problem with my Python is?

-Windows 10
-Python 3.10
-Pycharm Community Version: 2021.2.3

A: Just open the terminal within the PyCharm IDE and use pip to install it there.

A: Try uninstalling it from pip and then using the Python Packages tab to install it. It worked for me when I tried that.
Pycharm: import Serial is NOT working but i already did "pip3 install pyserial"
i am quite trouble why my pycharm does not recognize import serial. i am doing python code but i need to use Serial. so just from what i found: i need to go to CMD, then enter "pip install pyserial" or "pip3 install pyserial"(this is what i did). after that the installation seems successful, i didnt see any errors after that. i went back to my Pycharm and type import serial or import pyserial despite that, both code are not working... i wonder what is the problem with my Python? -Windows 10 -Python 3.10 -Pycharm Community Version: 2021.2.3
[ "Just open the terminal within the Pycharm IDE and use pip to install on there.\n", "Try uninstalling it from pip and then using the Python Packages tab to install it. It worked for me when I tried that.\n" ]
[ 0, 0 ]
[ "I had the same problem since I started using python 3.10.\nI found that you have to download the complete pyserial package from github, unzip the entire package and edit the setup.py file and add the line 'Programming Language :: Python :: 3.10',\nand then from the CMD window run python setup.py build\nWith that it works !!!\n" ]
[ -1 ]
[ "pip", "pycharm", "pyserial", "python" ]
stackoverflow_0069833807_pip_pycharm_pyserial_python.txt
Q: An Issue with Cogs (discord.py)

Alright, so I had some code that was working perfectly without cogs. I created two uses for my bot, then decided it was time to start using cogs, so that is what I did. The first of my uses was a reaction role maker. So I copied the code, put it in a cog, and changed all of the things that I knew I had to change (client.command -> commands.command, for example). When I try to run the code, the reaction role doesn't work and I get no errors. This is my code in main.py:

import discord
from discord.ext import commands
from discord import app_commands
import os
import asyncio

intents = discord.Intents.all()
bot = commands.Bot(command_prefix=">", intents=intents)
asta_guild_id = 1030203387423236136

# Cogs Setup
async def load():
    for filename in os.listdir("./Cogs"):
        if filename.endswith(".py"):
            await bot.load_extension(f"Cogs.{filename[:-3]}")

@bot.event
async def on_connect():
    print("Bot has connected!")

@bot.event
async def on_ready():
    print("Bot is Ready!")

async def main():
    await load()
    await bot.start("MY_TOKEN", reconnect=True)

asyncio.run(main())

And this is my code in Cogs/ReactionRoles.py:

import discord
from discord.ext import commands
from discord import app_commands

asta_guild_id = 1030203387423236136

class ReactionRoles(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @app_commands.command()
    async def rr(self, ctx):
        if ctx.author.top_role.permissions.administrator == True:
            channel_id = 1039476995324649572
            await ctx.reply(f"I will DM you once the reaction roles have been created.")
            #Title
            embed=discord.Embed(title="『』Reaction Roles!",description=f"React with the corresponding emoji to customise your roles!")
            title_msg = await ctx.send(embed=embed)
            #Ping Roles
            embed1=discord.Embed(title="『』Ping Roles:",description=f"React with to get pinged for Giveaways\nReact with ⌨️ to get pinged for Chat Revival\nReact with ️ to get pinged for Announcements\nReact with to get pinged for Staff Notices")
            ping_roles = await ctx.send(embed=embed1)
            await ping_roles.add_reaction("")
            await ping_roles.add_reaction("⌨️")
            await ping_roles.add_reaction("️")
            await ping_roles.add_reaction("")
            embed2=discord.Embed(title="『』Color Roles:",description=f"React with for red colour\nReact with for green colour\nReact with for blue colour\nReact with <:pink_square:1041820443692249109> for blue colour")
            col_roles = await ctx.send(embed=embed2)
            await col_roles.add_reaction("")
            await col_roles.add_reaction("")
            await col_roles.add_reaction("")
            await col_roles.add_reaction("<:pink_square:1041820443692249109>")
            dmchannel = await ctx.author.create_dm()
            dmembed=discord.Embed(title="",description=f"I have finished setting up the reaction roles in [Reaction Roles]({title_msg.jump_url})")
            await dmchannel.send(embed=dmembed)
        else:
            channel_id = 1039476995324649572
            Channel = commands.get_channel(channel_id)
            await ctx.reply("Only admins can complete this command.")

    @commands.Cog.listener()
    async def on_raw_reaction_add(self, ctx, payload):
        Channel = commands.get_channel(1039476995324649572)
        if payload.channel_id != 1039476995324649572:
            return
        await ctx.send("hi")
        guild = await self.commands.Bot.fetch_guild(payload.guild_id)
        user = await guild.fetch_member(payload.user_id)
        #Ping Roles
        if payload.emoji.name == "":
            Role = discord.utils.get(guild.roles, name="Giveaway")
            await user.add_roles(Role)
            print("321")
        if payload.emoji.name == "⌨️":
            Role = discord.utils.get(guild.roles, name="Chat revival")
            await user.add_roles(Role)
        if payload.emoji.name == "️":
            Role = discord.utils.get(guild.roles, name="Announcement")
            await user.add_roles(Role)
        if payload.emoji.name == "":
            Role = discord.utils.get(guild.roles, name="Staff Notices")
            await user.add_roles(Role)
        #Colour Roles
        if payload.emoji.name == "":
            Role = discord.utils.get(guild.roles, name="Red")
            await user.add_roles(Role)
        if payload.emoji.name == "":
            Role = discord.utils.get(guild.roles, name="Green")
            await user.add_roles(Role)
        if payload.emoji.name == "":
            Role = discord.utils.get(guild.roles, name="Blue")
            await user.add_roles(Role)
        if payload.emoji.name == "pink_square":
            Role = discord.utils.get(guild.roles, name="Pink")
            await user.add_roles(Role)

    @commands.Cog.listener()
    async def on_raw_reaction_remove(self, payload):
        Channel = commands.get_channel(1039476995324649572)
        guild = commands.get_guild(asta_guild_id)
        if payload.channel_id != Channel.id:
            return
        message = await Channel.fetch_message(payload.message_id)
        user = await guild.fetch_member(payload.user_id)
        #Ping Roles
        if payload.emoji.name == "":
            Role = discord.utils.get(guild.roles, name="Giveaway")
            await user.remove_roles(Role)
        if payload.emoji.name == "⌨️":
            Role = discord.utils.get(guild.roles, name="Chat revival")
            await user.remove_roles(Role)
        if payload.emoji.name == "️":
            Role = discord.utils.get(guild.roles, name="Announcement")
            await user.remove_roles(Role)
        if payload.emoji.name == "":
            Role = discord.utils.get(guild.roles, name="Staff Notices")
            await user.remove_roles(Role)
        #Colour Roles
        if payload.emoji.name == "":
            Role = discord.utils.get(guild.roles, name="Red")
            await user.remove_roles(Role)
        if payload.emoji.name == "":
            Role = discord.utils.get(guild.roles, name="Green")
            await user.remove_roles(Role)
        if payload.emoji.name == "":
            Role = discord.utils.get(guild.roles, name="Blue")
            await user.remove_roles(Role)
        if payload.emoji.name == "pink_square":
            Role = discord.utils.get(guild.roles, name="Pink")
            await user.remove_roles(Role)

async def setup(bot):
    await bot.add_cog(ReactionRoles(bot))

The bot just does nothing whenever I write >rr. It is not replying whatsoever to the commands/listeners in the cogs. Please help; I have spent like 2 days trying to fix this and tried to research as much as possible.

A: Change this:

@commands.command()

to this:

@app_commands.command()

The decorator has another name inside a cog.
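Beyond the decorator, two other things commonly make cog slash commands silently do nothing: the command tree is never synced, and app commands receive a discord.Interaction rather than ctx (so ctx.author / ctx.reply would fail once the command fires). A minimal sketch of a main.py that loads the cog and syncs the tree; the extension path and single-cog layout are assumptions:

import discord
from discord.ext import commands

class MyBot(commands.Bot):
    async def setup_hook(self):
        await self.load_extension("Cogs.ReactionRoles")
        # Copy global app commands to one guild and sync, so the slash
        # commands show up immediately in that guild instead of after
        # the up-to-an-hour global propagation delay.
        guild = discord.Object(id=1030203387423236136)
        self.tree.copy_global_to(guild=guild)
        await self.tree.sync(guild=guild)

bot = MyBot(command_prefix=">", intents=discord.Intents.all())
bot.run("MY_TOKEN")

Note that synced app commands are invoked as /rr from Discord's slash menu, not with the > prefix.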
An Issue with Cogs (discord.py)
Alright so I had some code that was working perfectly without cogs. I created two uses for my bot then decided it was time to start using cogs so that is what I did. The first of my first of my uses was a reaction role maker. So I copied the code and put it in a cog and changed all of the things that I knew that I had to change (client.command -> commands.command for example.) When I try to run the code the reaction role doesn't work and I get no errors. This is my code in main.py import discord from discord.ext import commands from discord import app_commands import os import asyncio intents = discord.Intents.all() bot = commands.Bot(command_prefix=">",intents=intents) asta_guild_id = 1030203387423236136 # Cogs Setup async def load(): for filename in os.listdir("./Cogs"): if filename.endswith(".py"): await bot.load_extension(f"Cogs.{filename[:-3]}") @bot.event async def on_connect(): print("Bot has connected!") @bot.event async def on_ready(): print("Bot is Ready!") async def main(): await load() await bot.start("MY_TOKEN", reconnect=True) asyncio.run(main()) And this is my code in Cogs/ReactionRoles.py import discord from discord.ext import commands from discord import app_commands asta_guild_id = 1030203387423236136 class ReactionRoles(commands.Cog): def __init__(self, bot): self.bot = bot @app_commands.command() async def rr(self, ctx): if ctx.author.top_role.permissions.administrator == True: channel_id = 1039476995324649572 await ctx.reply(f"I will DM you once the reaction roles have been created.") #Title embed=discord.Embed(title="『』Reaction Roles!",description=f"React with the corresponding emoji to customise your roles!") title_msg = await ctx.send(embed=embed) #Ping Roles embed1=discord.Embed(title="『』Ping Roles:",description=f"React with to get pinged for Giveaways\nReact with ⌨️ to get pinged for Chat Revival\nReact with ️ to get pinged for Announcements\nReact with to get pinged for Staff Notices") ping_roles = await ctx.send(embed=embed1) await ping_roles.add_reaction("") await ping_roles.add_reaction("⌨️") await ping_roles.add_reaction("️") await ping_roles.add_reaction("") embed2=discord.Embed(title="『』Color Roles:",description=f"React with for red colour\nReact with for green colour\nReact with for blue colour\nReact with <:pink_square:1041820443692249109> for blue colour") col_roles = await ctx.send(embed=embed2) await col_roles.add_reaction("") await col_roles.add_reaction("") await col_roles.add_reaction("") await col_roles.add_reaction("<:pink_square:1041820443692249109>") dmchannel = await ctx.author.create_dm() dmembed=discord.Embed(title="",description=f"I have finished setting up the reaction roles in [Reaction Roles]({title_msg.jump_url})") await dmchannel.send(embed=dmembed) else: channel_id = 1039476995324649572 Channel = commands.get_channel(channel_id) await ctx.reply("Only admins can complete this command.") @commands.Cog.listener() async def on_raw_reaction_add(self, ctx, payload): Channel = commands.get_channel(1039476995324649572) if payload.channel_id != 1039476995324649572: return await ctx.send("hi") guild = await self.commands.Bot.fetch_guild(payload.guild_id) user = await guild.fetch_member(payload.user_id) #Ping Roles if payload.emoji.name == "": Role = discord.utils.get(guild.roles, name="Giveaway") await user.add_roles(Role) print("321") if payload.emoji.name == "⌨️": Role = discord.utils.get(guild.roles, name="Chat revival") await user.add_roles(Role) if payload.emoji.name == "️": Role = discord.utils.get(guild.roles, name="Announcement") await 
user.add_roles(Role) if payload.emoji.name == "": Role = discord.utils.get(guild.roles, name="Staff Notices") await user.add_roles(Role) #Colour Roles if payload.emoji.name == "": Role = discord.utils.get(guild.roles, name="Red") await user.add_roles(Role) if payload.emoji.name == "": Role = discord.utils.get(guild.roles, name="Green") await user.add_roles(Role) if payload.emoji.name == "": Role = discord.utils.get(guild.roles, name="Blue") await user.add_roles(Role) if payload.emoji.name == "pink_square": Role = discord.utils.get(guild.roles, name="Pink") await user.add_roles(Role) @commands.Cog.listener() async def on_raw_reaction_remove(self, payload): Channel = commands.get_channel(1039476995324649572) guild = commands.get_guild(asta_guild_id) if payload.channel_id != Channel.id: return message = await Channel.fetch_message(payload.message_id) user = await guild.fetch_member(payload.user_id) #Ping Roles if payload.emoji.name == "": Role = discord.utils.get(guild.roles, name="Giveaway") await user.remove_roles(Role) if payload.emoji.name == "⌨️": Role = discord.utils.get(guild.roles, name="Chat revival") await user.remove_roles(Role) if payload.emoji.name == "️": Role = discord.utils.get(guild.roles, name="Announcement") await user.remove_roles(Role) if payload.emoji.name == "": Role = discord.utils.get(guild.roles, name="Staff Notices") await user.remove_roles(Role) #Colour Roles if payload.emoji.name == "": Role = discord.utils.get(guild.roles, name="Red") await user.remove_roles(Role) if payload.emoji.name == "": Role = discord.utils.get(guild.roles, name="Green") await user.remove_roles(Role) if payload.emoji.name == "": Role = discord.utils.get(guild.roles, name="Blue") await user.remove_roles(Role) if payload.emoji.name == "pink_square": Role = discord.utils.get(guild.roles, name="Pink") await user.remove_roles(Role) async def setup(bot): await bot.add_cog(ReactionRoles(bot)) The bot just does nothing whenever I right >rr. It is not replying whatsoever to the commands/listeners in the cogs. Please help I have spent like 2 days trying to fix this and tried to research as much as possible
[ "Change this:\n@commands.command()\n\nto this:\n@app_commands.command()\n\nthe decorator has an other name inside a cog.\n" ]
[ 0 ]
[]
[]
[ "discord", "discord.py", "python" ]
stackoverflow_0074499735_discord_discord.py_python.txt
Q: Python venv not installing packages under my virtual environment

I have created and activated a virtual environment with Python on my Linux installation (on my AWS EC2 instance). It says it's using the correct Python interpreter when I run which python3. But nonetheless, when I run python3 -m pip install <package>, it's not there when I run pip freeze. It keeps installing to my global installation.

Python3 installation: [screenshot omitted]
pip installation: [screenshot omitted]
pip freeze: [screenshot omitted]

I also get this error when I try to install without the --user flag:

ERROR: Could not install packages due to an EnvironmentError: [Errno 13] Permission denied: '/var/www/html/flask_api/flask_env/lib/python3.7/site-packages/

Why is it not actually using the interpreter and storing packages where they need to be?

A: Why run 'which python' and then run python3? ... Try 'which python3' (even better, run 'type python3', because it could be an alias that bypasses your venv).

A: Delete your actual virtual environment and try again by following the Python docs.
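A quick way to confirm, from inside Python itself, which interpreter is actually running and whether it belongs to the venv:

import sys

# Inside an activated venv, sys.prefix points at the venv directory
# while sys.base_prefix points at the global installation.
print(sys.executable)                 # the exact binary that is running
print(sys.prefix != sys.base_prefix)  # True only inside a venv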
Python venv not installing packages under my virtual environment
I have created and activated a virtual environment with Python on my Linux installation (On my AWS EC2 instance). It says it's using the correct python interpreter when I run which python3. But nonetheless when I run python3 -m pip install <package> it's not there when I run pip freeze. It keeps installing to my global installation Python3 installation pip installation: pip freeze: I also get this error when I try to install without the --user flag: ERROR: Could not install packages due to an EnvironmentError: [Errno 13] Permission denied: '/var/www/html/flask_api/flask_env/lib/python3.7/site-packages/ Why is not actually using the interpreter and storing it where it needs to be?
[ "Why run 'which python' then run python3? ... Try 'which python3' (even better, run 'type python3' because it could be an alias that bypasses your venv)\n", "Delete Your actual virtual environment. And try again by following the python doc\n" ]
[ 0, 0 ]
[]
[]
[ "linux", "pip", "python", "python_venv" ]
stackoverflow_0067915022_linux_pip_python_python_venv.txt
Q: How to reduce ticks?

I have 250 rows of data; it starts January 2002 and ends September 2022, and the interval is one row per month. Now I want to plot it, but it puts all 250 rows on the axis, and I only want one year shown per tick. The y axis is float and the x axis is string. I have seen that you can label the ticks manually, but what if I have really big data? There must be some efficient way. I have tried something like this:

import matplotlib.pyplot as plt

X = df_polt['Päivä']
y = df_polt['Diesel']
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
ax.plot(X, y)

I want the X axis to have about 10 ticks.

A: I believe if you use xticks you'll be fine:
https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.xticks.html

plt.xticks([0, 365, 730], ['2001', '2002', '2003'], rotation=0)

An approach like this one is what I propose; you can always customize and/or make it automatic.
Edit: I assumed that you have data for each day (this affects the points where you add a tick; change it according to your dataset).

A: Untested.

plt.plot(df_polt['Diesel'])
plt.xticks(range(0, 241, 24), ["%d-01" % year for year in range(2002, 2023, 2)])
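A sketch building on the question's own X and y (so it assumes that setup): with a string x-axis, matplotlib treats the ticks as the row positions 0..len(X)-1, so thinning them is just picking every k-th position.

fig, ax = plt.subplots(figsize=(15, 10))
ax.plot(X, y)

# Keep every 24th monthly row: roughly 11 ticks for 250 rows,
# one tick every other year. Use a step of 12 for one tick per year.
ax.set_xticks(range(0, len(X), 24))
plt.show()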
How to reduce ticks?
I have 250 rows of data, it starts january 2002 and ends septemper 2022 and interwal per row is one row/one month of the year. Now i want to plot it but it takes all 250 rows and plot it and i only want like one year shown per tick The y axis is float and x axis is string I have saw that you have to label them manualy but what if i have like realy big data? There must be some efficient way. i have tried something like this: import matplotlib.pyplot as plt X = df_polt['Päivä'] y = df_polt['Diesel'] fig, ax = plt.subplots(1,1,figsize=(15, 10)) ax.plot(X, y) I want that X axis is like 10 ticks
[ "I believe if you used xticks you'll be fine\nhttps://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.xticks.html\nplt.xticks([0, 365, 730], ['2001', '2002', '2003'],rotation=0)\n\nAn approach like this one is what I propose, you can always customize and/or make it automatic.\nedit: I assumed that you have data for each day (this affects the points where you add a tick, change it according to your dataset).\n", "Untested.\nplt.plot(df_polt['Diesel'])\nplt.xticks(range(0,241,24))\nplt.xticklabels([\"%d-01\"%y for year in range(2002,2023,2)])\n\n" ]
[ 0, 0 ]
[]
[]
[ "matplotlib", "plot", "python" ]
stackoverflow_0074508859_matplotlib_plot_python.txt
Q: remove same combinations in dataframe pandas

I have a dataframe that is an edgelist for an undirected graph. It looks like this:

  node 1 node 2       doc
0     Kn     Kn   doc5477
1     TS     Kn   doc5477
2     Kn     TS   doc5477
3     TS     TS   doc5477
4     Kn     Kn  doc10967
5     Kn     TS  doc10967
6     TS     TS  doc10967
7     TS     Kn  doc10967

How can I make sure that each combination of nodes for each document only appears once? Because rows 1 and 2 are the same, I only want one of them to appear; the same goes for rows 5 and 7. So my dataframe should look like this:

  node 1 node 2       doc
0     Kn     Kn   doc5477
1     TS     Kn   doc5477
3     TS     TS   doc5477
4     Kn     Kn  doc10967
5     Kn     TS  doc10967
6     TS     TS  doc10967

A: First, select the columns on which you need a unique combination (node 1, node 2 and doc in your case), then apply a sort to return a series with a list of combinations, and finally use a boolean mask with a negated pandas.DataFrame.duplicated to keep only the rows that represent a unique combination.
Try this:

out = df.loc[~df[['node 1','node 2', 'doc']].apply(sorted, axis=1).duplicated()]

# Output :
print(out)

  node 1 node 2       doc
0     Kn     Kn   doc5477
1     TS     Kn   doc5477
3     TS     TS   doc5477
4     Kn     Kn  doc10967
5     Kn     TS  doc10967
6     TS     TS  doc10967
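An equivalent variant, sketched under the assumption that the node labels are hashable strings: build an order-free key from the two node columns and de-duplicate on that key plus the doc column.

# A frozenset ignores order, so (TS, Kn) and (Kn, TS) get the same key.
key = df[['node 1', 'node 2']].apply(frozenset, axis=1)

out = df[~df.assign(pair=key).duplicated(subset=['pair', 'doc'])]
print(out)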
remove same combinations in dataframe pandas
I have a dataframe that is a edgelist for a undirected graph it looks like this: node 1 node 2 doc 0 Kn Kn doc5477 1 TS Kn doc5477 2 Kn TS doc5477 3 TS TS doc5477 4 Kn Kn doc10967 5 Kn TS doc10967 6 TS TS doc10967 7 TS Kn doc10967 How can I make sure that the combinations of nodes for each document only appear once. Meaning that because row 1 and 2 have are the same I only want it to appear once. Same for rows 5 and 7? So that my dataframe looks like this: node 1 node 2 doc 0 Kn Kn doc5477 1 TS Kn doc5477 3 TS TS doc5477 4 Kn Kn doc10967 5 Kn TS doc10967 6 TS TS doc10967
[ "First, select the columns on which you need a unique combination (node1, node2 and doc in your case) then apply a sort to return a series with a list of combinations, and finally use a boolean mask with a negative pandas.DataFrame.duplicated to keep only the rows that represent a unique combination.\nTry this:\nout= df.loc[~df[['node 1','node 2', 'doc']].apply(sorted, axis=1).duplicated()]\n\n# Output :\nprint(out)\n\n node 1 node 2 doc\n0 Kn Kn doc5477\n1 TS Kn doc5477\n3 TS TS doc5477\n4 Kn Kn doc10967\n5 Kn TS doc10967\n6 TS TS doc10967\n\n" ]
[ 2 ]
[]
[]
[ "graph", "pandas", "python" ]
stackoverflow_0074508880_graph_pandas_python.txt
Q: Bot won't join in "join" slash command discord.py V2.0

I need to update some old code to use slash commands. In the old code I have a join command that just makes the bot join the current voice channel. I have done some research, but all I could find were older tutorials on how to write a join command with the old prefix and ctx. The solution I am seeking is a little code example of how a join command could look without using ctx, because that's not in slash commands. The code is not done yet, but here is the music cog where the command should be:

import requests
import asyncio
from youtube_dl import YoutubeDL
from rich import print
#---
import discord
from discord import app_commands
from discord.ext import commands
from discord import FFmpegPCMAudio
#---
YDL_OPTIONS = {
    'format': 'bestaudio/best',
    'noplaylist': True,
    'quiet': True,
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '256',
    }]
}
FFMPEG_OPTIONS = {
    'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5',
    'options': '-vn'
}
#---
class music(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

    @app_commands.command(name = "join", description="Boten joinar din nuvarande röst-kanal!")
    async def join(self, interaction: discord.Interaction):
        # bot joins voice channel code here
        await interaction.response.send_message("Ok")

async def setup(bot):
    await bot.add_cog(music(bot))

A: It is pretty much the same for slash commands as for normal commands; you use the interaction object instead of ctx.

@app_commands.command()
async def join(self, interaction: discord.Interaction):
    channel = interaction.user.voice.channel
    await channel.connect()
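One caveat worth adding to that answer, sketched here as an assumed nicety rather than part of the accepted code: interaction.user.voice is None when the caller is not in a voice channel, so a guard avoids an AttributeError.

@app_commands.command(name="join")
async def join(self, interaction: discord.Interaction):
    # .voice is None if the user calling the command is not in a channel.
    if interaction.user.voice is None:
        await interaction.response.send_message(
            "Join a voice channel first!", ephemeral=True)
        return
    await interaction.user.voice.channel.connect()
    await interaction.response.send_message("Ok")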
Bot won't join in "join" slash command discord.py V2.0
I need to update some old code to use slash commands and in the old code I have a join command that just makes the bot join the current voice channel. I have done some research but all I could find was just older tutorials on how you did a join command with the old prefix and ctx. The solution I am seeking is a little code example on how a join command could look like without using ctx beacuse thats not in slash commands. The code is not done yet but here is inside my music cog where the command should be: import requests import asyncio from youtube_dl import YoutubeDL from rich import print #--- import discord from discord import app_commands from discord.ext import commands from discord import FFmpegPCMAudio #--- YDL_OPTIONS = { 'format': 'bestaudio/best', 'noplaylist': True, 'quiet': True, 'prostprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '256', }] } FFMPEG_OPTIONS = { 'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn' } #--- class music(commands.Cog): def __init__(self, bot): self.bot = bot @app_commands.command(name = "join", description="Boten joinar din nuvarande röst-kanal!") async def join(self, interaction: discord.Interaction): #bot joins voice channel cod e here await interaction.response.send_message("Ok") async def setup(bot): await bot.add_cog(music(bot))
[ "It as pretty much the same for slash commands as compared to normal command, you use theinteraction object instead of ctx.\n@app_commands.command()\n async def join(self, interaction: discord.Interaction):\n channel = interaction.user.voice.channel\n await channel.connect()\n\n" ]
[ 1 ]
[]
[]
[ "discord", "discord.py", "python" ]
stackoverflow_0074507334_discord_discord.py_python.txt
Q: How to validate list items when they change in a pydantic model?

I have a List in a pydantic model. I'd like my custom validator to run when the list changes (not only on assignment).

from typing import List
from pydantic import BaseModel, validator

class A(BaseModel):
    b: List[int] = []

    class Config:
        validate_assignment = True

    @validator("b")
    def positive(cls, v):
        assert all(i > 0 for i in v), f"No negative numbers: {v}"
        return v

a = A()
a.b = [1, 2, -3]  # error
a.b = [1, 2]  # no error
a.b.append(-3)  # no error

I'd like that last append to raise an error. I'll get an error if I try to recreate the object (as expected):

A(**a.dict())

Even appending a wrong type is allowed. Why doesn't this break the model?

a.b.append("asdf")  # no error

This is similar to (an extension of): How to validate a pydantic object after editing it

A: from pydantic import BaseModel, validator
from typing import List


class PositiveIntList(BaseModel):
    __root__: List[int] = []

    def append(self, value: int) -> None:
        self.__root__.append(value)
        super().__init__(__root__=self.__root__)

    def __getitem__(self, item: int) -> int:
        return self.__root__[item]

    def __setitem__(self, item: int, value: int) -> None:
        self.__root__[item] = value
        super().__init__(__root__=self.__root__)

    @validator("__root__", each_item=True)
    def positive(cls, v):
        assert v > 0, f"No negative numbers: {v}"
        return v


class A(BaseModel):
    b: PositiveIntList = PositiveIntList()


a = A(b=[1, 2, 3])
a = A(b=[1, 2, -3])  # error

a.b = PositiveIntList.parse_obj([4, 5])
a.b = PositiveIntList.parse_obj([4, -5])  # error

a.b.append(6)
a.b.append(-6)  # error

a.b[0] = 7
a.b[0] = -7  # error

A: I suggest two ways to validate the list: one is to use a validator that runs when the list is assigned, and the other is to use the Field options, as follows:

from typing import List
from pydantic import BaseModel, validator, Field

class A(BaseModel):
    b: List[int] = []

    class Config:
        validate_assignment = True

    @validator("b")
    def positive(cls, v):
        assert all(i > 0 for i in v), f"No negative numbers: {v}"
        return v

class A(BaseModel):
    b: List[int] = Field(ge=0, le=6, unique_items=True, description="")
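For the per-item constraint alone, pydantic v1's conint is another minimal option; note that, like the original model, it still cannot intercept an in-place .append on the plain list, which is why the __root__ wrapper above exists:

from typing import List
from pydantic import BaseModel, conint

class A(BaseModel):
    # Items are validated on creation and on assignment of b,
    # but a bare list.append still bypasses pydantic entirely.
    b: List[conint(gt=0)] = []

    class Config:
        validate_assignment = True

A(b=[1, 2, -3])  # raises ValidationError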
How to validate list items when they change in a pydantic model?
I have a List in a pydantic model. I'd like my custom validator to run when the list changes (not only on assignment). from typing import List from pydantic import BaseModel, validator class A(BaseModel): b: List[int] = [] class Config: validate_assignment = True @validator("b") def positive(cls, v): assert all(i > 0 for i in v), f"No negative numbers: {v}" return v a = A() a.b = [1, 2, -3] # error a.b = [1, 2] # no error a.b.append(-3) # no error I'd like that last append to raise an error. I'll get an error if i try to recreate the object (as expected) A(**a.dict()) Even appending a wrong type is allowed. Why doesn't this break the model? a.b.append("asdf") # no error This is similar/an extension to: How to validate a pydantic object after editing it
[ "from pydantic import BaseModel, validator\nfrom typing import List\n\n\nclass PositiveIntList(BaseModel):\n __root__: List[int] = []\n\n def append(self, value: int) -> None:\n self.__root__.append(value)\n super().__init__(__root__=self.__root__)\n\n def __getitem__(self, item: int) -> int:\n return self.__root__[item]\n\n def __setitem__(self, item: int, value: int) -> None:\n self.__root__[item] = value\n super().__init__(__root__=self.__root__)\n\n @validator(\"__root__\", each_item=True)\n def positive(cls, v):\n assert v > 0, f\"No negative numbers: {v}\"\n return v\n\n\nclass A(BaseModel):\n b: PositiveIntList = PositiveIntList()\n\n\na = A(b=[1, 2, 3])\na = A(b=[1, 2, -3]) # error\n\na.b = PositiveIntList.parse_obj([4, 5])\na.b = PositiveIntList.parse_obj([4, -5]) # error\n\na.b.append(6)\na.b.append(-6) # error\n\na.b[0] = 7\na.b[0] = -7 # error\n\n", "I suggest two ways to evaluate the list, one is to use a validator to run when the list changes and the other is to use the field option as follows:\nfrom typing import List\nfrom pydantic import BaseModel, validator, Field\n\nclass A(BaseModel):\n b: List[int] = []\n\n class Config:\n validate_assignment = True\n\n @validator(\"b\")\n def positive(cls, v):\n assert all(i > 0 for i in v), f\"No negative numbers: {v}\"\n return v\n\nclass A(BaseModel):\n b: List[int] = Field(ge=0, le=6, unique_items=True,description=\"\")\n\n\n\n" ]
[ 2, 0 ]
[]
[]
[ "pydantic", "python", "validation" ]
stackoverflow_0067748856_pydantic_python_validation.txt
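A lighter-weight alternative, sketched below assuming pydantic v1: a constrained item type (conint) validates every element on construction and, with validate_assignment, on reassignment. It still does not catch append, since mutating a plain list is invisible to pydantic; that is exactly the gap the __root__ wrapper above closes.
from typing import List
from pydantic import BaseModel, conint

class A(BaseModel):
    # each item must be > 0
    b: List[conint(gt=0)] = []

    class Config:
        validate_assignment = True

a = A(b=[1, 2])    # ok
a.b = [3, -4]      # ValidationError
a.b.append(-5)     # NOT caught: plain-list mutation bypasses validation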
Q: What's the difference between FastAPI background tasks and Celery tasks? Recently I read something about this and the point was that celery is more productive. Now, I can't find detailed information about the difference between these two and what should be the best way to use them. A: Straight from the documentation: If you need to perform heavy background computation and you don't necessarily need it to be run by the same process (for example, you don't need to share memory, variables, etc), you might benefit from using other bigger tools like Celery. They tend to require more complex configurations, a message/job queue manager, like RabbitMQ or Redis, but they allow you to run background tasks in multiple processes, and especially, in multiple servers. To see an example, check the Project Generators, they all include Celery already configured. But if you need to access variables and objects from the same FastAPI app, or you need to perform small background tasks (like sending an email notification), you can simply just use BackgroundTasks. Have a look at this answer as well.
What's the difference between FastAPI background tasks and Celery tasks?
Recently I read something about this and the point was that celery is more productive. Now, I can't find detailed information about the difference between these two and what should be the best way to use them.
[ "Straight from the documentation:\n\nIf you need to perform heavy background computation and you don't\nnecessarily need it to be run by the same process (for example, you\ndon't need to share memory, variables, etc), you might benefit from\nusing other bigger tools like Celery.\nThey tend to require more complex configurations, a message/job queue\nmanager, like RabbitMQ or Redis, but they allow you to run\nbackground tasks in multiple processes, and especially, in multiple\nservers.\nTo see an example, check the Project Generators, they all include\nCelery already configured.\nBut if you need to access variables and objects from the same\nFastAPI app, or you need to perform small background tasks (like\nsending an email notification), you can simply just use\nBackgroundTasks.\n\nHave a look at this answer as well.\n" ]
[ 1 ]
[]
[]
[ "background_task", "celery", "fastapi", "python", "scheduled_tasks" ]
stackoverflow_0074508774_background_task_celery_fastapi_python_scheduled_tasks.txt
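For reference, a minimal BackgroundTasks sketch (the endpoint and function names are illustrative, not taken from any particular project):
from fastapi import BackgroundTasks, FastAPI

app = FastAPI()

def send_notification(email: str) -> None:
    # small, in-process work that can share the app's memory and objects
    print(f"notifying {email}")

@app.post("/signup")
async def signup(email: str, background_tasks: BackgroundTasks):
    # runs in the same process after the response has been sent
    background_tasks.add_task(send_notification, email)
    return {"status": "queued"}
Celery would instead serialize the task to a broker (RabbitMQ or Redis) and execute it in a separate worker process, possibly on another machine: heavier to configure, but it survives restarts of the web process and scales independently.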
Q: How to efficiently store and render orbits in pygame I followed a tutorial by TechWithTimn youtube and completed this solar system project in pygame. I have a lot of plans to further expand it and I succeeded in many. But when I add more planets and leave the program for some minutes, the fps rate drops and eventually the program crashes due to lack of memory. I figured out it was the orbit rendering part. How can I efficiently store and render the orbital points? The below code is taken from TechWithTim on youtube. import pygame import math pygame.init() WIDTH, HEIGHT = 1200, 750 WIN = pygame.display.set_mode((WIDTH, HEIGHT)) pygame.display.set_caption("Planet Simulation") WHITE = (255, 255, 255) YELLOW = (255, 255, 0) BLUE = (100, 149, 237) RED = (188, 39, 50) DARK_GREY = (80, 78, 81) FONT = pygame.font.SysFont("comicsans", 16) class Planet: AU = 149.6e6 * 1000 G = 6.67428e-11 SCALE = 250 / AU # 1AU = 100 pixels TIMESTEP = 3600*24 # 1 day def __init__(self, x, y, radius, color, mass): self.x = x self.y = y self.radius = radius self.color = color self.mass = mass self.orbit = [] self.sun = False self.distance_to_sun = 0 self.x_vel = 0 self.y_vel = 0 def draw(self, win): x = self.x * self.SCALE + WIDTH / 2 y = self.y * self.SCALE + HEIGHT / 2 if len(self.orbit) > 2: updated_points = [] for point in self.orbit: x, y = point x = x * self.SCALE + WIDTH / 2 y = y * self.SCALE + HEIGHT / 2 updated_points.append((x, y)) pygame.draw.lines(win, self.color, False, updated_points, 2) pygame.draw.circle(win, self.color, (x, y), self.radius) if not self.sun: distance_text = FONT.render(f"{round(self.distance_to_sun/1000, 1)}km", 1, WHITE) win.blit(distance_text, (x - distance_text.get_width()/2, y - distance_text.get_height()/2)) def attraction(self, other): other_x, other_y = other.x, other.y distance_x = other_x - self.x distance_y = other_y - self.y distance = math.sqrt(distance_x ** 2 + distance_y ** 2) if other.sun: self.distance_to_sun = distance force = self.G * self.mass * other.mass / distance**2 theta = math.atan2(distance_y, distance_x) force_x = math.cos(theta) * force force_y = math.sin(theta) * force return force_x, force_y def update_position(self, planets): total_fx = total_fy = 0 for planet in planets: if self == planet: continue fx, fy = self.attraction(planet) total_fx += fx total_fy += fy self.x_vel += total_fx / self.mass * self.TIMESTEP self.y_vel += total_fy / self.mass * self.TIMESTEP self.x += self.x_vel * self.TIMESTEP self.y += self.y_vel * self.TIMESTEP self.orbit.append((self.x, self.y)) def main(): run = True clock = pygame.time.Clock() sun = Planet(0, 0, 30, YELLOW, 1.98892 * 10**30) sun.sun = True earth = Planet(-1 * Planet.AU, 0, 16, BLUE, 5.9742 * 10**24) earth.y_vel = 29.783 * 1000 mars = Planet(-1.524 * Planet.AU, 0, 12, RED, 6.39 * 10**23) mars.y_vel = 24.077 * 1000 mercury = Planet(0.387 * Planet.AU, 0, 8, DARK_GREY, 3.30 * 10**23) mercury.y_vel = -47.4 * 1000 venus = Planet(0.723 * Planet.AU, 0, 14, WHITE, 4.8685 * 10**24) venus.y_vel = -35.02 * 1000 planets = [sun, earth, mars, mercury, venus] while run: clock.tick(60) WIN.fill((0, 0, 0)) for event in pygame.event.get(): if event.type == pygame.QUIT: run = False for planet in planets: planet.update_position(planets) planet.draw(WIN) pygame.display.update() pygame.quit() main() A: I solved your problem very easily. I added only these two lines to end of update_position function, which deletes first dot from array, when the circle is full. 
if len(self.orbit) > 720: del self.orbit[0] The number 720 is the maximum length of the self.orbit list for the most distant planet. You can compute a different cap for each planet if you want. I hope that my solution will help you. Adam
How to efficiently store and render orbits in pygame
I followed a tutorial by TechWithTimn youtube and completed this solar system project in pygame. I have a lot of plans to further expand it and I succeeded in many. But when I add more planets and leave the program for some minutes, the fps rate drops and eventually the program crashes due to lack of memory. I figured out it was the orbit rendering part. How can I efficiently store and render the orbital points? The below code is taken from TechWithTim on youtube. import pygame import math pygame.init() WIDTH, HEIGHT = 1200, 750 WIN = pygame.display.set_mode((WIDTH, HEIGHT)) pygame.display.set_caption("Planet Simulation") WHITE = (255, 255, 255) YELLOW = (255, 255, 0) BLUE = (100, 149, 237) RED = (188, 39, 50) DARK_GREY = (80, 78, 81) FONT = pygame.font.SysFont("comicsans", 16) class Planet: AU = 149.6e6 * 1000 G = 6.67428e-11 SCALE = 250 / AU # 1AU = 100 pixels TIMESTEP = 3600*24 # 1 day def __init__(self, x, y, radius, color, mass): self.x = x self.y = y self.radius = radius self.color = color self.mass = mass self.orbit = [] self.sun = False self.distance_to_sun = 0 self.x_vel = 0 self.y_vel = 0 def draw(self, win): x = self.x * self.SCALE + WIDTH / 2 y = self.y * self.SCALE + HEIGHT / 2 if len(self.orbit) > 2: updated_points = [] for point in self.orbit: x, y = point x = x * self.SCALE + WIDTH / 2 y = y * self.SCALE + HEIGHT / 2 updated_points.append((x, y)) pygame.draw.lines(win, self.color, False, updated_points, 2) pygame.draw.circle(win, self.color, (x, y), self.radius) if not self.sun: distance_text = FONT.render(f"{round(self.distance_to_sun/1000, 1)}km", 1, WHITE) win.blit(distance_text, (x - distance_text.get_width()/2, y - distance_text.get_height()/2)) def attraction(self, other): other_x, other_y = other.x, other.y distance_x = other_x - self.x distance_y = other_y - self.y distance = math.sqrt(distance_x ** 2 + distance_y ** 2) if other.sun: self.distance_to_sun = distance force = self.G * self.mass * other.mass / distance**2 theta = math.atan2(distance_y, distance_x) force_x = math.cos(theta) * force force_y = math.sin(theta) * force return force_x, force_y def update_position(self, planets): total_fx = total_fy = 0 for planet in planets: if self == planet: continue fx, fy = self.attraction(planet) total_fx += fx total_fy += fy self.x_vel += total_fx / self.mass * self.TIMESTEP self.y_vel += total_fy / self.mass * self.TIMESTEP self.x += self.x_vel * self.TIMESTEP self.y += self.y_vel * self.TIMESTEP self.orbit.append((self.x, self.y)) def main(): run = True clock = pygame.time.Clock() sun = Planet(0, 0, 30, YELLOW, 1.98892 * 10**30) sun.sun = True earth = Planet(-1 * Planet.AU, 0, 16, BLUE, 5.9742 * 10**24) earth.y_vel = 29.783 * 1000 mars = Planet(-1.524 * Planet.AU, 0, 12, RED, 6.39 * 10**23) mars.y_vel = 24.077 * 1000 mercury = Planet(0.387 * Planet.AU, 0, 8, DARK_GREY, 3.30 * 10**23) mercury.y_vel = -47.4 * 1000 venus = Planet(0.723 * Planet.AU, 0, 14, WHITE, 4.8685 * 10**24) venus.y_vel = -35.02 * 1000 planets = [sun, earth, mars, mercury, venus] while run: clock.tick(60) WIN.fill((0, 0, 0)) for event in pygame.event.get(): if event.type == pygame.QUIT: run = False for planet in planets: planet.update_position(planets) planet.draw(WIN) pygame.display.update() pygame.quit() main()
[ "I solved your problem very easily. I added only these two lines to end of update_position function, which deletes first dot from array, when the circle is full.\nif len(self.orbit) > 720:\n del self.orbit[0]\n\nNumber 720 is the max length of self.orbit array for the most distant planet. You can change this number for each planet by some calculation.\nI hope, that my solution will help you.\nAdam\n" ]
[ 1 ]
[]
[]
[ "memory_efficient", "performance", "pygame", "python" ]
stackoverflow_0074508653_memory_efficient_performance_pygame_python.txt
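A slightly more idiomatic form of the same fix, assuming a fixed history length is acceptable: collections.deque with maxlen discards the oldest point automatically, so no manual del is needed. The draw method already iterates orbit to build updated_points, so a deque drops in without further changes.
from collections import deque

# in Planet.__init__:
self.orbit = deque(maxlen=720)   # oldest points fall off automatically

# in update_position, unchanged:
self.orbit.append((self.x, self.y))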
Q: python - collect full path till leaf on organization tree I got organizations tree stored as json { "name": "amos", "direct_reports": [ { "name": "bart", "direct_reports": [ { "name": "colin", "direct_reports": [] }, { "name": "clara", "direct_reports": [] } ] }, { "name": "bravo", "direct_reports": [ { "name": "cupid", "direct_reports": [] }, { "name": "clever", "direct_reports": [] } ] } ] } I need to store full "management path" for each employee, such as: management_chain["clever"]={bravo,amos} management_chain["bart"]={amos} Currently I manage to reach all edges and classify those as employees and managers with code as followed: def get_herarchy(org): tmp_obj = {} tmp_obj['managers'] = [] for emp in org['direct_reports']: tmp_obj['managers'].append(org['name']) print("manager "+org['name']) if len(emp['direct_reports'])>0: get_herarchy(emp) tmp_obj['name'] = emp['name'] print(emp['name']) return tmp_obj But the dictionary doesn't holds the right values A: Like this, maybe: def get_chain(org, name): if org['name'] == name: return [name] for emp in org['direct_reports']: chain = get_chain(emp, name) if chain: return [org['name']] + chain return None print(get_chain(org, 'bart')) # ['amos', 'bart'] print(get_chain(org, 'clever')) # ['amos', 'bravo', 'clever'] UPD: This is how to make a dictionary: def nested_iter(org): yield org['name'] for emp in org['direct_reports']: yield from nested_iter(emp) print({name: get_chain(org, name)[0:-1] for name in nested_iter(org)})
python - collect full path till leaf on organization tree
I got organizations tree stored as json { "name": "amos", "direct_reports": [ { "name": "bart", "direct_reports": [ { "name": "colin", "direct_reports": [] }, { "name": "clara", "direct_reports": [] } ] }, { "name": "bravo", "direct_reports": [ { "name": "cupid", "direct_reports": [] }, { "name": "clever", "direct_reports": [] } ] } ] } I need to store full "management path" for each employee, such as: management_chain["clever"]={bravo,amos} management_chain["bart"]={amos} Currently I manage to reach all edges and classify those as employees and managers with code as followed: def get_herarchy(org): tmp_obj = {} tmp_obj['managers'] = [] for emp in org['direct_reports']: tmp_obj['managers'].append(org['name']) print("manager "+org['name']) if len(emp['direct_reports'])>0: get_herarchy(emp) tmp_obj['name'] = emp['name'] print(emp['name']) return tmp_obj But the dictionary doesn't holds the right values
[ "Like this, maybe:\ndef get_chain(org, name):\n if org['name'] == name:\n return [name]\n for emp in org['direct_reports']:\n chain = get_chain(emp, name)\n if chain:\n return [org['name']] + chain\n return None\n\nprint(get_chain(org, 'bart')) # ['amos', 'bart']\nprint(get_chain(org, 'clever')) # ['amos', 'bravo', 'clever']\n\nUPD: This is how to make a dictionary:\ndef nested_iter(org):\n yield org['name']\n for emp in org['direct_reports']:\n yield from nested_iter(emp)\n\nprint({name: get_chain(org, name)[0:-1] for name in nested_iter(org)})\n\n" ]
[ 1 ]
[]
[]
[ "breadth_first_search", "python", "tree" ]
stackoverflow_0074508822_breadth_first_search_python_tree.txt
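If the whole management_chain mapping is the goal, a single traversal that threads the ancestor path down the tree avoids re-searching the tree once per name. A sketch against the JSON structure from the question:
def build_chains(org, ancestors=(), chains=None):
    if chains is None:
        chains = {}
    chains[org['name']] = list(ancestors)   # the managers above this employee
    for emp in org['direct_reports']:
        build_chains(emp, ancestors + (org['name'],), chains)
    return chains

management_chain = build_chains(org)
# management_chain['clever'] -> ['amos', 'bravo']
# management_chain['bart']   -> ['amos']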
Q: Extract text from class 'bs4.element.Tag' beautifulsoup I have the following text in a class 'bs4.element.Tag' object: <span id="my_rate">264.46013</span> How do I strip the value of 264.46013 and get rid of the junk before and after the value? I have seen this and this but I am unable to use the text.split() methods etc. Cheers A: I'm not sure I follow, however, if you are using BeautifulSoup: from bs4 import BeautifulSoup as bs html = '<span id="my_rate">264.46013</span>' soup = bs(html, 'html.parser') value = soup.select_one('span[id="my_rate"]').get_text() print(value) Result: 264.46013
Extract text from class 'bs4.element.Tag' beautifulsoup
I have the following text in a class 'bs4.element.Tag' object: <span id="my_rate">264.46013</span> How do I strip the value of 264.46013 and get rid of the junk before and after the value? I have seen this and this but I am unable to use the text.split() methods etc. Cheers
[ "I'm not sure I follow, however, if you are using BeautifulSoup:\nfrom bs4 import BeautifulSoup as bs\n\nhtml = '<span id=\"my_rate\">264.46013</span>'\n\nsoup = bs(html, 'html.parser')\nvalue = soup.select_one('span[id=\"my_rate\"]').get_text()\nprint(value)\n\nResult:\n264.46013\n\n" ]
[ 2 ]
[]
[]
[ "beautifulsoup", "html", "python" ]
stackoverflow_0074508471_beautifulsoup_html_python.txt
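If the numeric value itself is what is needed, the extracted text converts directly; a short sketch:
from bs4 import BeautifulSoup

soup = BeautifulSoup('<span id="my_rate">264.46013</span>', 'html.parser')
rate = float(soup.select_one('#my_rate').get_text(strip=True))
print(rate)   # 264.46013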
Q: Tkinter: pack's anchor option is not working I've two file for my app, and in my second one page_one.py I can't use properly the anchor method. The label 'left' and 'right' are always positioned in the middle of the screen and not on the side # main.py import tkinter as tk from page_one import PageOne class Main(tk.Frame): def __init__(self, parent, *args, **kwargs): tk.Frame.__init__(self, parent, *args, **kwargs) self.page_one = PageOne(self) self.page_one.pack(expand='True') if __name__ == "__main__": root = tk.Tk() main = Main(root) root.attributes("-fullscreen", True) main.pack(side="top", fill="both", expand=True) root.mainloop() # page_one.py import tkinter as tk class PageOne(tk.Frame): def __init__(self, parent, *args, **kwargs): tk.Frame.__init__(self, parent, *args, **kwargs) self.one_label = tk.Label(self, text='LEFT') self.one_label.pack(padx=(20,0), side='left', anchor='w') self.two_label = tk.Label(self, text='RIGHT') self.two_label.pack(padx=(0,20), side='right', anchor='e') if __name__ == "__main__": root = tk.Tk() PageOne(root).pack(side="top", fill="both", expand=True) root.mainloop() How can I make the anchor option works? A: That's because your PageOne frame doesn't fill Main. Add fill="both" to its pack method as well: import tkinter as tk class PageOne(tk.Frame): def __init__(self, parent, *args, **kwargs): super().__init__(parent, *args, **kwargs) self.one_label = tk.Label(self, text='LEFT') self.one_label.pack(padx=(20,0), side='left', anchor='w') self.two_label = tk.Label(self, text='RIGHT') self.two_label.pack(padx=(0,20), side='right', anchor='e') class Main(tk.Frame): def __init__(self, parent, *args, **kwargs): super().__init__(parent, *args, **kwargs) self.page_one = PageOne(self) self.page_one.pack(expand='True', fill="both") if __name__ == "__main__": root = tk.Tk() main = Main(root) #root.attributes("-fullscreen", True) root.geometry("1280x720") main.pack(side="top", fill="both", expand=True) root.mainloop() Note that you can use super() in your init functions (without self as argument) A: You have to specify fill="both" when packing the PageOne frame for it to expand completely in the x and y axis. Update your main.py accordingly. # main.py import tkinter as tk from page_one import PageOne class Main(tk.Frame): def __init__(self, parent, *args, **kwargs): tk.Frame.__init__(self, parent, *args, **kwargs) self.page_one = PageOne(self) self.page_one.pack(fill="both", expand='True') if __name__ == "__main__": root = tk.Tk() main = Main(root) root.attributes("-fullscreen", True) main.pack(side="top", fill="both", expand=True) root.mainloop()
Tkinter: pack's anchor option is not working
I've two files for my app, and in the second one, page_one.py, I can't use the anchor option properly. The labels 'LEFT' and 'RIGHT' are always positioned in the middle of the screen and not on the sides # main.py import tkinter as tk from page_one import PageOne class Main(tk.Frame): def __init__(self, parent, *args, **kwargs): tk.Frame.__init__(self, parent, *args, **kwargs) self.page_one = PageOne(self) self.page_one.pack(expand='True') if __name__ == "__main__": root = tk.Tk() main = Main(root) root.attributes("-fullscreen", True) main.pack(side="top", fill="both", expand=True) root.mainloop() # page_one.py import tkinter as tk class PageOne(tk.Frame): def __init__(self, parent, *args, **kwargs): tk.Frame.__init__(self, parent, *args, **kwargs) self.one_label = tk.Label(self, text='LEFT') self.one_label.pack(padx=(20,0), side='left', anchor='w') self.two_label = tk.Label(self, text='RIGHT') self.two_label.pack(padx=(0,20), side='right', anchor='e') if __name__ == "__main__": root = tk.Tk() PageOne(root).pack(side="top", fill="both", expand=True) root.mainloop() How can I make the anchor option work?
[ "That's because your PageOne frame doesn't fill Main. Add fill=\"both\" to its pack method as well:\nimport tkinter as tk\n\nclass PageOne(tk.Frame):\n def __init__(self, parent, *args, **kwargs):\n super().__init__(parent, *args, **kwargs) \n \n self.one_label = tk.Label(self, text='LEFT')\n self.one_label.pack(padx=(20,0), side='left', anchor='w') \n\n self.two_label = tk.Label(self, text='RIGHT')\n self.two_label.pack(padx=(0,20), side='right', anchor='e') \n \n\nclass Main(tk.Frame):\n def __init__(self, parent, *args, **kwargs):\n super().__init__(parent, *args, **kwargs)\n\n self.page_one = PageOne(self)\n self.page_one.pack(expand='True', fill=\"both\")\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n main = Main(root)\n #root.attributes(\"-fullscreen\", True)\n root.geometry(\"1280x720\")\n main.pack(side=\"top\", fill=\"both\", expand=True)\n root.mainloop()\n\nNote that you can use super() in your init functions (without self as argument)\n", "You have to specify fill=\"both\" when packing the PageOne frame for it to expand completely in the x and y axis. Update your main.py accordingly.\n# main.py\nimport tkinter as tk\nfrom page_one import PageOne\n\nclass Main(tk.Frame):\n def __init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent, *args, **kwargs)\n\n self.page_one = PageOne(self)\n self.page_one.pack(fill=\"both\", expand='True')\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n main = Main(root)\n root.attributes(\"-fullscreen\", True)\n main.pack(side=\"top\", fill=\"both\", expand=True)\n root.mainloop()\n\n" ]
[ 2, 0 ]
[]
[]
[ "python", "tkinter" ]
stackoverflow_0074508954_python_tkinter.txt
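When pack geometry behaves unexpectedly, colouring each frame makes its actual extent visible; a quick, temporary debugging sketch (colours are arbitrary):
# temporary, for debugging only
main.configure(bg='khaki')               # Main fills the window
main.page_one.configure(bg='lightblue')  # PageOne only hugs its labels unless packed with fill="both"
This makes it obvious that the labels were anchored correctly all along; it was the PageOne frame itself that was not stretching.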
Q: Postgres database refusing connection from Airflow: Is the server running on host "localhost" (127.0.0.1) and accepting TCP/IP connections port 5432? I have an existing database (Postgres) that i want to connect to apache-Airflow on my host machine(Windows 10), I installed the apache-airflow on the WSL running ubuntu. The installation was smooth and working fine since i was able to get the airflow webserver running on my localhost(port:8081). I tried connecting airflow to my existing database (carPrices) passing all the necessary parameters which were all correct. I also confirmed my database is up and running on port(5432). Whenever i click the connect button it will report this error..."could not connect to server: Connection refused Is the server running on host "localhost" (127.0.0.1) and accepting TCP/IP connections on port 5432?" I dont know what exactly is the problem as i am new to airflow. I tried setting the connection parameter of airflow by setting it through the airflow.cfg file and through the Airflow UI home. In the first case i cant even "airflow db init" as it report the same problem of connection refusal. the second case will setup a default sqlite db for the airflow UI to run. then i tried connecting using the UI but same error message was given. I check using if the postgres is up and running using netstat -ab and posgres is up and listening. I was expecting the connection to report succesful since i am sure of all the database parameters passed but instead i got this. A: Found out the problem is with WSL 2, you cant connect to localhost from WSL2 without some complicated tweaks... The simplest thing to do is downgrade to WSL 1 running this command in powershell: wsl.exe --set-version Ubuntu-20.04 1
Postgres database refusing connection from Airflow: Is the server running on host "localhost" (127.0.0.1) and accepting TCP/IP connections on port 5432?
I have an existing database (Postgres) that I want to connect to Apache Airflow on my host machine (Windows 10). I installed Apache Airflow on WSL running Ubuntu. The installation was smooth and working fine, since I was able to get the Airflow webserver running on my localhost (port 8081). I tried connecting Airflow to my existing database (carPrices), passing all the necessary parameters, which were all correct. I also confirmed my database is up and running on port 5432. Whenever I click the connect button it reports this error: "could not connect to server: Connection refused Is the server running on host "localhost" (127.0.0.1) and accepting TCP/IP connections on port 5432?" I don't know what exactly the problem is, as I am new to Airflow. I tried setting the connection parameter of Airflow both through the airflow.cfg file and through the Airflow UI home. In the first case I can't even run "airflow db init", as it reports the same connection-refused problem. The second case will set up a default SQLite DB for the Airflow UI to run; then I tried connecting using the UI, but the same error message was given. I checked whether Postgres is up and running using netstat -ab, and Postgres is up and listening. I was expecting the connection to report successful, since I am sure of all the database parameters passed, but instead I got this.
[ "Found out the problem is with WSL 2, you cant connect to localhost from WSL2 without some complicated tweaks... The simplest thing to do is downgrade to WSL 1\nrunning this command in powershell:\nwsl.exe --set-version Ubuntu-20.04 1\n" ]
[ 0 ]
[]
[]
[ "airflow", "airflow_webserver", "postgresql", "python" ]
stackoverflow_0074300916_airflow_airflow_webserver_postgresql_python.txt
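Staying on WSL 2 is also possible; a hedged sketch of the usual workaround (the address below is an example: WSL 2 exposes the Windows host as the nameserver entry in /etc/resolv.conf, and Postgres on Windows must be listening on that interface with a matching pg_hba.conf rule):
# inside WSL 2: find the Windows host address
grep nameserver /etc/resolv.conf
# e.g. nameserver 172.20.112.1

# then point Airflow at that address instead of localhost, e.g. in airflow.cfg:
# sql_alchemy_conn = postgresql+psycopg2://user:password@172.20.112.1:5432/carPrices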
Q: djangocms: command not found I installed django cms by this command $ sudo pip3 install django-cms the installation is completed and returns this: Requirement already satisfied: django-cms in /usr/local/lib/python3.6/dist-packages Requirement already satisfied: django-classy-tags>=0.7.2 in /usr/local/lib/python3.6/dist-packages (from django-cms) Requirement already satisfied: django-formtools>=1.0 in /usr/local/lib/python3.6/dist-packages (from django-cms) Requirement already satisfied: Django<2.0,>=1.8 in /home/amir/.local/lib/python3.6/site-packages (from django-cms) Requirement already satisfied: djangocms-admin-style>=1.0 in /usr/local/lib/python3.6/dist-packages (from django-cms) Requirement already satisfied: django-sekizai>=0.7 in /usr/local/lib/python3.6/dist-packages (from django-cms) Requirement already satisfied: django-treebeard>=4.0.1 in /usr/local/lib/python3.6/dist-packages (from django-cms) However, when I want to use djangocms it returns djangocms: command not found I use Python 3.6 and django 1.9.5 (I know it is an old version but the project belongs to my company and they use this version). A: http://docs.django-cms.org/en/release-3.4.x/introduction/install.html My guess is you forgot to run: pip install djangocms-installer I'm guessing that because I did that too. I installed pip install django-cms, then wondered why it didn't work. A: Just ran into this and got it solved. Afterwards my palm went to my face because I was like "duh" lol. Oh well. Here is what the issue was for me and how I fixed it. cd ~/ ls -al This should give you the files and folders in your user directory. (NOTE: I use AWS instances so most of the time my user will be bitnami unless I change it. Your user will be different.) What you want to look for are .bashrc or .profile or both. I updated both. Use whatever text editor you wish to open that/those files nano .bashrc and/or nano .profile Usually at the top of these files you will have something like PATH=/opt/bitnami/apache/bin:/opt/bitnami/apache2/bin:/opt/bitnami/common/bin: export PATH The djangocms script is installed in the /home/bitnami/.local/bin folder. You will want to add this path to the PATH variable in your .bashrc and .profile files. (NOTE: If you don't use bitnami stacks your path will be slightly different. You can't just copy and paste from here) Now save these files. Here is where my palm went to my forehead. I went back to my django projects directory and issued the command djangocms -f -p . projectname and received djangocms: Command not found I forgot that you have to reload this information into your session or log out and log back in for these changes to take effect. So either log out/reboot or if you don't want to log out you can issue the following command source ~/.bashrc After that everything should work fine. If not there were probably errors with your installation. Refer to the djangocms documentation regarding installation and python compatibility. Hope this helps someone.
djangocms: command not found
I installed django cms by this command $ sudo pip3 install django-cms the installation is completed and returns this: Requirement already satisfied: django-cms in /usr/local/lib/python3.6/dist-packages Requirement already satisfied: django-classy-tags>=0.7.2 in /usr/local/lib/python3.6/dist-packages (from django-cms) Requirement already satisfied: django-formtools>=1.0 in /usr/local/lib/python3.6/dist-packages (from django-cms) Requirement already satisfied: Django<2.0,>=1.8 in /home/amir/.local/lib/python3.6/site-packages (from django-cms) Requirement already satisfied: djangocms-admin-style>=1.0 in /usr/local/lib/python3.6/dist-packages (from django-cms) Requirement already satisfied: django-sekizai>=0.7 in /usr/local/lib/python3.6/dist-packages (from django-cms) Requirement already satisfied: django-treebeard>=4.0.1 in /usr/local/lib/python3.6/dist-packages (from django-cms) However, when I want to use djangocms it returns djangocms: command not found I use Python 3.6 and django 1.9.5 (I know it is an old version but the project belongs to my company and they use this version).
[ "http://docs.django-cms.org/en/release-3.4.x/introduction/install.html\nMy guess is you forgot to run: pip install djangocms-installer\nI'm guessing that because I did that too. I installed pip install django-cms, then wondered why it didn't work. \n", "Just ran into this and got it solved. Afterwards my palm went to my face because I was like \"duh\" lol. Oh well. Here is what the issue was for me and how I fixed it.\ncd ~/\nls -al\n\nThis should give you the files and folders in your user directory. (NOTE: I use AWS instances so most of the time my user will be bitnami unless I change it. Your user will be different.) What you want to look for are .bashrc or .profile or both. I updated both. Use whatever text editor you wish to open that/those files\nnano .bashrc\n\nand/or\nnano .profile\n\nUsually at the top of these files you will have something like\nPATH=/opt/bitnami/apache/bin:/opt/bitnami/apache2/bin:/opt/bitnami/common/bin:\nexport PATH\n\nThe djangocms script is installed in the /home/bitnami/.local/bin folder. You will want to add this path to the PATH variable in your .bashrc and .profile files. (NOTE: If you don't use bitnami stacks your path will be slightly different. You can't just copy and paste from here) Now save these files.\nHere is where my palm went to my forehead. I went back to my django projects directory and issued the command\ndjangocms -f -p . projectname\n\nand received\ndjangocms: Command not found\n\nI forgot that you have to reload this information into your session or log out and log back in for these changes to take effect. So either log out/reboot or if you don't want to log out you can issue the following command\nsource ~/.bashrc\n\nAfter that everything should work fine. If not there were probably errors with your installation. Refer to the djangocms documentation regarding installation and python compatibility. Hope this helps someone.\n" ]
[ 0, 0 ]
[]
[]
[ "django", "django_cms", "python" ]
stackoverflow_0047657871_django_django_cms_python.txt
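The same PATH fix as a one-off shell session, which avoids editing dotfiles while testing (paths assume a pip --user style install that puts scripts in ~/.local/bin):
pip3 install --user djangocms-installer
export PATH="$HOME/.local/bin:$PATH"
djangocms --help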
Q: Create a function called printtype that takes one parameter If the parameter is a string, return "String" If the parameter is an int, return "Int" If the parameter is a float, return "Float" Code:- def printtype(x): if isinstance(x,int): return x elif isinstance(x,float): return x else: isinstance(x,str) return x print(type(printtype(5))) print(type(printtype(5.0))) print(type(printtype("5"))) Error:- Float' != 2.5 : You must retrun Float if a dloat is passed into the printtype function A: This could solve your issue. def printtype(x): if isinstance(x,int): return "Int" elif isinstance(x,float): return "Float" elif isinstance(x,str): return "String" else: return "Unknown type" print(printtype(5)) print(printtype(5.0)) print(printtype("5")) Output:: Int Float String
Create a function called printtype that takes one parameter
If the parameter is a string, return "String" If the parameter is an int, return "Int" If the parameter is a float, return "Float" Code:- def printtype(x): if isinstance(x,int): return x elif isinstance(x,float): return x else: isinstance(x,str) return x print(type(printtype(5))) print(type(printtype(5.0))) print(type(printtype("5"))) Error:- Float' != 2.5 : You must retrun Float if a dloat is passed into the printtype function
[ "This could solve your issue.\ndef printtype(x): \n if isinstance(x,int):\n return \"Int\"\n elif isinstance(x,float):\n return \"Float\"\n elif isinstance(x,str):\n return \"String\"\n else:\n return \"Unknown type\"\n \nprint(printtype(5))\nprint(printtype(5.0))\nprint(printtype(\"5\"))\n\nOutput::\nInt\nFloat\nString\n\n" ]
[ 1 ]
[]
[]
[ "python" ]
stackoverflow_0074509086_python.txt
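An equivalent sketch using a dispatch dict keyed on type(); note that type() does not follow inheritance, so for example True maps to "Unknown type" here rather than "Int", whereas isinstance(True, int) is True:
def printtype(x):
    return {int: "Int", float: "Float", str: "String"}.get(type(x), "Unknown type")

print(printtype(5))     # Int
print(printtype(5.0))   # Float
print(printtype("5"))   # String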
Q: Iterating a list: getting values as[set(), set(), set(), set(), set()] I have a list (df_pop_initial_list), and it looks like this: [['000000000000000000000000000001011000000'], ['000000001000000000000001000000000010000'], ['000000000000000000000000000000010011000'], ['000000000000001001000000000000010000000'], ['000000000000000000010000001000000010000'], ['1000000000100000000010000000000000000000'], ['1000000010000000000001000000000000000000'], ['1001000000000000000010000000000000000000'], ['000000000000100000000000100000000000010'], ['000000000110000000000000000000001000000'], ['000000101000000010000000000000000000000'], ['000000000000001000000010000100000000000'], ['000000000000000010000101000000000000000'], ['000000001000100000000000000000000100000'], ['000000100000000000000000010000001000000'], ['000000000000001100000000000010000000000'], ['010000000000000000000000000001001000000'], ['000000010100000001000000000000000000000'], ['000000000000000000001000000001100000000'], ['000100000000000100000000000000000000010']] I am trying to count 1's in this 39 bits string list and converting each string value into 3 integer numbers where bits are on (mean finding 1's). My code looks like this: #Finding locations (3 MSUs) using 39 bit encoded string (counting 1's in a chromosome) def indices_initial_pop(chromosome): return {i+1 for i,c in enumerate(chromosome) if c=='1'} #setting dynamic locations according to Chromosomes def intial_population_bit_to_int(df_pop_initial_list): for x in range(0, len(df_pop_initial_list), 1): chrome = df_pop_initial_list[x] msu_locations = indices_initial_pop(chrome) initial_chromosomes_list.append(msu_locations) return initial_chromosomes_list initial_chromosomes_in_int_list = intial_population_bit_to_int(df_pop_initial_list) print (initial_chromosomes_in_int_list) Output: [set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set()] Why it is giving me a set()? A: intial_population_bit_to_int is giving a list of sets because indices_initial_pop always (with the data you use) returns an empty set. Your actual question is why indices_initial_pop returns an empty set. And the answer is because the value you pass as argument in your call, i.e. chrome, is not a string, but a list containing a single string. You can fix this by using chrome = df_pop_initial_list[x][0] instead of chrome = df_pop_initial_list[x] #Finding locations (3 MSUs) using 39 bit encoded string (counting 1's in a chromosome) def indices_initial_pop(chromosome): return {i+1 for i,c in enumerate(chromosome) if c=='1'} #setting dynamic locations according to Chromosomes def intial_population_bit_to_int(df_pop_initial_list): initial_chromosomes_list = [] for x in range(0, len(df_pop_initial_list), 1): chrome = df_pop_initial_list[x][0] msu_locations = indices_initial_pop(chrome) initial_chromosomes_list.append(msu_locations) return initial_chromosomes_list initial_chromosomes_in_int_list = intial_population_bit_to_int(df_pop_initial_list) print (initial_chromosomes_in_int_list) Output: [{32, 33, 30}, {24, 9, 35}, {32, 35, 36}, {32, 18, 15}, {35, 27, 20}, {1, 11, 21}, {1, 9, 22}, {1, 4, 21}, {25, 13, 38}, {33, 10, 11}, {9, 17, 7}, {23, 28, 15}, {24, 17, 22}, {9, 34, 13}, {33, 26, 7}, {16, 29, 15}, {33, 2, 30}, {8, 10, 18}, {21, 30, 31}, {16, 4, 38}]
Iterating a list: getting values as [set(), set(), set(), set(), set()]
I have a list (df_pop_initial_list), and it looks like this: [['000000000000000000000000000001011000000'], ['000000001000000000000001000000000010000'], ['000000000000000000000000000000010011000'], ['000000000000001001000000000000010000000'], ['000000000000000000010000001000000010000'], ['1000000000100000000010000000000000000000'], ['1000000010000000000001000000000000000000'], ['1001000000000000000010000000000000000000'], ['000000000000100000000000100000000000010'], ['000000000110000000000000000000001000000'], ['000000101000000010000000000000000000000'], ['000000000000001000000010000100000000000'], ['000000000000000010000101000000000000000'], ['000000001000100000000000000000000100000'], ['000000100000000000000000010000001000000'], ['000000000000001100000000000010000000000'], ['010000000000000000000000000001001000000'], ['000000010100000001000000000000000000000'], ['000000000000000000001000000001100000000'], ['000100000000000100000000000000000000010']] I am trying to count 1's in this 39 bits string list and converting each string value into 3 integer numbers where bits are on (mean finding 1's). My code looks like this: #Finding locations (3 MSUs) using 39 bit encoded string (counting 1's in a chromosome) def indices_initial_pop(chromosome): return {i+1 for i,c in enumerate(chromosome) if c=='1'} #setting dynamic locations according to Chromosomes def intial_population_bit_to_int(df_pop_initial_list): for x in range(0, len(df_pop_initial_list), 1): chrome = df_pop_initial_list[x] msu_locations = indices_initial_pop(chrome) initial_chromosomes_list.append(msu_locations) return initial_chromosomes_list initial_chromosomes_in_int_list = intial_population_bit_to_int(df_pop_initial_list) print (initial_chromosomes_in_int_list) Output: [set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set(), set()] Why it is giving me a set()?
[ "intial_population_bit_to_int is giving a list of sets because indices_initial_pop always (with the data you use) returns an empty set. Your actual question is why indices_initial_pop returns an empty set. And the answer is because the value you pass as argument in your call, i.e. chrome, is not a string, but a list containing a single string. You can fix this by using\nchrome = df_pop_initial_list[x][0]\ninstead of\nchrome = df_pop_initial_list[x]\n\n#Finding locations (3 MSUs) using 39 bit encoded string (counting 1's in a chromosome)\ndef indices_initial_pop(chromosome):\n return {i+1 for i,c in enumerate(chromosome) if c=='1'}\n\n#setting dynamic locations according to Chromosomes\ndef intial_population_bit_to_int(df_pop_initial_list):\n initial_chromosomes_list = []\n for x in range(0, len(df_pop_initial_list), 1):\n chrome = df_pop_initial_list[x][0]\n msu_locations = indices_initial_pop(chrome)\n initial_chromosomes_list.append(msu_locations)\n \n return initial_chromosomes_list\n\ninitial_chromosomes_in_int_list = intial_population_bit_to_int(df_pop_initial_list)\n\nprint (initial_chromosomes_in_int_list)\n\nOutput: [{32, 33, 30}, {24, 9, 35}, {32, 35, 36}, {32, 18, 15}, {35, 27, 20}, {1, 11, 21}, {1, 9, 22}, {1, 4, 21}, {25, 13, 38}, {33, 10, 11}, {9, 17, 7}, {23, 28, 15}, {24, 17, 22}, {9, 34, 13}, {33, 26, 7}, {16, 29, 15}, {33, 2, 30}, {8, 10, 18}, {21, 30, 31}, {16, 4, 38}]\n" ]
[ 1 ]
[]
[]
[ "genetic_algorithm", "genetic_programming", "jupyter_notebook", "list", "python" ]
stackoverflow_0074509131_genetic_algorithm_genetic_programming_jupyter_notebook_list_python.txt
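Once the chromosome string is unwrapped from its one-element list, the whole fix condenses to a comprehension; a sketch over df_pop_initial_list as given:
initial_chromosomes_in_int_list = [
    {i + 1 for i, c in enumerate(chrom[0]) if c == '1'}
    for chrom in df_pop_initial_list
]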
Q: How to get JSON data in expected format using Python json.dump I read multiple sheets from excel files and combine then to a single JSON file. Sample Data: df1 Metric Value 0 salesamount 9.0 1 salespercentage 80.0 2 salesdays 56.0 3 salesconversionpercentage 0.3 df2 Metric Value 0 FromBudget 4K 1 ToBudget 5K df3 Metric Value 0 Objective Customer Engagement 1 ExpectedOutcomesales 0.2 2 ExpectedOutcomeweeks 8 weeks I then convert them into dictionary using: s = dict(zip(df1.iloc[:,0], df1.iloc[:,1])) eb = dict(zip(df2.iloc[:,0], df2.iloc[:,1])) eo = dict(zip(df3.iloc[:,0], df3.iloc[:,1])) I then store above items is a key ExpectedPlanPerformance using: mydct = { 'ExpectedPlanPerformance' : { 'EstimatedBudget' : eb, 'Sales' : s, 'ExpectedOutcome' : eo } } mydct {'ExpectedPlanPerformance': {'EstimatedBudget': {'FromBudget': '4K', 'ToBudget': '5K'}, 'Sales': ({'salesamount': '9.0', 'salespercentage': '80.0', 'salesdays': '56.0', 'salesconversionpercentage': '0.3'},), 'ExpectedOutcome': {'Objective': 'Customer Engagement', 'ExpectedOutcomesales': 0.2, 'ExpectedOutcomeweeks': '8 weeks'}}} I write this dictionary to JSON using: outfile = open('file.json','w') json.dump(mydct, outfile, indent = 4) outfile.close() The JSON file I append to already contains other elements. Those elements are actually dataframes that were converted to JSON format using: json.loads(df.to_json(orient = 'records')) Once such dataframes are converted to JSON format, they are stored in a dictionary as above and written to file using same json.dump. But the output in the file is in below format: { "ExpectedPlanPerformance": { "EstimatedBudget": "{\"FromBudget\": \"4K\", \"ToBudget\": \"5K\"}", "Sales": "{\"salesamount\": \"9.0\", \"salespercentage\": \"80.0\", \"salesdays\": \"56.0\", \"salesconversionpercentage\": \"0.3\"}", "ExpectedOutcome": "{\"Objective\": \"Customer Engagement\", \"ExpectedOutcomesales\": \"20%\", \"ExpectedOutcomeweeks\": \"8 weeks\"}" } Whereas some other elements are like below: "TotalYield": [ "225K" ], "TotalYieldText": [ "Lorem ipsum door sit amet" ], Can someone please let me know how to fix this, expected output is as below: "ExpectedPlanPerformance": [{ "ExpectedOutcome": { "Objective": "Customer Engagement", "ExpectedOutcomesales": "20%", "ExpectedOutcomeweeks": "8 weeks" }, "Sales": { "salesamount": "9 ", "salespercentage": "80", "salesdays": "56", "salesconversionpercentage": "0.3" }, "EstimatedBudget": { "FromBudget": "4K", "ToBudget": "5K" } }], A: Try: out = { "ExpectedPlanPerformance": [ { "ExpectedOutcome": dict(zip(df3.Metric, df3.Value)), "Sales": dict(zip(df1.Metric, df1.Value)), "EstimatedBudget": dict(zip(df2.Metric, df2.Value)), } ] } print(out) Prints: { "ExpectedPlanPerformance": [ { "ExpectedOutcome": { "Objective": "Customer Engagement", "ExpectedOutcomesales": "0.2", "ExpectedOutcomeweeks": "8 weeks", }, "Sales": { "salesamount": 9.0, "salespercentage": 80.0, "salesdays": 56.0, "salesconversionpercentage": 0.3, }, "EstimatedBudget": {"FromBudget": "4K", "ToBudget": "5K"}, } ] } To save out to a file: import json with open("your_file.json", "w") as f_in: json.dump(out, f_in, indent=4)
How to get JSON data in expected format using Python json.dump
I read multiple sheets from excel files and combine then to a single JSON file. Sample Data: df1 Metric Value 0 salesamount 9.0 1 salespercentage 80.0 2 salesdays 56.0 3 salesconversionpercentage 0.3 df2 Metric Value 0 FromBudget 4K 1 ToBudget 5K df3 Metric Value 0 Objective Customer Engagement 1 ExpectedOutcomesales 0.2 2 ExpectedOutcomeweeks 8 weeks I then convert them into dictionary using: s = dict(zip(df1.iloc[:,0], df1.iloc[:,1])) eb = dict(zip(df2.iloc[:,0], df2.iloc[:,1])) eo = dict(zip(df3.iloc[:,0], df3.iloc[:,1])) I then store above items is a key ExpectedPlanPerformance using: mydct = { 'ExpectedPlanPerformance' : { 'EstimatedBudget' : eb, 'Sales' : s, 'ExpectedOutcome' : eo } } mydct {'ExpectedPlanPerformance': {'EstimatedBudget': {'FromBudget': '4K', 'ToBudget': '5K'}, 'Sales': ({'salesamount': '9.0', 'salespercentage': '80.0', 'salesdays': '56.0', 'salesconversionpercentage': '0.3'},), 'ExpectedOutcome': {'Objective': 'Customer Engagement', 'ExpectedOutcomesales': 0.2, 'ExpectedOutcomeweeks': '8 weeks'}}} I write this dictionary to JSON using: outfile = open('file.json','w') json.dump(mydct, outfile, indent = 4) outfile.close() The JSON file I append to already contains other elements. Those elements are actually dataframes that were converted to JSON format using: json.loads(df.to_json(orient = 'records')) Once such dataframes are converted to JSON format, they are stored in a dictionary as above and written to file using same json.dump. But the output in the file is in below format: { "ExpectedPlanPerformance": { "EstimatedBudget": "{\"FromBudget\": \"4K\", \"ToBudget\": \"5K\"}", "Sales": "{\"salesamount\": \"9.0\", \"salespercentage\": \"80.0\", \"salesdays\": \"56.0\", \"salesconversionpercentage\": \"0.3\"}", "ExpectedOutcome": "{\"Objective\": \"Customer Engagement\", \"ExpectedOutcomesales\": \"20%\", \"ExpectedOutcomeweeks\": \"8 weeks\"}" } Whereas some other elements are like below: "TotalYield": [ "225K" ], "TotalYieldText": [ "Lorem ipsum door sit amet" ], Can someone please let me know how to fix this, expected output is as below: "ExpectedPlanPerformance": [{ "ExpectedOutcome": { "Objective": "Customer Engagement", "ExpectedOutcomesales": "20%", "ExpectedOutcomeweeks": "8 weeks" }, "Sales": { "salesamount": "9 ", "salespercentage": "80", "salesdays": "56", "salesconversionpercentage": "0.3" }, "EstimatedBudget": { "FromBudget": "4K", "ToBudget": "5K" } }],
[ "Try:\nout = {\n \"ExpectedPlanPerformance\": [\n {\n \"ExpectedOutcome\": dict(zip(df3.Metric, df3.Value)),\n \"Sales\": dict(zip(df1.Metric, df1.Value)),\n \"EstimatedBudget\": dict(zip(df2.Metric, df2.Value)),\n }\n ]\n}\n\nprint(out)\n\nPrints:\n{\n \"ExpectedPlanPerformance\": [\n {\n \"ExpectedOutcome\": {\n \"Objective\": \"Customer Engagement\",\n \"ExpectedOutcomesales\": \"0.2\",\n \"ExpectedOutcomeweeks\": \"8 weeks\",\n },\n \"Sales\": {\n \"salesamount\": 9.0,\n \"salespercentage\": 80.0,\n \"salesdays\": 56.0,\n \"salesconversionpercentage\": 0.3,\n },\n \"EstimatedBudget\": {\"FromBudget\": \"4K\", \"ToBudget\": \"5K\"},\n }\n ]\n}\n\n\nTo save out to a file:\nimport json\n\nwith open(\"your_file.json\", \"w\") as f_in:\n json.dump(out, f_in, indent=4)\n\n" ]
[ 1 ]
[]
[]
[ "json", "python" ]
stackoverflow_0074509147_json_python.txt
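The escaped \" characters in the bad output are the signature of double encoding: values that were already JSON strings were serialized again. A small sketch of the failure mode and the rule that avoids it:
import json

eb = {"FromBudget": "4K", "ToBudget": "5K"}

bad = {"EstimatedBudget": json.dumps(eb)}   # value is a string -> re-escaped on dump
good = {"EstimatedBudget": eb}              # value stays a dict

print(json.dumps(bad))    # {"EstimatedBudget": "{\"FromBudget\": \"4K\", ..."}
print(json.dumps(good))   # {"EstimatedBudget": {"FromBudget": "4K", ...}}
Keep everything as plain dicts and lists and serialize exactly once, at the final json.dump.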
Q: Run multiple schedule jobs at same time using Python Schedule I am using cx Oracle and schedule module in python. Following is the psuedo code. import schedule,cx_Oracle def db_operation(query): ''' Some DB operations like 1. Get connection 2. Execute query 3. commit result (in case of DML operations) ''' schedule.every().hour.at(":10").do(db_operation,query='some_query_1') # Runs at 10th minute in every hour schedule.every().day.at("13:10").do(db_operation,query='some_query_2') # Runs at 1:10 p.m every day Both the above scheduled jobs calls the same function (which does some DB operations) and will coincide at 13:10. Questions: So how does the scheduler handles this scenario? Like running 2 jobs at the same time. Does it puts in some sort of queue and runs one by one even though time is same? or are they in parallel? Which one gets picked first? and if I would want the priority of first job over second, how to do it? Also, important thing is that at a time only one of these should be accessing the database, otherwise it may lead to inconsistent data. How to take care of this scenario? Like is it possible to put a sort of lock while accessing the function or should the table be locked somehow? A: I took a look at the code of schedule and I have come to the following conclusions: The schedule library does not work in parallel or concurrent. Therefore, jobs that have expired are processed one after the other. They are sorted according to their due date. The job that should be performed furthest in the past is performed first. If jobs are due at the same time, schedule execute the jobs according to the FIFO scheme, regarding the creation of the jobs. So in your example, some_query_1 would be executed before some_query_2. Question three is actually self-explanatory as only one function can be executed at a time. Therefore, the functions should not actually get in each other's way.
Run multiple schedule jobs at same time using Python Schedule
I am using the cx_Oracle and schedule modules in Python. Following is the pseudo code. import schedule,cx_Oracle def db_operation(query): ''' Some DB operations like 1. Get connection 2. Execute query 3. commit result (in case of DML operations) ''' schedule.every().hour.at(":10").do(db_operation,query='some_query_1') # Runs at 10th minute in every hour schedule.every().day.at("13:10").do(db_operation,query='some_query_2') # Runs at 1:10 p.m every day Both of the above scheduled jobs call the same function (which does some DB operations) and will coincide at 13:10. Questions: So how does the scheduler handle this scenario, i.e. running 2 jobs at the same time? Does it put them in some sort of queue and run them one by one even though the time is the same, or are they run in parallel? Which one gets picked first? And if I want the first job to have priority over the second, how do I do it? Also, an important thing is that only one of these should be accessing the database at a time, otherwise it may lead to inconsistent data. How to take care of this scenario? For example, is it possible to put a lock around the function, or should the table be locked somehow?
[ "I took a look at the code of schedule and I have come to the following conclusions:\n\nThe schedule library does not work in parallel or concurrent. Therefore, jobs that have expired are processed one after the other. They are sorted according to their due date. The job that should be performed furthest in the past is performed first.\nIf jobs are due at the same time, schedule execute the jobs according to the FIFO scheme, regarding the creation of the jobs. So in your example, some_query_1 would be executed before some_query_2.\nQuestion three is actually self-explanatory as only one function can be executed at a time. Therefore, the functions should not actually get in each other's way.\n\n" ]
[ 1 ]
[]
[]
[ "cx_oracle", "python", "python_3.x", "python_schedule" ]
stackoverflow_0074497651_cx_oracle_python_python_3.x_python_schedule.txt
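Since schedule only fires jobs from whatever loop calls run_pending(), the usual single-threaded setup cannot overlap DB access at all; a lock only becomes necessary if jobs are later moved onto threads. A sketch (the body of db_operation is elided, as in the question):
import time
import threading
import schedule

db_lock = threading.Lock()   # only needed once jobs run on worker threads

def db_operation(query):
    with db_lock:
        pass   # get connection, execute query, commit

while True:
    schedule.run_pending()   # due jobs run sequentially, most-overdue first
    time.sleep(1)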
Q: Merge specific rows which have the same ID value in a specific column in pandas DataFrame I have a DataFrame df1 with ID and Amount on specific Dates. I try to sum up the Amount of two specific rows which have the same ID value. df1: Date ID Amount 0 2022-01-02 1200 10.0 1 2022-01-02 1200 1.0 2 2022-01-02 1400 12.0 3 2022-01-02 1500 11.0 4 2022-01-03 1300 12.5 5 2022-01-03 1300 0.5 6 2022-01-03 1500 12.0 This would be the desired output: df1: Date ID Amount 0 2022-01-02 1200 11 <-- 10+1 1 2022-01-02 1200 0 <-- -1 2 2022-01-02 1400 12 3 2022-01-02 1500 11 4 2022-01-03 1300 13 <-- 12.5+0.5 5 2022-01-03 1300 0 <-- -0.5 6 2022-01-03 1500 12 I tried to do it with an np.where() to replace the Amount where the shifted ID value is equal to the ID value. For reproducibility: import pandas as pd df1 = pd.DataFrame({ 'Date':['2022-01-02', '2022-01-02', '2022-01-02', '2022-01-02', '2022-01-03', '2022-01-03', '2022-01-03'], 'ID':[1200, 1200, 1400, 1500, 1300, 1300, 1500], 'Amount':[10, 1, 12, 11, 12.5, 0.5, 12]}) Many thanks for your help! A: If I understand your problem correctly, it looks like a transaction data and the groups you need are by [Date, ID]. If so, then you can achieve it as: df1["Amount"] = df1.groupby(["Date", "ID"])["Amount"].transform(lambda x: [x.sum() if i==0 else 0 for i,_ in enumerate(x)]) Full example. I have added some additional data the end of the dataset just to test the corner condition with more than 2 entries: import pandas as pd df1 = pd.DataFrame({ 'Date':['2022-01-02', '2022-01-02', '2022-01-02', '2022-01-02', '2022-01-03', '2022-01-03', '2022-01-03', '2022-01-04', '2022-01-04', '2022-01-04'], 'ID':[1200, 1200, 1400, 1500, 1300, 1300, 1500, 1500, 1500, 1500], 'Amount':[10, 1, 12, 11, 12.5, 0.5, 12, 10, 3, 5]}) df1["Amount"] = df1.groupby(["Date", "ID"])["Amount"].transform(lambda x: [x.sum() if i==0 else 0 for i,_ in enumerate(x)]) print(df1) [Out]: Date ID Amount 0 2022-01-02 1200 11.0 1 2022-01-02 1200 0.0 2 2022-01-02 1400 12.0 3 2022-01-02 1500 11.0 4 2022-01-03 1300 13.0 5 2022-01-03 1300 0.0 6 2022-01-03 1500 12.0 7 2022-01-04 1500 18.0 8 2022-01-04 1500 0.0 9 2022-01-04 1500 0.0 A: Let's try the following code: Amount=[None]*len(df1) for i in range(1, len(df1)): if df1['ID'][i] == df1['ID'][i-1]: Amount[i] = df1['Amount'][i] - df1['Amount'][i] Amount[i-1] = df1['Amount'][i] + df1['Amount'][i-1] else: Amount[i] = df1['Amount'][i] df1['Amount']=Amount Output >>> df1 Date ID Amount 0 2022-01-02 1200 11.0 1 2022-01-02 1200 0.0 2 2022-01-02 1400 12.0 3 2022-01-02 1500 11.0 4 2022-01-03 1300 13.0 5 2022-01-03 1300 0.0 6 2022-01-03 1500 12.0
Merge specific rows which have the same ID value in a specific column in pandas DataFrame
I have a DataFrame df1 with ID and Amount on specific Dates. I try to sum up the Amount of two specific rows which have the same ID value. df1: Date ID Amount 0 2022-01-02 1200 10.0 1 2022-01-02 1200 1.0 2 2022-01-02 1400 12.0 3 2022-01-02 1500 11.0 4 2022-01-03 1300 12.5 5 2022-01-03 1300 0.5 6 2022-01-03 1500 12.0 This would be the desired output: df1: Date ID Amount 0 2022-01-02 1200 11 <-- 10+1 1 2022-01-02 1200 0 <-- -1 2 2022-01-02 1400 12 3 2022-01-02 1500 11 4 2022-01-03 1300 13 <-- 12.5+0.5 5 2022-01-03 1300 0 <-- -0.5 6 2022-01-03 1500 12 I tried to do it with an np.where() to replace the Amount where the shifted ID value is equal to the ID value. For reproducibility: import pandas as pd df1 = pd.DataFrame({ 'Date':['2022-01-02', '2022-01-02', '2022-01-02', '2022-01-02', '2022-01-03', '2022-01-03', '2022-01-03'], 'ID':[1200, 1200, 1400, 1500, 1300, 1300, 1500], 'Amount':[10, 1, 12, 11, 12.5, 0.5, 12]}) Many thanks for your help!
[ "If I understand your problem correctly, it looks like a transaction data and the groups you need are by [Date, ID].\nIf so, then you can achieve it as:\ndf1[\"Amount\"] = df1.groupby([\"Date\", \"ID\"])[\"Amount\"].transform(lambda x: [x.sum() if i==0 else 0 for i,_ in enumerate(x)])\n\nFull example. I have added some additional data the end of the dataset just to test the corner condition with more than 2 entries:\nimport pandas as pd\n\ndf1 = pd.DataFrame({\n 'Date':['2022-01-02', '2022-01-02', '2022-01-02', '2022-01-02', '2022-01-03', '2022-01-03', '2022-01-03', '2022-01-04', '2022-01-04', '2022-01-04'],\n 'ID':[1200, 1200, 1400, 1500, 1300, 1300, 1500, 1500, 1500, 1500],\n 'Amount':[10, 1, 12, 11, 12.5, 0.5, 12, 10, 3, 5]})\n\ndf1[\"Amount\"] = df1.groupby([\"Date\", \"ID\"])[\"Amount\"].transform(lambda x: [x.sum() if i==0 else 0 for i,_ in enumerate(x)])\n\nprint(df1)\n\n[Out]:\n Date ID Amount\n0 2022-01-02 1200 11.0\n1 2022-01-02 1200 0.0\n2 2022-01-02 1400 12.0\n3 2022-01-02 1500 11.0\n4 2022-01-03 1300 13.0\n5 2022-01-03 1300 0.0\n6 2022-01-03 1500 12.0\n7 2022-01-04 1500 18.0\n8 2022-01-04 1500 0.0\n9 2022-01-04 1500 0.0\n\n", "Let's try the following code:\nAmount=[None]*len(df1)\n\nfor i in range(1, len(df1)):\n\n if df1['ID'][i] == df1['ID'][i-1]:\n\n Amount[i] = df1['Amount'][i] - df1['Amount'][i]\n Amount[i-1] = df1['Amount'][i] + df1['Amount'][i-1]\n\n else:\n\n Amount[i] = df1['Amount'][i]\n\ndf1['Amount']=Amount\n\nOutput\n>>> df1\n Date ID Amount\n0 2022-01-02 1200 11.0\n1 2022-01-02 1200 0.0\n2 2022-01-02 1400 12.0\n3 2022-01-02 1500 11.0\n4 2022-01-03 1300 13.0\n5 2022-01-03 1300 0.0\n6 2022-01-03 1500 12.0\n\n" ]
[ 1, 1 ]
[]
[]
[ "dataframe", "pandas", "python", "shift" ]
stackoverflow_0074508504_dataframe_pandas_python_shift.txt
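A vectorised variant of the transform answer above, assuming the same (Date, ID) grouping: write the group sum on each group's first row and zero elsewhere.
g = df1.groupby(["Date", "ID"])["Amount"]
df1["Amount"] = g.transform("sum").where(g.cumcount() == 0, 0)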
Q: How to remove links from tags in html? I'm writing scraper in Python with bs4 and want to remove links from all 'a' tags I have html code html_code = '<a href="link">some text</a>' I want to remove href="link" and get only html_code = '<a>some text</a>' How can i do it? A: I would do it following way from bs4 import BeautifulSoup html_code = '<a href="link">some text</a>' soup = BeautifulSoup(html_code) print("Before") print(soup.prettify()) for node in soup.find_all("a"): node.attrs = {} print("After") print(soup.prettify()) gives output Before <html> <body> <a href="link"> some text </a> </body> </html> After <html> <body> <a> some text </a> </body> </html> Note that this will remove all attributes of all <a> tags. A: Does this solve your problem? html_code = html_code.replace(' href="link"','') Output: >>> print(html_code) >>> '<a>some text</a>' A: Try: from bs4 import BeautifulSoup soup = BeautifulSoup('<a href="link">some text</a>', "html.parser") del soup.a.attrs print(soup.a) Prints: <a>some text</a>
How to remove links from tags in html?
I'm writing a scraper in Python with bs4 and want to remove the links from all 'a' tags. I have this HTML code: html_code = '<a href="link">some text</a>' I want to remove href="link" and get only html_code = '<a>some text</a>' How can I do it?
[ "I would do it following way\nfrom bs4 import BeautifulSoup\nhtml_code = '<a href=\"link\">some text</a>'\nsoup = BeautifulSoup(html_code)\nprint(\"Before\")\nprint(soup.prettify())\nfor node in soup.find_all(\"a\"):\n node.attrs = {}\nprint(\"After\")\nprint(soup.prettify())\n\ngives output\nBefore\n<html>\n <body>\n <a href=\"link\">\n some text\n </a>\n </body>\n</html>\nAfter\n<html>\n <body>\n <a>\n some text\n </a>\n </body>\n</html>\n\nNote that this will remove all attributes of all <a> tags.\n", "Does this solve your problem?\nhtml_code = html_code.replace(' href=\"link\"','')\n\nOutput:\n>>> print(html_code)\n\n>>> '<a>some text</a>'\n\n", "Try:\nfrom bs4 import BeautifulSoup\n\nsoup = BeautifulSoup('<a href=\"link\">some text</a>', \"html.parser\")\n\ndel soup.a.attrs\nprint(soup.a)\n\nPrints:\n<a>some text</a>\n\n" ]
[ 2, 0, 0 ]
[]
[]
[ "beautifulsoup", "html", "python", "web_scraping" ]
stackoverflow_0074508666_beautifulsoup_html_python_web_scraping.txt
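If other attributes (class, id, ...) should survive and only href should go, popping just that key is safer than clearing attrs entirely; a sketch:
from bs4 import BeautifulSoup

soup = BeautifulSoup('<a href="link" class="nav">some text</a>', "html.parser")
for a in soup.find_all("a"):
    a.attrs.pop("href", None)   # drop href, keep everything else

print(soup)   # <a class="nav">some text</a>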
Q: How to sum values of a column where the column name has been duplicated? I have a dataframe: df = pd.DataFrame({'grps': list('aaabbcaabcccbbc'), 'vals': [12,345,-3,1,45,14,4,52,54,23,235,-21,57,-3,87]}) I want to find the sum of 'vals' of each group: a,b,c I've tried using the .sum() function but I'm struggling on how to group all the values of the same letter. A: You can use GroupBy.sum : out= df.groupby("grps", as_index=False).sum() # Output : print(out) grps vals 0 a 410 1 b 154 2 c 338
How to sum values of a column where the column name has been duplicated?
I have a dataframe: df = pd.DataFrame({'grps': list('aaabbcaabcccbbc'), 'vals': [12,345,-3,1,45,14,4,52,54,23,235,-21,57,-3,87]}) I want to find the sum of 'vals' of each group: a,b,c I've tried using the .sum() function but I'm struggling with how to group all the values of the same letter.
[ "You can use GroupBy.sum :\nout= df.groupby(\"grps\", as_index=False).sum()\n\n# Output :\nprint(out)\n\n grps vals\n0 a 410\n1 b 154\n2 c 338\n\n" ]
[ 2 ]
[]
[]
[ "dataframe", "numpy", "pandas", "python" ]
stackoverflow_0074509208_dataframe_numpy_pandas_python.txt
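Editor's sketch: the same groupby can return several statistics in one pass via agg, assuming the df from the question.

import pandas as pd

df = pd.DataFrame({'grps': list('aaabbcaabcccbbc'),
                   'vals': [12, 345, -3, 1, 45, 14, 4, 52, 54, 23, 235, -21, 57, -3, 87]})

# one row per group a, b, c with three statistics each
out = df.groupby('grps')['vals'].agg(['sum', 'mean', 'count'])
print(out)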
Q: Missing data in excel from 2 products I am working on a code where it is necessary to scrape data from the website of all locomotives. When exporting to Excel, 2 products do not appear: Line 6 in excel (product: 63256) and 7 (product: 69256) Could someone give me a hint why? Here is the code: . . . . import requests from bs4 import BeautifulSoup import pandas as pd import xlsxwriter baseurl = 'https://www.roco.cc/' headers = { 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language': 'en-US,en;q=0.8', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Cache-Control': 'max-age=0', 'Connection': 'keep-alive', } productlinks = [] for x in range(1,2): r = requests.get( f'https://www.roco.cc/ren/products/locomotives/steam-locomotives.html?p={x}&verfuegbarkeit_status=41%2C42%2C43%2C45%2C44') soup = BeautifulSoup(r.content, 'lxml') productlist = soup.find_all('li', class_='item product product-item') for item in productlist: for link in item.find_all('a', class_='product-item-link', href=True): productlinks.append(link['href']) Loco_list = [] Spare_parts_list = [] for link in productlinks: r = requests.get(link, allow_redirects=False) soup = BeautifulSoup(r.content, 'lxml') try: Manufacturer_name = soup.find( 'div', class_='product-head-name').h1.text.strip() except: Manufacturer_name = '' try: Reference = soup.find('span', class_='product-head-artNr').text.strip() except: Reference = '' try: Price = soup.find('div', class_='product-head-price').text.strip() except: Price = '' Type = 'Steam locomotive' try: Scale = soup.find('td', {'data-th': 'Scale'}).text.strip() except: Scale = '' try: Current = soup.find('td', {'data-th': 'Control'}).text.split(' ')[0] except: Current = '' try: Control = soup.find('td', {'data-th': 'Control'}).text.strip() except: Control = '' try: Interface = soup.find('td', {'data-th': 'Interface'}).text.strip() except: Interface = '' try: Digital_decoder = soup.find( 'td', {'data-th': 'Digital decoder'}).text.strip() except: Digital_decoder = '' try: Decoder_Type = soup.find( 'td', {'data-th': 'Decoder-Type'}).text.strip() except: Decoder_Type = '' try: Motor = soup.find('td', {'data-th': 'Motor'}).text.strip() except: Motor = '' try: Flywheel = soup.find('td', {'data-th': 'Flywheel'}).text.strip() except: Flywheel = '' try: Minimum_radius = soup.find( 'td', {'data-th': 'Minimum radius'}).text.strip() except: Minimum_radius = '' try: Length_over_buffer = soup.find( 'td', {'data-th': 'Length over buffer'}).text.strip() except: Length_over_buffer = '' try: Number_of_driven_axles = soup.find( 'td', {'data-th': 'Number of driven axles'}).text.strip() except: Number_of_driven_axles = '' try: Number_of_axles_with_traction_tyres = soup.find( 'td', {'data-th': 'Number of axles with traction tyres'}).text.strip() except: Number_of_axles_with_traction_tyres = '' try: Coupling = soup.find('td', {'data-th': 'Coupling'}).text.strip() except: Coupling = '' try: LED_lighting = soup.find( 'td', {'data-th': 'LED lighting'}).text.strip() except: LED_lighting = '' try: Head_light = soup.find('td', {'data-th': 'Head light'}).text.strip() except: Head_light = '' try: LED_head_light = soup.find( 'td', {'data-th': 'LED head light'}).text.strip() except: LED_head_light = '' try: Country = soup.find( 'td', {'data-th': 'Original (country)'}).text.strip() except: Country = '' try: Railway_company = 
soup.find( 'td', {'data-th': 'Railway Company'}).text.strip() except: Railway_company = '' try: Epoch = soup.find('td', {'data-th': 'Epoch'}).text.strip() except: Epoch = '' try: Description = soup.find( 'div', class_='product-add-form-text').text.strip() except: Description = '' Locomotives = { 'Manufacturer_name': Manufacturer_name, 'Reference': Reference, 'Price': Price, 'Type': Type, 'Scale': Scale, 'Current': Current, 'Control': Control, 'Interface': Interface, 'Digital_decoder': Digital_decoder, 'Decoder_Type': Decoder_Type, 'Motor': Motor, 'Flywheel': Flywheel, 'Minimum_radius': Minimum_radius, 'Length_over_buffer': Length_over_buffer, 'Number_of_driven_axles': Number_of_driven_axles, 'Number_of_axles_with_traction_tyres': Number_of_axles_with_traction_tyres, 'Coupling': Coupling, 'LED_lighting': LED_lighting, 'Head_light': Head_light, 'LED_head_light': LED_head_light, 'Country': Country, 'Railway_company': Railway_company, 'Epoch': Epoch, 'Description': Description, } Loco_list.append(Locomotives) print(Locomotives) # Manufacturer_name = # Reference = # Spare_part_number = soup.find('td', {'data-th': 'Art. No.:'}).text.strip() # Spare_part_name = soup.find('td', {'data-th': 'Description'}).text.strip() # Price = soup.find('td', {'data-th': 'Price:'}).text.strip() # Spare_parts = { # 'Manufacturer_name': Manufacturer_name, # 'Reference': Reference, # 'Spare_part_number': Spare_part_number, # 'Spare_part_name': Spare_part_name, # 'Price': Price # } # Spare_parts_list.append(Spare_parts) # print(Spare_parts_list) df1 = pd.DataFrame(Loco_list) # df2 = pd.DataFrame(Spare_parts_list) # # df3 = pd.DataFrame() # # df4 = pd.DataFrame() writer = pd.ExcelWriter('Roco - locomotives.xlsx', engine='xlsxwriter') df1.to_excel(writer, sheet_name='Model') # df2.to_excel(writer, sheet_name='Spare parts') # # df3.to_excel(writer, sheet_name='Documents') # # df4.to_excel(writer, sheet_name='Photos') writer.save() print('Saved to file') A: Do not use except the way you do just skipping the error, instead print it and do some research to handle the issue: except Exception as e: print(e) You do not allow redirects, so in some cases you won't get a soup - enabling redirects will lead in some cases to an infinity redirct, what in my opinon is a issue of the webiste. Simply log this urls and add the data manually, if the number is not that high.
Missing data in excel from 2 products
I am working on a code where it is necessary to scrape data from the website of all locomotives. When exporting to Excel, 2 products do not appear: Line 6 in excel (product: 63256) and 7 (product: 69256) Could someone give me a hint why? Here is the code: . . . . import requests from bs4 import BeautifulSoup import pandas as pd import xlsxwriter baseurl = 'https://www.roco.cc/' headers = { 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language': 'en-US,en;q=0.8', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Cache-Control': 'max-age=0', 'Connection': 'keep-alive', } productlinks = [] for x in range(1,2): r = requests.get( f'https://www.roco.cc/ren/products/locomotives/steam-locomotives.html?p={x}&verfuegbarkeit_status=41%2C42%2C43%2C45%2C44') soup = BeautifulSoup(r.content, 'lxml') productlist = soup.find_all('li', class_='item product product-item') for item in productlist: for link in item.find_all('a', class_='product-item-link', href=True): productlinks.append(link['href']) Loco_list = [] Spare_parts_list = [] for link in productlinks: r = requests.get(link, allow_redirects=False) soup = BeautifulSoup(r.content, 'lxml') try: Manufacturer_name = soup.find( 'div', class_='product-head-name').h1.text.strip() except: Manufacturer_name = '' try: Reference = soup.find('span', class_='product-head-artNr').text.strip() except: Reference = '' try: Price = soup.find('div', class_='product-head-price').text.strip() except: Price = '' Type = 'Steam locomotive' try: Scale = soup.find('td', {'data-th': 'Scale'}).text.strip() except: Scale = '' try: Current = soup.find('td', {'data-th': 'Control'}).text.split(' ')[0] except: Current = '' try: Control = soup.find('td', {'data-th': 'Control'}).text.strip() except: Control = '' try: Interface = soup.find('td', {'data-th': 'Interface'}).text.strip() except: Interface = '' try: Digital_decoder = soup.find( 'td', {'data-th': 'Digital decoder'}).text.strip() except: Digital_decoder = '' try: Decoder_Type = soup.find( 'td', {'data-th': 'Decoder-Type'}).text.strip() except: Decoder_Type = '' try: Motor = soup.find('td', {'data-th': 'Motor'}).text.strip() except: Motor = '' try: Flywheel = soup.find('td', {'data-th': 'Flywheel'}).text.strip() except: Flywheel = '' try: Minimum_radius = soup.find( 'td', {'data-th': 'Minimum radius'}).text.strip() except: Minimum_radius = '' try: Length_over_buffer = soup.find( 'td', {'data-th': 'Length over buffer'}).text.strip() except: Length_over_buffer = '' try: Number_of_driven_axles = soup.find( 'td', {'data-th': 'Number of driven axles'}).text.strip() except: Number_of_driven_axles = '' try: Number_of_axles_with_traction_tyres = soup.find( 'td', {'data-th': 'Number of axles with traction tyres'}).text.strip() except: Number_of_axles_with_traction_tyres = '' try: Coupling = soup.find('td', {'data-th': 'Coupling'}).text.strip() except: Coupling = '' try: LED_lighting = soup.find( 'td', {'data-th': 'LED lighting'}).text.strip() except: LED_lighting = '' try: Head_light = soup.find('td', {'data-th': 'Head light'}).text.strip() except: Head_light = '' try: LED_head_light = soup.find( 'td', {'data-th': 'LED head light'}).text.strip() except: LED_head_light = '' try: Country = soup.find( 'td', {'data-th': 'Original (country)'}).text.strip() except: Country = '' try: Railway_company = soup.find( 'td', {'data-th': 'Railway 
Company'}).text.strip() except: Railway_company = '' try: Epoch = soup.find('td', {'data-th': 'Epoch'}).text.strip() except: Epoch = '' try: Description = soup.find( 'div', class_='product-add-form-text').text.strip() except: Description = '' Locomotives = { 'Manufacturer_name': Manufacturer_name, 'Reference': Reference, 'Price': Price, 'Type': Type, 'Scale': Scale, 'Current': Current, 'Control': Control, 'Interface': Interface, 'Digital_decoder': Digital_decoder, 'Decoder_Type': Decoder_Type, 'Motor': Motor, 'Flywheel': Flywheel, 'Minimum_radius': Minimum_radius, 'Length_over_buffer': Length_over_buffer, 'Number_of_driven_axles': Number_of_driven_axles, 'Number_of_axles_with_traction_tyres': Number_of_axles_with_traction_tyres, 'Coupling': Coupling, 'LED_lighting': LED_lighting, 'Head_light': Head_light, 'LED_head_light': LED_head_light, 'Country': Country, 'Railway_company': Railway_company, 'Epoch': Epoch, 'Description': Description, } Loco_list.append(Locomotives) print(Locomotives) # Manufacturer_name = # Reference = # Spare_part_number = soup.find('td', {'data-th': 'Art. No.:'}).text.strip() # Spare_part_name = soup.find('td', {'data-th': 'Description'}).text.strip() # Price = soup.find('td', {'data-th': 'Price:'}).text.strip() # Spare_parts = { # 'Manufacturer_name': Manufacturer_name, # 'Reference': Reference, # 'Spare_part_number': Spare_part_number, # 'Spare_part_name': Spare_part_name, # 'Price': Price # } # Spare_parts_list.append(Spare_parts) # print(Spare_parts_list) df1 = pd.DataFrame(Loco_list) # df2 = pd.DataFrame(Spare_parts_list) # # df3 = pd.DataFrame() # # df4 = pd.DataFrame() writer = pd.ExcelWriter('Roco - locomotives.xlsx', engine='xlsxwriter') df1.to_excel(writer, sheet_name='Model') # df2.to_excel(writer, sheet_name='Spare parts') # # df3.to_excel(writer, sheet_name='Documents') # # df4.to_excel(writer, sheet_name='Photos') writer.save() print('Saved to file')
[ "Do not use except the way you do just skipping the error, instead print it and do some research to handle the issue:\nexcept Exception as e: \n print(e)\n\nYou do not allow redirects, so in some cases you won't get a soup - enabling redirects will lead in some cases to an infinity redirct, what in my opinon is a issue of the webiste.\nSimply log this urls and add the data manually, if the number is not that high.\n" ]
[ 0 ]
[]
[]
[ "export_to_excel", "python", "web_scraping" ]
stackoverflow_0074502975_export_to_excel_python_web_scraping.txt
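Editor's sketch of what the answer recommends - log failures instead of silently swallowing them, and watch for redirects. The fetch helper is illustrative, not taken from the original code.

import logging
import requests

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def fetch(link):
    try:
        r = requests.get(link, timeout=10)  # redirects are followed by default
        if r.history:                       # the request was redirected
            logger.warning("redirected: %s -> %s", link, r.url)
        r.raise_for_status()
        return r.content
    except Exception as e:                  # includes requests.TooManyRedirects
        logger.error("failed %s: %s", link, e)
        return None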
Q: Why does del (x) with parentheses around the variable name work? Why does this piece of code work the way it does? x = 3 print(dir()) #output indicates that x is defined in the global scope del (x) print(dir()) #output indicates that x is not defined in the global scope My understanding is that del is a keyword in Python, and what follows del should be a name. (name) is not a name. Why does the example seem to show that del (name) works the same as del name? A: The definition of the del statement is: del_stmt ::= "del" target_list and from the definition of target_list: target_list ::= target ("," target)* [","] target ::= identifier | "(" target_list ")" | "[" [target_list] "]" | ... you can see that parentheses around the list of targets are allowed. For example, if you define x,y = 1,2, all of these are allowed and have the same effect: del x,y del (x,y) del (x),[y] del [x,(y)] del ([x], (y)) A: del statement with or without parentheses as shown below are the same: del (x) del x And, other statements such as if, while, for and assert with or without parentheses as shown below are also the same: if (x == "Hello"): if x == "Hello": while (x == 3): while x == 3: for (x) in (fruits): for x in fruits: assert (x == 3) assert x == 3 In addition, basically, most example python code which I've seen so far doesn't use parentheses for del, if, while, for and assert statements so I prefer not using parentheses for them.
Why does del (x) with parentheses around the variable name work?
Why does this piece of code work the way it does? x = 3 print(dir()) #output indicates that x is defined in the global scope del (x) print(dir()) #output indicates that x is not defined in the global scope My understanding is that del is a keyword in Python, and what follows del should be a name. (name) is not a name. Why does the example seem to show that del (name) works the same as del name?
[ "The definition of the del statement is:\ndel_stmt ::= \"del\" target_list\n\nand from the definition of target_list:\ntarget_list ::= target (\",\" target)* [\",\"]\ntarget ::= identifier\n | \"(\" target_list \")\"\n | \"[\" [target_list] \"]\"\n | ...\n\nyou can see that parentheses around the list of targets are allowed.\nFor example, if you define x,y = 1,2, all of these are allowed and have the same effect:\ndel x,y\ndel (x,y)\ndel (x),[y]\ndel [x,(y)]\ndel ([x], (y))\n\n", "del statement with or without parentheses as shown below are the same:\n\ndel (x)\n\ndel x\n\n\nAnd, other statements such as if, while, for and assert with or without parentheses as shown below are also the same:\n\nif (x == \"Hello\"):\n\nif x == \"Hello\":\n\n\nwhile (x == 3):\n\nwhile x == 3:\n\n\nfor (x) in (fruits):\n\nfor x in fruits:\n\n\nassert (x == 3)\n\nassert x == 3\n\n\nIn addition, basically, most example python code which I've seen so far doesn't use parentheses for del, if, while, for and assert statements so I prefer not using parentheses for them.\n" ]
[ 12, 0 ]
[]
[]
[ "python" ]
stackoverflow_0039028249_python.txt
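Editor's note: the target grammar quoted above also allows attribute references and subscriptions (hidden behind the "..."), so one del statement can mix several target kinds; a quick illustration:

d = {'k': 1}
lst = [1, 2, 3]

class Box:
    pass

b = Box()
b.attr = 'x'

del d['k'], lst[0], b.attr           # one del statement, three target kinds
print(d, lst, hasattr(b, 'attr'))    # {} [2, 3] False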
Q: Python truncate a long string How does one truncate a string to 75 characters in Python? This is how it is done in JavaScript: var data="saddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddsaddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddsadddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd" var info = (data.length > 75) ? data.substring[0,75] + '..' : data; A: info = (data[:75] + '..') if len(data) > 75 else data A: Even more concise: data = data[:75] If it is less than 75 characters there will be no change. A: Even shorter : info = data[:75] + (data[75:] and '..') A: If you are using Python 3.4+, you can use textwrap.shorten from the standard library: Collapse and truncate the given text to fit in the given width. First the whitespace in text is collapsed (all whitespace is replaced by single spaces). If the result fits in the width, it is returned. Otherwise, enough words are dropped from the end so that the remaining words plus the placeholder fit within width: >>> textwrap.shorten("Hello world!", width=12) 'Hello world!' >>> textwrap.shorten("Hello world!", width=11) 'Hello [...]' >>> textwrap.shorten("Hello world", width=10, placeholder="...") 'Hello...' A: For a Django solution (which has not been mentioned in the question): from django.utils.text import Truncator value = Truncator(value).chars(75) Have a look at Truncator's source code to appreciate the problem: https://github.com/django/django/blob/master/django/utils/text.py#L66 Concerning truncation with Django: Django HTML truncation A: With regex: re.sub(r'^(.{75}).*$', '\g<1>...', data) Long strings are truncated: >>> data="11111111112222222222333333333344444444445555555555666666666677777777778888888888" >>> re.sub(r'^(.{75}).*$', '\g<1>...', data) '111111111122222222223333333333444444444455555555556666666666777777777788888...' Shorter strings never get truncated: >>> data="11111111112222222222333333" >>> re.sub(r'^(.{75}).*$', '\g<1>...', data) '11111111112222222222333333' This way, you can also "cut" the middle part of the string, which is nicer in some cases: re.sub(r'^(.{5}).*(.{5})$', '\g<1>...\g<2>', data) >>> data="11111111112222222222333333333344444444445555555555666666666677777777778888888888" >>> re.sub(r'^(.{5}).*(.{5})$', '\g<1>...\g<2>', data) '11111...88888' A: limit = 75 info = data[:limit] + '..' * (len(data) > limit) A: This method doesn't use any if: data[:75] + bool(data[75:]) * '..' A: This just in: n = 8 s = '123' print s[:n-3] + (s[n-3:], '...')[len(s) > n] s = '12345678' print s[:n-3] + (s[n-3:], '...')[len(s) > n] s = '123456789' print s[:n-3] + (s[n-3:], '...')[len(s) > n] s = '123456789012345' print s[:n-3] + (s[n-3:], '...')[len(s) > n] 123 12345678 12345... 12345... A: info = data[:min(len(data), 75) A: You can't actually "truncate" a Python string like you can do a dynamically allocated C string. Strings in Python are immutable. What you can do is slice a string as described in other answers, yielding a new string containing only the characters defined by the slice offsets and step. In some (non-practical) cases this can be a little annoying, such as when you choose Python as your interview language and the interviewer asks you to remove duplicate characters from a string in-place. Doh. A: info = data[:75] + ('..' if len(data) > 75 else '') A: Yet another solution. With True and False you get a little feedback about the test at the end. 
data = {True: data[:75] + '..', False: data}[len(data) > 75] A: Coming very late to the party I want to add my solution to trim text at character level that also handles whitespaces properly. def trim_string(s: str, limit: int, ellipsis='…') -> str: s = s.strip() if len(s) > limit: return s[:limit-1].strip() + ellipsis return s Simple, but it will make sure you that hello world with limit=6 will not result in an ugly hello … but hello… instead. It also removes leading and trailing whitespaces, but not spaces inside. If you also want to remove spaces inside, checkout this stackoverflow post A: >>> info = lambda data: len(data)>10 and data[:10]+'...' or data >>> info('sdfsdfsdfsdfsdfsdfsdfsdfsdfsdfsdf') 'sdfsdfsdfs...' >>> info('sdfsdf') 'sdfsdf' >>> A: Simple and short helper function: def truncate_string(value, max_length=255, suffix='...'): string_value = str(value) string_truncated = string_value[:min(len(string_value), (max_length - len(suffix)))] suffix = (suffix if len(string_value) > max_length else '') return string_truncated+suffix Usage examples: # Example 1 (default): long_string = "" for number in range(1, 1000): long_string += str(number) + ',' result = truncate_string(long_string) print(result) # Example 2 (custom length): short_string = 'Hello world' result = truncate_string(short_string, 8) print(result) # > Hello... # Example 3 (not truncated): short_string = 'Hello world' result = truncate_string(short_string) print(result) # > Hello world A: There's no need for a regular expression but you do want to use string formatting rather than the string concatenation in the accepted answer. This is probably the most canonical, Pythonic way to truncate the string data at 75 characters. >>> data = "saddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddsaddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddsadddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd" >>> info = "{}..".format(data[:75]) if len(data) > 75 else data >>> info '111111111122222222223333333333444444444455555555556666666666777777777788888...' A: Here's a function I made as part of a new String class... It allows adding a suffix ( if the string is size after trimming and adding it is long enough - although you don't need to force the absolute size ) I was in the process of changing a few things around so there are some useless logic costs ( if _truncate ... for instance ) where it is no longer necessary and there is a return at the top... But, it is still a good function for truncating data... ## ## Truncate characters of a string after _len'nth char, if necessary... If _len is less than 0, don't truncate anything... Note: If you attach a suffix, and you enable absolute max length then the suffix length is subtracted from max length... Note: If the suffix length is longer than the output then no suffix is used... ## ## Usage: Where _text = 'Testing', _width = 4 ## _data = String.Truncate( _text, _width ) == Test ## _data = String.Truncate( _text, _width, '..', True ) == Te.. 
## ## Equivalent Alternates: Where _text = 'Testing', _width = 4 ## _data = String.SubStr( _text, 0, _width ) == Test ## _data = _text[ : _width ] == Test ## _data = ( _text )[ : _width ] == Test ## def Truncate( _text, _max_len = -1, _suffix = False, _absolute_max_len = True ): ## Length of the string we are considering for truncation _len = len( _text ) ## Whether or not we have to truncate _truncate = ( False, True )[ _len > _max_len ] ## Note: If we don't need to truncate, there's no point in proceeding... if ( not _truncate ): return _text ## The suffix in string form _suffix_str = ( '', str( _suffix ) )[ _truncate and _suffix != False ] ## The suffix length _len_suffix = len( _suffix_str ) ## Whether or not we add the suffix _add_suffix = ( False, True )[ _truncate and _suffix != False and _max_len > _len_suffix ] ## Suffix Offset _suffix_offset = _max_len - _len_suffix _suffix_offset = ( _max_len, _suffix_offset )[ _add_suffix and _absolute_max_len != False and _suffix_offset > 0 ] ## The truncate point.... If not necessary, then length of string.. If necessary then the max length with or without subtracting the suffix length... Note: It may be easier ( less logic cost ) to simply add the suffix to the calculated point, then truncate - if point is negative then the suffix will be destroyed anyway. ## If we don't need to truncate, then the length is the length of the string.. If we do need to truncate, then the length depends on whether we add the suffix and offset the length of the suffix or not... _len_truncate = ( _len, _max_len )[ _truncate ] _len_truncate = ( _len_truncate, _max_len )[ _len_truncate <= _max_len ] ## If we add the suffix, add it... Suffix won't be added if the suffix is the same length as the text being output... if ( _add_suffix ): _text = _text[ 0 : _suffix_offset ] + _suffix_str + _text[ _suffix_offset: ] ## Return the text after truncating... return _text[ : _len_truncate ] A: Here I use textwrap.shorten and handle more edge cases. also include part of the last word in case this word is more than 50% of the max width. import textwrap def shorten(text: str, width=30, placeholder="..."): """Collapse and truncate the given text to fit in the given width. The text first has its whitespace collapsed. If it then fits in the *width*, it is returned as is. Otherwise, as many words as possible are joined and then the placeholder is appended. """ if not text or not isinstance(text, str): return str(text) t = text.strip() if len(t) <= width: return t # textwrap.shorten also throws ValueError if placeholder too large for max width shorten_words = textwrap.shorten(t, width=width, placeholder=placeholder) # textwrap.shorten doesn't split words, so if the text contains a long word without spaces, the result may be too short without this word. # Here we use a different way to include the start of this word in case shorten_words is less than 50% of `width` if len(shorten_words) - len(placeholder) < (width - len(placeholder)) * 0.5: return t[:width - len(placeholder)].strip() + placeholder return shorten_words Tests: >>> shorten("123 456", width=7, placeholder="...") '123 456' >>> shorten("1 23 45 678 9", width=12, placeholder="...") '1 23 45...' >>> shorten("1 23 45 678 9", width=10, placeholder="...") '1 23 45...' >>> shorten("01 23456789", width=10, placeholder="...") '01 2345...' >>> shorten("012 3 45678901234567", width=17, placeholder="...") '012 3 45678901...' >>> shorten("1 23 45 678 9", width=9, placeholder="...") '1 23...' 
>>> shorten("1 23456", width=5, placeholder="...") '1...' >>> shorten("123 456", width=5, placeholder="...") '12...' >>> shorten("123 456", width=6, placeholder="...") '123...' >>> shorten("12 3456789", width=9, placeholder="...") '12 345...' >>> shorten(" 12 3456789 ", width=9, placeholder="...") '12 345...' >>> shorten('123 45', width=4, placeholder="...") '1...' >>> shorten('123 45', width=3, placeholder="...") '...' >>> shorten("123456", width=3, placeholder="...") '...' >>> shorten([1], width=9, placeholder="...") '[1]' >>> shorten(None, width=5, placeholder="...") 'None' >>> shorten("", width=9, placeholder="...") '' A: Suppose that stryng is a string which we wish to truncate and that nchars is the number of characters desired in the output string. stryng = "sadddddddddddddddddddddddddddddddddddddddddddddddddd" nchars = 10 We can truncate the string as follows: def truncate(stryng:str, nchars:int): return (stryng[:nchars - 6] + " [...]")[:min(len(stryng), nchars)] The results for certain test cases are shown below: s = "sadddddddddddddddddddddddddddddd!" s = "sa" + 30*"d" + "!" truncate(s, 2) == sa truncate(s, 4) == sadd truncate(s, 10) == sadd [...] truncate(s, len(s)//2) == sadddddddd [...] My solution produces reasonable results for the test cases above. However, some pathological cases are shown below: Some Pathological Cases! truncate(s, len(s) - 3)() == sadddddddddddddddddddddd [...] truncate(s, len(s) - 2)() == saddddddddddddddddddddddd [...] truncate(s, len(s) - 1)() == sadddddddddddddddddddddddd [...] truncate(s, len(s) + 0)() == saddddddddddddddddddddddddd [...] truncate(s, len(s) + 1)() == sadddddddddddddddddddddddddd [... truncate(s, len(s) + 2)() == saddddddddddddddddddddddddddd [.. truncate(s, len(s) + 3)() == sadddddddddddddddddddddddddddd [. truncate(s, len(s) + 4)() == saddddddddddddddddddddddddddddd [ truncate(s, len(s) + 5)() == sadddddddddddddddddddddddddddddd truncate(s, len(s) + 6)() == sadddddddddddddddddddddddddddddd! truncate(s, len(s) + 7)() == sadddddddddddddddddddddddddddddd! truncate(s, 9999)() == sadddddddddddddddddddddddddddddd! Notably, When the string contains new-line characters (\n) there could be an issue. When nchars > len(s) we should print string s without trying to print the "[...]" Below is some more code: import io class truncate: """ Example of Code Which Uses truncate: ``` s = "\r<class\n 'builtin_function_or_method'>" s = truncate(s, 10)() print(s) ``` Examples of Inputs and Outputs: truncate(s, 2)() == \r truncate(s, 4)() == \r<c truncate(s, 10)() == \r<c [...] truncate(s, 20)() == \r<class\n 'bu [...] 
truncate(s, 999)() == \r<class\n 'builtin_function_or_method'> ``` Other Notes: Returns a modified copy of string input Does not modify the original string """ def __init__(self, x_stryng: str, x_nchars: int) -> str: """ This initializer mostly exists to sanitize function inputs """ try: stryng = repr("".join(str(ch) for ch in x_stryng))[1:-1] nchars = int(str(x_nchars)) except BaseException as exc: invalid_stryng = str(x_stryng) invalid_stryng_truncated = repr(type(self)(invalid_stryng, 20)()) invalid_x_nchars = str(x_nchars) invalid_x_nchars_truncated = repr(type(self)(invalid_x_nchars, 20)()) strm = io.StringIO() print("Invalid Function Inputs", file=strm) print(type(self).__name__, "(", invalid_stryng_truncated, ", ", invalid_x_nchars_truncated, ")", sep="", file=strm) msg = strm.getvalue() raise ValueError(msg) from None self._stryng = stryng self._nchars = nchars def __call__(self) -> str: stryng = self._stryng nchars = self._nchars return (stryng[:nchars - 6] + " [...]")[:min(len(stryng), nchars)] A: Here's a simple function that will truncate a given string from either side: def truncate(string, length=75, beginning=True, insert='..'): '''Shorten the given string to the given length. An ellipsis will be added to the section trimmed. :Parameters: length (int) = The maximum allowed length before trunicating. beginning (bool) = Trim starting chars, else; ending. insert (str) = Chars to add at the trimmed area. (default: ellipsis) :Return: (str) ex. call: truncate('12345678', 4) returns: '..5678' ''' if len(string)>length: if beginning: #trim starting chars. string = insert+string[-length:] else: #trim ending chars. string = string[:length]+insert return string A: If you wish to do some more sophisticated string truncate you can adopt sklearn approach as implement by: sklearn.base.BaseEstimator.__repr__ (See Original full code at: https://github.com/scikit-learn/scikit-learn/blob/f3f51f9b6/sklearn/base.py#L262) It adds benefits such as avoiding truncate in the middle of the word. def truncate_string(data, N_CHAR_MAX=70): # N_CHAR_MAX is the (approximate) maximum number of non-blank # characters to render. We pass it as an optional parameter to ease # the tests. lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends regex = r"^(\s*\S){%d}" % lim # The regex '^(\s*\S){%d}' % n # matches from the start of the string until the nth non-blank # character: # - ^ matches the start of string # - (pattern){n} matches n repetitions of pattern # - \s*\S matches a non-blank char following zero or more blanks left_lim = re.match(regex, data).end() right_lim = re.match(regex, data[::-1]).end() if "\n" in data[left_lim:-right_lim]: # The left side and right side aren't on the same line. # To avoid weird cuts, e.g.: # categoric...ore', # we need to start the right side with an appropriate newline # character so that it renders properly as: # categoric... # handle_unknown='ignore', # so we add [^\n]*\n which matches until the next \n regex += r"[^\n]*\n" right_lim = re.match(regex, data[::-1]).end() ellipsis = "..." if left_lim + len(ellipsis) < len(data) - right_lim: # Only add ellipsis if it results in a shorter repr data = data[:left_lim] + "..." + data[-right_lim:] return data
Python truncate a long string
How does one truncate a string to 75 characters in Python? This is how it is done in JavaScript: var data="saddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddsaddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddsadddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd" var info = (data.length > 75) ? data.substring(0,75) + '..' : data;
[ "info = (data[:75] + '..') if len(data) > 75 else data\n\n", "Even more concise:\ndata = data[:75]\n\nIf it is less than 75 characters there will be no change.\n", "Even shorter :\ninfo = data[:75] + (data[75:] and '..')\n\n", "If you are using Python 3.4+, you can use textwrap.shorten from the standard library:\n\nCollapse and truncate the given text to fit in the given width.\nFirst the whitespace in text is collapsed (all whitespace is replaced\n by single spaces). If the result fits in the width, it is returned.\n Otherwise, enough words are dropped from the end so that the remaining\n words plus the placeholder fit within width:\n>>> textwrap.shorten(\"Hello world!\", width=12)\n'Hello world!'\n>>> textwrap.shorten(\"Hello world!\", width=11)\n'Hello [...]'\n>>> textwrap.shorten(\"Hello world\", width=10, placeholder=\"...\")\n'Hello...'\n\n\n", "For a Django solution (which has not been mentioned in the question):\nfrom django.utils.text import Truncator\nvalue = Truncator(value).chars(75)\n\nHave a look at Truncator's source code to appreciate the problem:\nhttps://github.com/django/django/blob/master/django/utils/text.py#L66\nConcerning truncation with Django:\nDjango HTML truncation\n", "With regex:\nre.sub(r'^(.{75}).*$', '\\g<1>...', data)\n\nLong strings are truncated:\n>>> data=\"11111111112222222222333333333344444444445555555555666666666677777777778888888888\"\n>>> re.sub(r'^(.{75}).*$', '\\g<1>...', data)\n'111111111122222222223333333333444444444455555555556666666666777777777788888...'\n\nShorter strings never get truncated:\n>>> data=\"11111111112222222222333333\"\n>>> re.sub(r'^(.{75}).*$', '\\g<1>...', data)\n'11111111112222222222333333'\n\nThis way, you can also \"cut\" the middle part of the string, which is nicer in some cases:\nre.sub(r'^(.{5}).*(.{5})$', '\\g<1>...\\g<2>', data)\n\n>>> data=\"11111111112222222222333333333344444444445555555555666666666677777777778888888888\"\n>>> re.sub(r'^(.{5}).*(.{5})$', '\\g<1>...\\g<2>', data)\n'11111...88888'\n\n", "limit = 75\ninfo = data[:limit] + '..' * (len(data) > limit)\n\n", "This method doesn't use any if:\n\ndata[:75] + bool(data[75:]) * '..'\n\n", "This just in:\nn = 8\ns = '123'\nprint s[:n-3] + (s[n-3:], '...')[len(s) > n]\ns = '12345678'\nprint s[:n-3] + (s[n-3:], '...')[len(s) > n]\ns = '123456789' \nprint s[:n-3] + (s[n-3:], '...')[len(s) > n]\ns = '123456789012345'\nprint s[:n-3] + (s[n-3:], '...')[len(s) > n]\n\n123\n12345678\n12345...\n12345...\n\n", "info = data[:min(len(data), 75)\n\n", "You can't actually \"truncate\" a Python string like you can do a dynamically allocated C string. Strings in Python are immutable. What you can do is slice a string as described in other answers, yielding a new string containing only the characters defined by the slice offsets and step.\nIn some (non-practical) cases this can be a little annoying, such as when you choose Python as your interview language and the interviewer asks you to remove duplicate characters from a string in-place. Doh.\n", "info = data[:75] + ('..' if len(data) > 75 else '')\n\n", "Yet another solution. 
With True and False you get a little feedback about the test at the end.\ndata = {True: data[:75] + '..', False: data}[len(data) > 75]\n\n", "Coming very late to the party I want to add my solution to trim text at character level that also handles whitespaces properly.\ndef trim_string(s: str, limit: int, ellipsis='…') -> str:\n s = s.strip()\n if len(s) > limit:\n return s[:limit-1].strip() + ellipsis\n return s\n\nSimple, but it will make sure you that hello world with limit=6 will not result in an ugly hello … but hello… instead.\nIt also removes leading and trailing whitespaces, but not spaces inside. If you also want to remove spaces inside, checkout this stackoverflow post\n", " >>> info = lambda data: len(data)>10 and data[:10]+'...' or data\n >>> info('sdfsdfsdfsdfsdfsdfsdfsdfsdfsdfsdf')\n 'sdfsdfsdfs...'\n >>> info('sdfsdf')\n 'sdfsdf'\n >>> \n\n", "Simple and short helper function:\ndef truncate_string(value, max_length=255, suffix='...'):\n string_value = str(value)\n string_truncated = string_value[:min(len(string_value), (max_length - len(suffix)))]\n suffix = (suffix if len(string_value) > max_length else '')\n return string_truncated+suffix\n\nUsage examples:\n# Example 1 (default):\n\nlong_string = \"\"\nfor number in range(1, 1000): \n long_string += str(number) + ',' \n\nresult = truncate_string(long_string)\nprint(result)\n\n\n# Example 2 (custom length):\n\nshort_string = 'Hello world'\nresult = truncate_string(short_string, 8)\nprint(result) # > Hello... \n\n\n# Example 3 (not truncated):\n\nshort_string = 'Hello world'\nresult = truncate_string(short_string)\nprint(result) # > Hello world\n\n\n", "There's no need for a regular expression but you do want to use string formatting rather than the string concatenation in the accepted answer. \nThis is probably the most canonical, Pythonic way to truncate the string data at 75 characters.\n>>> data = \"saddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddsaddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddsadddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd\"\n>>> info = \"{}..\".format(data[:75]) if len(data) > 75 else data\n>>> info\n'111111111122222222223333333333444444444455555555556666666666777777777788888...'\n\n", "Here's a function I made as part of a new String class... It allows adding a suffix ( if the string is size after trimming and adding it is long enough - although you don't need to force the absolute size )\nI was in the process of changing a few things around so there are some useless logic costs ( if _truncate ... for instance ) where it is no longer necessary and there is a return at the top...\nBut, it is still a good function for truncating data...\n##\n## Truncate characters of a string after _len'nth char, if necessary... If _len is less than 0, don't truncate anything... Note: If you attach a suffix, and you enable absolute max length then the suffix length is subtracted from max length... 
Note: If the suffix length is longer than the output then no suffix is used...\n##\n## Usage: Where _text = 'Testing', _width = 4\n## _data = String.Truncate( _text, _width ) == Test\n## _data = String.Truncate( _text, _width, '..', True ) == Te..\n##\n## Equivalent Alternates: Where _text = 'Testing', _width = 4\n## _data = String.SubStr( _text, 0, _width ) == Test\n## _data = _text[ : _width ] == Test\n## _data = ( _text )[ : _width ] == Test\n##\ndef Truncate( _text, _max_len = -1, _suffix = False, _absolute_max_len = True ):\n ## Length of the string we are considering for truncation\n _len = len( _text )\n\n ## Whether or not we have to truncate\n _truncate = ( False, True )[ _len > _max_len ]\n\n ## Note: If we don't need to truncate, there's no point in proceeding...\n if ( not _truncate ):\n return _text\n\n ## The suffix in string form\n _suffix_str = ( '', str( _suffix ) )[ _truncate and _suffix != False ]\n\n ## The suffix length\n _len_suffix = len( _suffix_str )\n\n ## Whether or not we add the suffix\n _add_suffix = ( False, True )[ _truncate and _suffix != False and _max_len > _len_suffix ]\n\n ## Suffix Offset\n _suffix_offset = _max_len - _len_suffix\n _suffix_offset = ( _max_len, _suffix_offset )[ _add_suffix and _absolute_max_len != False and _suffix_offset > 0 ]\n\n ## The truncate point.... If not necessary, then length of string.. If necessary then the max length with or without subtracting the suffix length... Note: It may be easier ( less logic cost ) to simply add the suffix to the calculated point, then truncate - if point is negative then the suffix will be destroyed anyway.\n ## If we don't need to truncate, then the length is the length of the string.. If we do need to truncate, then the length depends on whether we add the suffix and offset the length of the suffix or not...\n _len_truncate = ( _len, _max_len )[ _truncate ]\n _len_truncate = ( _len_truncate, _max_len )[ _len_truncate <= _max_len ]\n\n ## If we add the suffix, add it... Suffix won't be added if the suffix is the same length as the text being output...\n if ( _add_suffix ):\n _text = _text[ 0 : _suffix_offset ] + _suffix_str + _text[ _suffix_offset: ]\n\n ## Return the text after truncating...\n return _text[ : _len_truncate ]\n\n", "Here I use textwrap.shorten and handle more edge cases. also include part of the last word in case this word is more than 50% of the max width.\nimport textwrap\n\n\ndef shorten(text: str, width=30, placeholder=\"...\"):\n \"\"\"Collapse and truncate the given text to fit in the given width.\n\n The text first has its whitespace collapsed. 
If it then fits in the *width*, it is returned as is.\n Otherwise, as many words as possible are joined and then the placeholder is appended.\n \"\"\"\n if not text or not isinstance(text, str):\n return str(text)\n t = text.strip()\n if len(t) <= width:\n return t\n\n # textwrap.shorten also throws ValueError if placeholder too large for max width\n shorten_words = textwrap.shorten(t, width=width, placeholder=placeholder)\n\n # textwrap.shorten doesn't split words, so if the text contains a long word without spaces, the result may be too short without this word.\n # Here we use a different way to include the start of this word in case shorten_words is less than 50% of `width`\n if len(shorten_words) - len(placeholder) < (width - len(placeholder)) * 0.5:\n return t[:width - len(placeholder)].strip() + placeholder\n return shorten_words\n\nTests:\n>>> shorten(\"123 456\", width=7, placeholder=\"...\")\n'123 456'\n>>> shorten(\"1 23 45 678 9\", width=12, placeholder=\"...\")\n'1 23 45...'\n>>> shorten(\"1 23 45 678 9\", width=10, placeholder=\"...\")\n'1 23 45...'\n>>> shorten(\"01 23456789\", width=10, placeholder=\"...\")\n'01 2345...'\n>>> shorten(\"012 3 45678901234567\", width=17, placeholder=\"...\")\n'012 3 45678901...'\n>>> shorten(\"1 23 45 678 9\", width=9, placeholder=\"...\")\n'1 23...'\n>>> shorten(\"1 23456\", width=5, placeholder=\"...\")\n'1...'\n>>> shorten(\"123 456\", width=5, placeholder=\"...\")\n'12...'\n>>> shorten(\"123 456\", width=6, placeholder=\"...\")\n'123...'\n>>> shorten(\"12 3456789\", width=9, placeholder=\"...\")\n'12 345...'\n>>> shorten(\" 12 3456789 \", width=9, placeholder=\"...\")\n'12 345...'\n>>> shorten('123 45', width=4, placeholder=\"...\")\n'1...'\n>>> shorten('123 45', width=3, placeholder=\"...\")\n'...'\n>>> shorten(\"123456\", width=3, placeholder=\"...\")\n'...'\n>>> shorten([1], width=9, placeholder=\"...\")\n'[1]'\n>>> shorten(None, width=5, placeholder=\"...\")\n'None'\n>>> shorten(\"\", width=9, placeholder=\"...\")\n''\n\n", "Suppose that stryng is a string which we wish to truncate and that nchars is the number of characters desired in the output string.\nstryng = \"sadddddddddddddddddddddddddddddddddddddddddddddddddd\"\nnchars = 10\n\nWe can truncate the string as follows:\ndef truncate(stryng:str, nchars:int):\n return (stryng[:nchars - 6] + \" [...]\")[:min(len(stryng), nchars)]\n\nThe results for certain test cases are shown below:\ns = \"sadddddddddddddddddddddddddddddd!\"\ns = \"sa\" + 30*\"d\" + \"!\"\n\ntruncate(s, 2) == sa\ntruncate(s, 4) == sadd\ntruncate(s, 10) == sadd [...]\ntruncate(s, len(s)//2) == sadddddddd [...]\n\nMy solution produces reasonable results for the test cases above.\nHowever, some pathological cases are shown below:\nSome Pathological Cases!\ntruncate(s, len(s) - 3)() == sadddddddddddddddddddddd [...]\ntruncate(s, len(s) - 2)() == saddddddddddddddddddddddd [...]\ntruncate(s, len(s) - 1)() == sadddddddddddddddddddddddd [...]\ntruncate(s, len(s) + 0)() == saddddddddddddddddddddddddd [...]\ntruncate(s, len(s) + 1)() == sadddddddddddddddddddddddddd [...\ntruncate(s, len(s) + 2)() == saddddddddddddddddddddddddddd [..\ntruncate(s, len(s) + 3)() == sadddddddddddddddddddddddddddd [.\ntruncate(s, len(s) + 4)() == saddddddddddddddddddddddddddddd [\ntruncate(s, len(s) + 5)() == sadddddddddddddddddddddddddddddd \ntruncate(s, len(s) + 6)() == sadddddddddddddddddddddddddddddd!\ntruncate(s, len(s) + 7)() == sadddddddddddddddddddddddddddddd!\ntruncate(s, 9999)() == sadddddddddddddddddddddddddddddd!\n\nNotably,\n\nWhen 
the string contains new-line characters (\\n) there could be an issue.\nWhen nchars > len(s) we should print string s without trying to print the \"[...]\"\n\nBelow is some more code:\nimport io\n\nclass truncate:\n \"\"\"\n Example of Code Which Uses truncate:\n ```\n s = \"\\r<class\\n 'builtin_function_or_method'>\"\n s = truncate(s, 10)()\n print(s)\n ```\n Examples of Inputs and Outputs:\n truncate(s, 2)() == \\r\n truncate(s, 4)() == \\r<c\n truncate(s, 10)() == \\r<c [...]\n truncate(s, 20)() == \\r<class\\n 'bu [...]\n truncate(s, 999)() == \\r<class\\n 'builtin_function_or_method'>\n ```\n Other Notes:\n Returns a modified copy of string input\n Does not modify the original string\n \"\"\"\n def __init__(self, x_stryng: str, x_nchars: int) -> str:\n \"\"\"\n This initializer mostly exists to sanitize function inputs\n \"\"\"\n try:\n stryng = repr(\"\".join(str(ch) for ch in x_stryng))[1:-1]\n nchars = int(str(x_nchars))\n except BaseException as exc:\n invalid_stryng = str(x_stryng)\n invalid_stryng_truncated = repr(type(self)(invalid_stryng, 20)())\n\n invalid_x_nchars = str(x_nchars)\n invalid_x_nchars_truncated = repr(type(self)(invalid_x_nchars, 20)())\n\n strm = io.StringIO()\n print(\"Invalid Function Inputs\", file=strm)\n print(type(self).__name__, \"(\",\n invalid_stryng_truncated,\n \", \",\n invalid_x_nchars_truncated, \")\", sep=\"\", file=strm)\n msg = strm.getvalue()\n\n raise ValueError(msg) from None\n\n self._stryng = stryng\n self._nchars = nchars\n\n def __call__(self) -> str:\n stryng = self._stryng\n nchars = self._nchars\n return (stryng[:nchars - 6] + \" [...]\")[:min(len(stryng), nchars)]\n\n", "Here's a simple function that will truncate a given string from either side:\ndef truncate(string, length=75, beginning=True, insert='..'):\n '''Shorten the given string to the given length.\n An ellipsis will be added to the section trimmed.\n\n :Parameters:\n length (int) = The maximum allowed length before trunicating.\n beginning (bool) = Trim starting chars, else; ending.\n insert (str) = Chars to add at the trimmed area. (default: ellipsis)\n\n :Return:\n (str)\n\n ex. call: truncate('12345678', 4)\n returns: '..5678'\n '''\n if len(string)>length:\n if beginning: #trim starting chars.\n string = insert+string[-length:]\n else: #trim ending chars.\n string = string[:length]+insert\n return string\n\n", "If you wish to do some more sophisticated string truncate you can adopt sklearn approach as implement by:\nsklearn.base.BaseEstimator.__repr__\n(See Original full code at: https://github.com/scikit-learn/scikit-learn/blob/f3f51f9b6/sklearn/base.py#L262)\nIt adds benefits such as avoiding truncate in the middle of the word.\ndef truncate_string(data, N_CHAR_MAX=70):\n # N_CHAR_MAX is the (approximate) maximum number of non-blank\n # characters to render. 
We pass it as an optional parameter to ease\n # the tests.\n\n lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends\n regex = r\"^(\\s*\\S){%d}\" % lim\n # The regex '^(\\s*\\S){%d}' % n\n # matches from the start of the string until the nth non-blank\n # character:\n # - ^ matches the start of string\n # - (pattern){n} matches n repetitions of pattern\n # - \\s*\\S matches a non-blank char following zero or more blanks\n left_lim = re.match(regex, data).end()\n right_lim = re.match(regex, data[::-1]).end()\n if \"\\n\" in data[left_lim:-right_lim]:\n # The left side and right side aren't on the same line.\n # To avoid weird cuts, e.g.:\n # categoric...ore',\n # we need to start the right side with an appropriate newline\n # character so that it renders properly as:\n # categoric...\n # handle_unknown='ignore',\n # so we add [^\\n]*\\n which matches until the next \\n\n regex += r\"[^\\n]*\\n\"\n right_lim = re.match(regex, data[::-1]).end()\n ellipsis = \"...\"\n if left_lim + len(ellipsis) < len(data) - right_lim:\n # Only add ellipsis if it results in a shorter repr\n data = data[:left_lim] + \"...\" + data[-right_lim:]\n return data\n\n" ]
[ 549, 173, 154, 128, 45, 15, 13, 6, 6, 6, 4, 4, 3, 2, 1, 1, 0, 0, 0, 0, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0002872512_python.txt
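Editor's sketch distilling the top answers into one helper; the name and defaults are this editor's choice, not from any single answer.

def truncate(data: str, limit: int = 75, suffix: str = '..') -> str:
    # plain character cut with a suffix, as in the accepted answer
    return data if len(data) <= limit else data[:limit] + suffix

print(truncate('short'))    # short
print(truncate('x' * 100))  # 75 x's followed by '..'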
Q: Python MSAL PATCH to mark email as read CompactToken parsing failed I have a program that utilizes the MS Graph API and pulls emails received yesterday that have an attachment and have not been read. My endpoint looks like this: 'https://graph.microsoft.com/v1.0/me/mailFolders/inbox/messages?$expand=attachments&$search="hasAttachments:true AND received:yesterday AND isRead:false"' After I authenticate and pull the data needed, I'm sending the content bytes to an s3 bucket to store with this block below: result = app.acquire_token_silent(config["scope"], account=a) s3 = boto3.client('s3') bucket = config['bucket'] for email in emails: if email['hasAttachments']: attachments = email['attachments'] count = 0 for attachment in attachments: if attachment['isInline'] is False: name = attachment['name'] fileContent = base64.b64decode(attachment['contentBytes']) s3.put_object(Bucket=bucket, Key=name, Body=fileContent) count = count +1 emailid = email['id'] rp = requests.patch(f'https://graph.microsoft.com/v1.0/me/messages/{emailid}', data ={'isRead':'true'}, headers = {"Authorization": f"Bearer token= {result['access_token']}"}) print(rp) logger.info('Attachment uploaded-' + ' Attachment Size: ' + str(attachment['size']) + ' File Name: ' + attachment['name'] + ' Email Source: ' + email['from']['emailAddress']['address'] + ' Email ID: ' + email['id'] + ' Email Subject Line: ' + email['subject']) logger.info(f"Attachment count uploaded to s3: {count}") logger.info('All uploads complete') Everything works fine, except I seem unable to mark the email as read so it doesn't get pulled again with another request. This is the full error i'm getting: '{"error":{"code":"InvalidAuthenticationToken","message":"CompactToken parsing failed with error code: 80049217","innerError":{"date":"2022-11-17T18:48:54","request-id":"<xxx>","client-request-id":"<xxx>"}}}' I'm sure there is something wrong with my formatting or possibly an issue with the scopes i've given it. The only scope I have added currently is ["Mail.ReadWrite"]. Do I just have is formatted incorrectly, or is there something else going on here that's triggering the error A: It should be: requests.patch(f'https://graph.microsoft.com/v1.0/me/messages/{emailid}', json={'isRead': True}, headers={'Authorization': f'Bearer {oauth_token_access_token}'})
Python MSAL PATCH to mark email as read CompactToken parsing failed
I have a program that utilizes the MS Graph API and pulls emails received yesterday that have an attachment and have not been read. My endpoint looks like this: 'https://graph.microsoft.com/v1.0/me/mailFolders/inbox/messages?$expand=attachments&$search="hasAttachments:true AND received:yesterday AND isRead:false"' After I authenticate and pull the data needed, I'm sending the content bytes to an s3 bucket to store with this block below: result = app.acquire_token_silent(config["scope"], account=a) s3 = boto3.client('s3') bucket = config['bucket'] for email in emails: if email['hasAttachments']: attachments = email['attachments'] count = 0 for attachment in attachments: if attachment['isInline'] is False: name = attachment['name'] fileContent = base64.b64decode(attachment['contentBytes']) s3.put_object(Bucket=bucket, Key=name, Body=fileContent) count = count +1 emailid = email['id'] rp = requests.patch(f'https://graph.microsoft.com/v1.0/me/messages/{emailid}', data ={'isRead':'true'}, headers = {"Authorization": f"Bearer token= {result['access_token']}"}) print(rp) logger.info('Attachment uploaded-' + ' Attachment Size: ' + str(attachment['size']) + ' File Name: ' + attachment['name'] + ' Email Source: ' + email['from']['emailAddress']['address'] + ' Email ID: ' + email['id'] + ' Email Subject Line: ' + email['subject']) logger.info(f"Attachment count uploaded to s3: {count}") logger.info('All uploads complete') Everything works fine, except I seem unable to mark the email as read so it doesn't get pulled again with another request. This is the full error I'm getting: '{"error":{"code":"InvalidAuthenticationToken","message":"CompactToken parsing failed with error code: 80049217","innerError":{"date":"2022-11-17T18:48:54","request-id":"<xxx>","client-request-id":"<xxx>"}}}' I'm sure there is something wrong with my formatting or possibly an issue with the scopes I've given it. The only scope I have added currently is ["Mail.ReadWrite"]. Do I just have it formatted incorrectly, or is there something else going on here that's triggering the error?
[ "It should be:\nrequests.patch(f'https://graph.microsoft.com/v1.0/me/messages/{emailid}', json={'isRead': True}, headers={'Authorization': f'Bearer {oauth_token_access_token}'})\n\n" ]
[ 1 ]
[]
[]
[ "microsoft_graph_api", "msal", "python", "python_requests" ]
stackoverflow_0074480887_microsoft_graph_api_msal_python_python_requests.txt
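Editor's sketch of the fix in context; mark_read is a made-up helper name and the token is assumed to come from MSAL as in the question. Note json= (which serializes the body and sets Content-Type: application/json) versus the original data=, and a header of exactly Bearer <token> with no extra "token=" text.

import requests

def mark_read(emailid, access_token):
    url = f'https://graph.microsoft.com/v1.0/me/messages/{emailid}'
    resp = requests.patch(
        url,
        json={'isRead': True},
        headers={'Authorization': f'Bearer {access_token}'})
    resp.raise_for_status()  # surface Graph errors instead of hiding them

# usage with the question's token: mark_read(emailid, result['access_token'])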
Q: Poisson Distribution considering time left I want to calculate the remaining probabilities for each result in a football game at minute n. In this case I have expected goals for the home team of 2.69 and the away team of 1.12 at minute 70, for a current result of 2-1 Code from scipy.stats import poisson from itertools import product import numpy as np import pandas as pd xgh = 2.69 xga = 1.12 minute = 70 hg, ag = 2,1 phs=[] pas=[] for i, l in zip(range(0, 6), range(0, 6)): ph = poisson.pmf(mu=xgh, k=i, loc=hg) phs.append(ph) pa = poisson.pmf(mu=xga, k=l, loc=ag) pas.append(pa) prod_table = np.array([(i*j) for i, j in product(phs, pas)]) prod_table.shape = (6, 6) prob_df = pd.DataFrame(prod_table, index=range(0,6), columns=range(0, 6)) This returns a probability for the 2-1 final result of 2.21%, which is pretty low. I expect a high probability considering only 20 minutes are left A: Math considerations Poisson distribution is the probability that an event occurs k times in a given time frame, knowing that, on average, it is supposed to occur μ times in this same time frame. The postulate of Poisson distribution is that events are totally independent. So how many times it has already occurred is meaningless. And that they are uniformly distributed (if I may use this confusing word, since this is not a uniform distribution). Most of the time, Poisson's usage is to compute the probability of occurrence of k events in a timeframe T, when we know that μ events occur on average in a timeframe τ (the difference with the 1st sentence being that T and τ are not the same). But that is the easy part: since events are uniformly distributed, if μ events occur on average in a time frame τ, then μ×T/τ events should occur, on average, in a time frame T (understand: if we were to experiment with millions of time frames T, then on average, there should be μT/τ events in each of them). So, to compute the probability that an event occurs k times in time frame T, knowing that it occurs μ times in time frame τ, you just have to reply to the question "how likely is the event to occur k times in time frame T, knowing that it occurs μT/τ times on average in that time frame". Which is the question Poisson can answer. In Python, that answer is poisson.pmf(k, μT/τ). In your case, you know μ, the number of goals expected in a 90-minute time frame. You know that the time frame left to score is 20 minutes. If 2.69 goals are expected in a time frame of 90 minutes, then 0.5978 goals are expected in a time frame of 20 minutes (at least, Poisson's postulates say that things work that way). Therefore, the probability for that team to score no other goal in that timeframe is poisson.pmf(0, 0.5978). Or, using your keyword style, poisson.pmf(mu=0.5978, k=0). Or using loc, to have the total amount of goals, poisson.pmf(mu=0.5978, k=2, loc=2) (but that is just cosmetic. Having a loc parameter just replaces k by k-loc) tl;dr solution So, long story short, you just need to scale down xgh and xga so that they reflect the expected number of goals in the remaining time. for i, l in zip(range(0, 6), range(0, 6)): ph = poisson.pmf(mu=xgh*(90-minute)/90, k=i, loc=hg) phs.append(ph) pa = poisson.pmf(mu=xga*(90-minute)/90, k=l, loc=ag) pas.append(pa) Other comments zip While at it, and since there is a python tag, some comments on the code for i, l in zip(range(0, 6), range(0, 6)): print(i,l) produces 0 0 1 1 2 2 3 3 4 4 5 5 So it is quite strange not to use a single variable.
Especially if you consider that there is no way you could use different ranges (zip must be used with iterables of the same length, and we don't see under which circumstances we would need, for example, i to grow from 0 to 5 while l grows from 0 to 10). So just for k in range(0, 6): ph = poisson.pmf(mu=xgh*(90-minute)/90, k=k, loc=hg) phs.append(ph) pa = poisson.pmf(mu=xga*(90-minute)/90, k=k, loc=ag) pas.append(pa) I surmise, especially because of what is the object of the next remark, that once upon a time there was a product instead of that zip, before you realized that this was computing the same exact pmf several times. Cross product That usage of product has then probably been reduced to the task of computing phs[i]×pas[j] for all i,j. That is a good usage of product. But, since you have 2 arrays, and you intend to build a numpy array from those phs[i]×pas[j], let numpy do the job. It will be more efficient at it. prod_table = np.array(phs).reshape(-1,1)*np.array(pas) Getting arrays directly from Poisson Which leads to another optimization. If the goal is to transform phs and pas into arrays, so that we can multiply them (one as a line, another as a column) to get the table, why not let numpy build that array directly. As with many numpy functions, pmf can take k as a list rather than a scalar, and then returns an array rather than a scalar. So phs=poisson.pmf(mu=xgh*(90-minute)/90, k=range(6), loc=hg) pas=poisson.pmf(mu=xga*(90-minute)/90, k=range(6), loc=ag) So, altogether prod_table=poisson.pmf(mu=xgh*(90-minute)/90, k=range(6), loc=hg).reshape(-1,1)*poisson.pmf(mu=xga*(90-minute)/90, k=range(6), loc=ag) Timings Optimisations Time in μs Without 1647 μs With 329 μs So, it is not just the most compact and readable. It is also (almost exactly) 5 times faster.
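Editor's sketch of the answer's tl;dr, computing the whole score matrix with the remaining-time scaling in a few lines; variable names follow the question.

from scipy.stats import poisson

xgh, xga, minute, hg, ag = 2.69, 1.12, 70, 2, 1
frac = (90 - minute) / 90                # share of the match still to play
phs = poisson.pmf(k=range(6), mu=xgh * frac, loc=hg)
pas = poisson.pmf(k=range(6), mu=xga * frac, loc=ag)
prod_table = phs.reshape(-1, 1) * pas    # home goals as rows, away as columns
print(prod_table[2, 1])                  # P(score stays 2-1), roughly 0.43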
Poisson Distribution considering time left
I want to calculate the remaining probabilities for each result in a football game at n minute. In this case I have expected goals for home team of 2.69 and away team 1.12 at 70 minute for a current result of 2-1 Code from scipy.stats import poisson from itertools import product import numpy as np import pandas as pd xgh = 2.69 xga = 1.12 minute = 70 hg, ag = 2,1 phs=[] pas=[] for i, l in zip(range(0, 6), range(0, 6)): ph = poisson.pmf(mu=xgh, k=i, loc=hg) phs.append(ph) pa = poisson.pmf(mu=xga, k=l, loc=ag) pas.append(pa) prod_table = np.array([(i*j) for i, j in product(phs, pas)]) prod_table.shape = (6, 6) prob_df = pd.DataFrame(prod_table, index=range(0,6), columns=range(0, 6)) This return a probability of 2-1 final result for 2.21% that is pretty low I expect an high probability considering only 20 minutes left
[ "Math considerations\nPoisson distribution is the probability that an event occurs k times in a given time frame, knowing that, on average, it is supposed to occur μ times in this same time frame.\nThe postulate of Poisson distribution is that events are totally independent. So how many times it has already occurred is meaningless. And that they are uniformly distributed (If I may use this confusing word, since this is not a uniform distribution).\nMost of the time, Poisson's usage is to compute probability of occurrence of k events in a timeframe T, when we know that μ events occur on average in a timeframe τ (difference with 1st sentence being that T and τ are not the same).\nBut that is the easy part: since evens are uniformly distributed, if μ events occurs on averate in a time frame τ, then μ×T/τ events shoud occur, on average, in a time frame T (understand: if we were to experiment millions of time frame T, then on average, there should be μT/τ events in each of them).\nSo, to compute the probability that event occurs k times in time frame T, knowing that it occurs μ times in time frame τ, you just have to reply to question \"how many times event occurs k times in time frame T, knowing that it occurs μT/τ times in that time time frame\". Which is the question Poisson can answer.\nIn python, that answer is poisson.pmf(k, μT/τ).\nIn your case, you know μ, the number of goals expected in a 90 minutes time frame. You know that the time frame left to score is 20 minutes. If 2.69 goals are expected in a time frame of 90 minutes then 0.5978 goals are expected in a time frame of 20 minutes (at least, that is Poisson postulates that things work that way).\nTherefore, the probability for that team to score no other goal in that timeframe is poisson.pmf(0, 0.5978). Or, using your keyword style poisson.pmf(mu=0.5978, k=0). Or using loc, to have the total amount of goals poisson.pmf(mu=0.5978, k=2, loc=2) (but that is just cosmetic. Having a loc parameter just replace k by k-loc)\ntl;dr solution\nSo, long story short, you just need to scale down xgh and xga so that they reflect the expected number of goals in the remaining time.\nfor i, l in zip(range(0, 6), range(0, 6)):\n ph = poisson.pmf(mu=xgh*(90-minute)/90, k=i, loc=hg)\n phs.append(ph)\n pa = poisson.pmf(mu=xga*(90-minute)/90, k=l, loc=ag)\n pas.append(pa)\n\nOther comments\nzip\nWhile at it, and since there is a python tag, some comments on the code\nfor i, l in zip(range(0, 6), range(0, 6)):\n print(i,l)\n\nproduces\n0 0\n1 1\n2 2\n3 3\n4 4\n5 5\n\nSo it is quite strange not to use a single variable. Especially if you consider that there is no way you could use different ranges (zip must be used with iterables of the same length. And we don't see under which circumstances, we would need, for example, i to grow from 0 to 5, while l would grow from 0 to 10)\nSo just\nfor k in range(0, 6):\n ph = poisson.pmf(mu=xgh*(90-minute)/90, k=k, loc=hg)\n phs.append(ph)\n pa = poisson.pmf(mu=xga*(90-minute)/90, k=k, loc=ag)\n pas.append(pa)\n\nI surmise, especially because of what is the object of the next remark, that once upon a time, there was a product instead of that zip, before you realized that this was computing several time the same exact pmf.\nCross product\nThat usage of product has probably been then reduced to the task of computing phs[i]×pas[j] for all i,j. That is a good usage of product.\nBut, since you have 2 arrays, and you intend to build a numpy array from those phs[i]×pas[j], let numpy do the job. 
It will be more efficient at it.\nprod_table = np.array(phs).reshape(-1,1)*np.array(pas)\n\nGetting arrays directly from Poisson\nWhich leads to another optimization. If the goal is to transform phs and pha into arrays, so that we can mutiply them (one as a line, another as a column) to get the table, why not let numpy build that array directly. As many numpy function, pmf can have k being a list rather than a scalar, and then returns a list rather than a scalar.\nSo\nphs=poisson.pmf(mu=xgh*(90-minute)/90, k=range(6), loc=hg)\npas=poisson.pmf(mu=xga*(90-minute)/90, k=range(6), loc=ag)\n\nSo, altogether\nprod_table=poisson.pmf(mu=xgh*(90-minute)/90, k=range(6), loc=hg).reshape(-1,1)*poisson.pmf(mu=xga*(90-minute)/90, k=range(6), loc=ag)\n\nTimings\n\n\n\n\nOptimisations\nTime in μs\n\n\n\n\nWithout\n1647 μs\n\n\nWith\n329 μs\n\n\n\n\nSo, it is not just most compact and readable. It is also (almost exactly) 5 times faster.\n" ]
[ 1 ]
[]
[]
[ "poisson", "probability", "probability_distribution", "python" ]
stackoverflow_0074507895_poisson_probability_probability_distribution_python.txt
Q: how to us the prefix if there are two forms and one submit button? I try to upload two forms with one submit button. A user can select a pdf file and a excel file. And then uploading both files. And then the contents of both are returned. So I try to upload both files with one submit button. But the two selected file options are not visible for uploading the files. So I have the template like this: {% extends 'base.html' %} {% load static %} {% block content %} <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <title>Create a Profile</title> <script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script> <link rel="stylesheet" type="text/css" href="{% static 'main/css/custom-style.css' %}" /> <link rel="stylesheet" type="text/css" href="{% static 'main/css/bootstrap.css' %}" /> </head> <body> <div class="container center"> <span class="form-inline" role="form"> <div class="inline-div"> <form class="form-inline" action="/controlepunt140" method="POST" enctype="multipart/form-data"> <div class="d-grid gap-3"> <div class="form-group"> {% csrf_token %} {{ form.0.as_p }} <button type="submit" name="form_pdf" class="btn btn-warning">Upload!</button> </div> <div class="form-outline"> <div class="form-group"> <textarea class="inline-txtarea form-control" id="content" cols="70" rows="25"> {{content}}</textarea> </div> </div> </div> <div class="d-grid gap-3"> <div class="form-group"> {{ form.1.as_p }} </div> <div class="form-outline"> <div class="form-group"> <textarea class="inline-txtarea form-control" id="content" cols="70" rows="25"> {{conten_excel}}</textarea> </div> </div> </div> </form> </div> </span> </div> </body> </html> {% endblock content %} and the views.py: class ReadingFile(View): def get(self, *args, **kwargs): return render(self.request, "main/controle_punt140.html", { "form1": UploadFileForm(), "form2": ExcelForm() }) def post(self, *args, **kwargs): filter_text = FilterText() types_of_encoding = ["utf8", "cp1252"] form1 = UploadFileForm( self.request.POST, self.request.FILES, prefix="form1") form2 = ExcelForm(self.request.FILES, self.request.FILES, prefix="form2") content = '' content_excel = '' if form1.is_valid() and form2.is_valid() and self.request.POST: uploadfile = UploadFile(image=self.request.FILES["upload_file"]) excel_file = self.request.FILES["upload_file"] uploadfile.save() for encoding_type in types_of_encoding: with open(os.path.join(settings.MEDIA_ROOT, f"{uploadfile.image}"), 'r', encoding=encoding_type) as f: if uploadfile.image.path.endswith('.pdf'): content = filter_text.show_extracted_data_from_file( uploadfile.image.path) else: content = f.read() if uploadfile.image.path.endswith('xlsx'): wb = openpyxl.load_workbook(excel_file) worksheet = wb['Sheet1'] print(worksheet) excel_data = list() for row in worksheet.iter_rows(): row_data = list() for cell in row: row_data.append(str(cell.value)) excel_data.append(row_data) print(excel_data) content_excel = excel_data else: content_excel = f.read() return render(self.request, "main/controle_punt140.html", { 'form1': ExcelForm(), 'form2': UploadFileForm(), "content": [content, content_excel] }) # I've adjusted the indent here to what I think it should be. 
return render(self.request, "main/controle_punt140.html", { "form1": form1, "form2": form2, }) and forms.py: class UploadFileForm(forms.Form): upload_file = forms.FileField(required=False) class ExcelForm(forms.Form): upload_file = forms.FileField(required=False) urls.py: urlpatterns = [ path('', views.starting_page, name='starting_page'), path('controlepunt140', views.ReadingFile.as_view(), name='controlepunt140'), ] A: The variable name used in the template is the key of the dictionary, not the value. The value is what is inserted into the template when django renders the page. You have {{form1.as__p}} in your template, but you send "form": [form1, form2] as your context, so the variable in the template should be {{ form.0.as_p }} and {{ form.1.as_p }}. I haven't tested this, but if it doesn't work, you could just send the two forms separately like: from django.shortcuts import redirect class ReadingFile(View): def get(self, *args, **kwargs): return render(self.request, "main/controle_punt140.html", { "form1": UploadFileForm(), "form2": ExcelForm() }) def post(self, *args, **kwargs): filter_text = FilterText() types_of_encoding = ["utf8", "cp1252"] form1 = UploadFileForm(self.request.POST, self.request.FILES, prefix="form1") form2 = ExcelForm(self.request.FILES, self.request.FILES, prefix="form2") content = '' content_excel = '' if form1.is_valid() and form2.is_valid() and self.request.POST: uploadfile = UploadFile(image=self.request.FILES["upload_file"]) excel_file = self.request.FILES["upload_file"] uploadfile.save() for encoding_type in types_of_encoding: with open(os.path.join(settings.MEDIA_ROOT, f"{uploadfile.image}"), 'r', encoding=encoding_type) as f: if uploadfile.image.path.endswith('.pdf'): content = filter_text.show_extracted_data_from_file( uploadfile.image.path) else: content = f.read() if uploadfile.image.path.endswith('xlsx'): #Uploading excel form: #this is just logic. pass else: content_excel = f.read() # You probably should do a redirect after the form is # submitted, rather than render the page. return redirect('main:controlepunt140') # return render(self.request, "main/controle_punt140.html", { 'form1': ExcelForm(), 'form2': UploadFileForm(), "content": [content, content_excel] }) # I've adjusted the indent here to what I think it should be. return render(self.request, "main/controle_punt140.html", { "form1": form1, "form2": form2, }) You probably should also change to a redirect after the form is submitted and saved successfully. Check out Post/Redirect/Get and/or rendering content after a successful post request. Edit Changed template to use {{ form.0.as_p }} as indicated by @nigel239 You can redirect to the same page where the form was submitted, so if the user hits the refresh button on their browser for some reason, you will not get an alert box asking the user to resend the form.
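As a side note on the prefix from the question's title, here is a stripped-down sketch of the usual two-forms/one-submit wiring (my own illustration, untested against the question's template; render, redirect, View and the two form classes are as imported above). The same prefix must be passed to both the unbound (GET) and bound (POST) instances, and a bound form takes (data, files):

class ReadingFile(View):
    def get(self, request, *args, **kwargs):
        return render(request, "main/controle_punt140.html", {
            "form1": UploadFileForm(prefix="form1"),
            "form2": ExcelForm(prefix="form2"),
        })

    def post(self, request, *args, **kwargs):
        form1 = UploadFileForm(request.POST, request.FILES, prefix="form1")
        form2 = ExcelForm(request.POST, request.FILES, prefix="form2")
        if form1.is_valid() and form2.is_valid():
            pdf_file = form1.cleaned_data["upload_file"]    # rendered as name="form1-upload_file"
            excel_file = form2.cleaned_data["upload_file"]  # rendered as name="form2-upload_file"
            # ... process both files, then Post/Redirect/Get ...
            return redirect("controlepunt140")
        return render(request, "main/controle_punt140.html",
                      {"form1": form1, "form2": form2})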
how to use the prefix if there are two forms and one submit button?
I try to upload two forms with one submit button. A user can select a pdf file and a excel file. And then uploading both files. And then the contents of both are returned. So I try to upload both files with one submit button. But the two selected file options are not visible for uploading the files. So I have the template like this: {% extends 'base.html' %} {% load static %} {% block content %} <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <title>Create a Profile</title> <script src="https://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script> <link rel="stylesheet" type="text/css" href="{% static 'main/css/custom-style.css' %}" /> <link rel="stylesheet" type="text/css" href="{% static 'main/css/bootstrap.css' %}" /> </head> <body> <div class="container center"> <span class="form-inline" role="form"> <div class="inline-div"> <form class="form-inline" action="/controlepunt140" method="POST" enctype="multipart/form-data"> <div class="d-grid gap-3"> <div class="form-group"> {% csrf_token %} {{ form.0.as_p }} <button type="submit" name="form_pdf" class="btn btn-warning">Upload!</button> </div> <div class="form-outline"> <div class="form-group"> <textarea class="inline-txtarea form-control" id="content" cols="70" rows="25"> {{content}}</textarea> </div> </div> </div> <div class="d-grid gap-3"> <div class="form-group"> {{ form.1.as_p }} </div> <div class="form-outline"> <div class="form-group"> <textarea class="inline-txtarea form-control" id="content" cols="70" rows="25"> {{conten_excel}}</textarea> </div> </div> </div> </form> </div> </span> </div> </body> </html> {% endblock content %} and the views.py: class ReadingFile(View): def get(self, *args, **kwargs): return render(self.request, "main/controle_punt140.html", { "form1": UploadFileForm(), "form2": ExcelForm() }) def post(self, *args, **kwargs): filter_text = FilterText() types_of_encoding = ["utf8", "cp1252"] form1 = UploadFileForm( self.request.POST, self.request.FILES, prefix="form1") form2 = ExcelForm(self.request.FILES, self.request.FILES, prefix="form2") content = '' content_excel = '' if form1.is_valid() and form2.is_valid() and self.request.POST: uploadfile = UploadFile(image=self.request.FILES["upload_file"]) excel_file = self.request.FILES["upload_file"] uploadfile.save() for encoding_type in types_of_encoding: with open(os.path.join(settings.MEDIA_ROOT, f"{uploadfile.image}"), 'r', encoding=encoding_type) as f: if uploadfile.image.path.endswith('.pdf'): content = filter_text.show_extracted_data_from_file( uploadfile.image.path) else: content = f.read() if uploadfile.image.path.endswith('xlsx'): wb = openpyxl.load_workbook(excel_file) worksheet = wb['Sheet1'] print(worksheet) excel_data = list() for row in worksheet.iter_rows(): row_data = list() for cell in row: row_data.append(str(cell.value)) excel_data.append(row_data) print(excel_data) content_excel = excel_data else: content_excel = f.read() return render(self.request, "main/controle_punt140.html", { 'form1': ExcelForm(), 'form2': UploadFileForm(), "content": [content, content_excel] }) # I've adjusted the indent here to what I think it should be. 
return render(self.request, "main/controle_punt140.html", { "form1": form1, "form2": form2, }) and forms.py: class UploadFileForm(forms.Form): upload_file = forms.FileField(required=False) class ExcelForm(forms.Form): upload_file = forms.FileField(required=False) urls.py: urlpatterns = [ path('', views.starting_page, name='starting_page'), path('controlepunt140', views.ReadingFile.as_view(), name='controlepunt140'), ]
[ "The variable name used in the template is the key of the dictionary, not the value. The value is what is inserted into the template when django renders the page.\nYou have {{form1.as__p}} in your template, but you send \"form\": [form1, form2] as your context, so the variable in the template should be {{ form.0.as_p }} and {{ form.1.as_p }}. I haven't tested this, but if it doesn't work, you could just send the two forms separately like:\nfrom django.shortcuts import redirect\n\nclass ReadingFile(View):\n def get(self, *args, **kwargs):\n return render(self.request, \"main/controle_punt140.html\", {\n \"form1\": UploadFileForm(),\n \"form2\": ExcelForm()\n })\n\n def post(self, *args, **kwargs):\n filter_text = FilterText()\n types_of_encoding = [\"utf8\", \"cp1252\"]\n form1 = UploadFileForm(self.request.POST, self.request.FILES, prefix=\"form1\")\n form2 = ExcelForm(self.request.FILES, self.request.FILES, prefix=\"form2\")\n content = ''\n content_excel = ''\n\n if form1.is_valid() and form2.is_valid() and self.request.POST:\n uploadfile = UploadFile(image=self.request.FILES[\"upload_file\"])\n excel_file = self.request.FILES[\"upload_file\"]\n\n uploadfile.save()\n\n for encoding_type in types_of_encoding:\n with open(os.path.join(settings.MEDIA_ROOT, f\"{uploadfile.image}\"), 'r', encoding=encoding_type) as f:\n if uploadfile.image.path.endswith('.pdf'):\n content = filter_text.show_extracted_data_from_file(\n uploadfile.image.path)\n else:\n content = f.read()\n\n if uploadfile.image.path.endswith('xlsx'):\n \n #Uploading excel form:\n #this is just logic. \n pass\n\n else:\n content_excel = f.read()\n \n # You probably should do a redirect after the form is\n # submitted, rather than render the page.\n return redirect('main:controlepunt140')\n # return render(self.request, \"main/controle_punt140.html\", {\n 'form1': ExcelForm(), \n 'form2': UploadFileForm(),\n \"content\": [content, content_excel]\n })\n \n # I've adjusted the indent here to what I think it should be.\n return render(self.request, \"main/controle_punt140.html\", {\n \"form1\": form1, \n \"form2\": form2,\n })\n\n\nYou probable should also change to a redirect after the form is submitted and saved successfully. Check out Post/Redirect/Get and/or rendering content after a succesful post request.\nEdit\nChanged template to use {{ form.0.as_p }} as indicated by @nigel239\nYou can redirect to the same page where the form was submitted, so if the user hits the refresh button on their browser for some reason, you will not get an alert box asking the user to resend the form.\n" ]
[ 1 ]
[]
[]
[ "django", "python" ]
stackoverflow_0074508785_django_python.txt
Q: How do I implement a range function in this program? I am making a program that allows students to predict their progression at the end of each academic year. ble 1: Progression outcomes as defined by the University regulations. Volume of Credit at Each Level fist digit is Pass second digit is Defer third digit is Fail i have already implmented this in to my program. however ive been asked to implement a range function in the the program so if anyone enters anything other then 0, 20, 40,60, 80,100 and 120, they should get an error that says "not in range" and asks them again to input the numbers again. print("Welcome to University of Westminster grade calculator") while True: passCR = input("Enter your pass credits") if passCR.isdigit(): passCR = int(passCR) break else: print("Not an integer Value!try again") while True: deferCR = input("Enter your defer credits") if deferCR.isdigit(): deferCR = int(deferCR) break else: print("Not an integer Value!try again") while True: failCR = input("Enter your fail credits") if failCR.isdigit(): failCR = int(failCR) break else: print("Not an integer Value!try again") def input_valid_number(which="pass"): while True: n = input("enter your {} credits: ".format(which)) try: n = int(n) if 0 <= n <= 120 and (n % 20) == 0: return n except: pass while True: passCR = input_valid_number("pass") deferCR = input_valid_number("defer") failCR = input_valid_number("fail") if sum([passCR, deferCR, failCR]) == 120: break print("Your Total Credits do not add up to 120. Please try again!") if passCR == 120 and deferCR == 0 and failCR== 0: #1 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("progress") elif passCR == 100 and deferCR == 20 and failCR== 0: #2 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("progress - module trailer") elif passCR == 100 and deferCR == 0 and failCR== 20: #3 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("progress - module trailer") elif passCR == 80 and deferCR == 40 and failCR== 0: #4 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 80 and deferCR == 20 and failCR== 20: #5 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 80 and deferCR == 0 and failCR== 40: #6 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 60 and deferCR == 60 and failCR== 0: #7 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 60 and deferCR == 40 and failCR== 20: #8 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 60 and deferCR == 20 and failCR== 40: #9 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 60 and deferCR == 0 and failCR== 60: #10 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 40 and deferCR == 80 and failCR== 0: #11 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 40 and deferCR == 60 and failCR== 20: #12 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 40 
and deferCR == 40 and failCR== 40: #13 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 40 and deferCR == 20 and failCR== 60: #14 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 40 and deferCR == 0 and failCR== 80: #15 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Exclude") elif passCR == 20 and deferCR == 100 and failCR== 0: #16 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 20 and deferCR == 80 and failCR== 20: #17 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 20 and deferCR == 60 and failCR== 40: #18 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 20 and deferCR == 40 and failCR== 60: #19 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 20 and deferCR == 20 and failCR== 80: #20 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Exclude") elif passCR == 20 and deferCR == 0 and failCR== 100: #21 print("Exclude",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 0 and deferCR == 120 and failCR== 0: #22 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 0 and deferCR == 100 and failCR== 20: #23 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 0 and deferCR == 80 and failCR== 40: #24 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 0 and deferCR == 60 and failCR== 60: #25 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 0 and deferCR == 40 and failCR== 80: #26 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Exclude") elif passCR == 0 and deferCR == 20 and failCR== 100: #27 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Exclude") elif passCR == 0 and deferCR == 0 and failCR== 120: #28 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Exclude") 1-how can i implement a range function that only allows the user to input 0,20,40,60,80,100,120? 2-the program should also let the user know if the total sum of pass,defer and fail is not 120, then they should get a message "total incorrect". then rerun the same question. PS i have included images of my program below for a clearer image. https://imgur.com/0PzgE3J https://imgur.com/sSURE4h A: For part 1, a simple func can test that the input value exists between the two end values and is a multiple of 20. The 'mod' func is good for the multiple part def input_valid_number(which="pass"): while True: n = input("Enter your {} credits: ".format(which)).strip() if n.isdigit(): n = int(n) if 0 <= n <= 120 and (n % 20) == 0: return n else: print("Invalid Selection. Please enter 0, 20, 40, 60, 80, 100, or 120!") else: print("Not an Integer Value. Please try again!") For part 2, the below should do the trick... 
while True: passCR = input_valid_number("pass") deferCR = input_valid_number("defer") failCR = input_valid_number("fail") if sum([passCR, deferCR, failCR]) == 120: break print("Your Total Credits do not add up to 120. Please try again!") A: Doing the same westminster uni task but, as late as I am, it would be more efficent to just check the number of credits for pass as no matter the other numbers the pass credits are the decider. For example all combinations with pass credit 100 have their outcome as 'Progress module trailer'
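For what it's worth, a compact sketch of that observation (my own reading of the question's table, so treat the mapping as an assumption; it also assumes branch #21 above, 20/0/100, was meant to print "Exclude" like the other cases with fail credits of 80 or more). Once the total is validated as 120, the whole elif chain collapses to:

def progression_outcome(passCR, deferCR, failCR):
    # outcome depends only on pass credits, plus an exclusion check on fail credits
    if passCR == 120:
        return "Progress"
    if passCR == 100:
        return "Progress - module trailer"
    if failCR >= 80:
        return "Exclude"
    return "Do not progress - module trailer"

print("Your pass, defer and fail credits are", passCR, deferCR, failCR)
print(progression_outcome(passCR, deferCR, failCR))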
How do I implement a range function in this program?
I am making a program that allows students to predict their progression at the end of each academic year. ble 1: Progression outcomes as defined by the University regulations. Volume of Credit at Each Level fist digit is Pass second digit is Defer third digit is Fail i have already implmented this in to my program. however ive been asked to implement a range function in the the program so if anyone enters anything other then 0, 20, 40,60, 80,100 and 120, they should get an error that says "not in range" and asks them again to input the numbers again. print("Welcome to University of Westminster grade calculator") while True: passCR = input("Enter your pass credits") if passCR.isdigit(): passCR = int(passCR) break else: print("Not an integer Value!try again") while True: deferCR = input("Enter your defer credits") if deferCR.isdigit(): deferCR = int(deferCR) break else: print("Not an integer Value!try again") while True: failCR = input("Enter your fail credits") if failCR.isdigit(): failCR = int(failCR) break else: print("Not an integer Value!try again") def input_valid_number(which="pass"): while True: n = input("enter your {} credits: ".format(which)) try: n = int(n) if 0 <= n <= 120 and (n % 20) == 0: return n except: pass while True: passCR = input_valid_number("pass") deferCR = input_valid_number("defer") failCR = input_valid_number("fail") if sum([passCR, deferCR, failCR]) == 120: break print("Your Total Credits do not add up to 120. Please try again!") if passCR == 120 and deferCR == 0 and failCR== 0: #1 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("progress") elif passCR == 100 and deferCR == 20 and failCR== 0: #2 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("progress - module trailer") elif passCR == 100 and deferCR == 0 and failCR== 20: #3 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("progress - module trailer") elif passCR == 80 and deferCR == 40 and failCR== 0: #4 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 80 and deferCR == 20 and failCR== 20: #5 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 80 and deferCR == 0 and failCR== 40: #6 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 60 and deferCR == 60 and failCR== 0: #7 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 60 and deferCR == 40 and failCR== 20: #8 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 60 and deferCR == 20 and failCR== 40: #9 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 60 and deferCR == 0 and failCR== 60: #10 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 40 and deferCR == 80 and failCR== 0: #11 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 40 and deferCR == 60 and failCR== 20: #12 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 40 and deferCR == 40 and failCR== 40: #13 print("Your pass, 
defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 40 and deferCR == 20 and failCR== 60: #14 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 40 and deferCR == 0 and failCR== 80: #15 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Exclude") elif passCR == 20 and deferCR == 100 and failCR== 0: #16 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 20 and deferCR == 80 and failCR== 20: #17 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 20 and deferCR == 60 and failCR== 40: #18 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 20 and deferCR == 40 and failCR== 60: #19 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 20 and deferCR == 20 and failCR== 80: #20 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Exclude") elif passCR == 20 and deferCR == 0 and failCR== 100: #21 print("Exclude",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 0 and deferCR == 120 and failCR== 0: #22 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 0 and deferCR == 100 and failCR== 20: #23 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 0 and deferCR == 80 and failCR== 40: #24 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 0 and deferCR == 60 and failCR== 60: #25 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Do not progress - module trailer") elif passCR == 0 and deferCR == 40 and failCR== 80: #26 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Exclude") elif passCR == 0 and deferCR == 20 and failCR== 100: #27 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Exclude") elif passCR == 0 and deferCR == 0 and failCR== 120: #28 print("Your pass, defer and fail credits are",passCR, deferCR, failCR) print ("Exclude") 1-how can i implement a range function that only allows the user to input 0,20,40,60,80,100,120? 2-the program should also let the user know if the total sum of pass,defer and fail is not 120, then they should get a message "total incorrect". then rerun the same question. PS i have included images of my program below for a clearer image. https://imgur.com/0PzgE3J https://imgur.com/sSURE4h
[ "For part 1, a simple func can test that the input value exists between the two end values and is a multiple of 20. The 'mod' func is good for the multiple part\ndef input_valid_number(which=\"pass\"):\n while True:\n n = input(\"Enter your {} credits: \".format(which)).strip()\n if n.isdigit():\n n = int(n)\n if 0 <= n <= 120 and (n % 20) == 0:\n return n\n else:\n print(\"Invalid Selection. Please enter 0, 20, 40, 60, 80, 100, or 120!\")\n else:\n print(\"Not an Integer Value. Please try again!\")\n\nFor part 2, the below should do the trick...\nwhile True:\n passCR = input_valid_number(\"pass\")\n deferCR = input_valid_number(\"defer\")\n failCR = input_valid_number(\"fail\")\n if sum([passCR, deferCR, failCR]) == 120:\n break\n print(\"Your Total Credits do not add up to 120. Please try again!\")\n\n", "Doing the same westminster uni task but, as late as I am, it would be more efficent to just check the number of credits for pass as no matter the other numbers the pass credits are the decider. For example all combinations with pass credit 100 have their outcome as 'Progress module trailer'\n" ]
[ 0, 0 ]
[]
[]
[ "integer", "python", "range" ]
stackoverflow_0058791012_integer_python_range.txt
Q: Creating simple password cracker using numpy arrays I'm Trying to create a (number) password cracker function using numpy arrays instead of for-loops. What can I add to my cracker function to avoid this error? (See image of code attached) Image of my code I want the cracker function to return the value in the 'possible' array that returns 'Correct' when used as the argument in the password function. A: You can refer to my way def password(correctedpassword): if 13 in correctedpassword: return "Correct" else: return "Incorrect" def cracker(testrange): possible = np.linspace(0,testrange,testrange+1) return password(possible) Output when call function cracker(100): 'Correct' Output when call function cracker(12): 'Incorrect' A: Please do not post pictures of your code, for future reference. Paste and format it so that another user can copy it and help you easily. What you need is to apply the function password() on every element of the array possible, and to return the index where the value is correct. The simplest way would be to do it with a loop: def cracker(testrange): possible = np.linspace(0, testrange, testrange + 1, dtype=int) results = [p for p in possible if password(p) == "Correct"] if len(results): return results[0] Alternatively, you can also use numpy's vectorize function: def cracker(testrange): possible = np.linspace(0, testrange, testrange + 1, dtype=int) results = possible[np.vectorize(password)(possible) == "Correct"] if len(results): return results[0] Explanation Simply put, the function you had, password, takes a single number, checks it, and returns a single output. But you need to do this not for just a single number, but a whole array of numbers (possible). np.vectorize helps you do exactly this; it is no different from a for loop. I will further break down the steps in results = possible[np.vectorize(password)(possible) == "Correct"]: check_passwords = np.vectorize(password) # check_passwords is now a new function which will work on an array. # Basically it is like running the old function password() multiple # times inside a loop output = check_passwords(possible) # array containing ['Incorrect', 'Incorrect', ... , 'Correct', ... , 'Incorrect'] position = output == 'Correct' # array containing [False, False, ... , True, ... , False] result = possible[output] # gives the elements of possible which was 'Correct' return result[0]
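To make the call pattern concrete, a runnable toy version (the secret value 42 and this password() body are hypothetical stand-ins, since the original password function is only shown in the screenshot):

import numpy as np

def password(n):
    return "Correct" if n == 42 else "Incorrect"

def cracker(testrange):
    possible = np.linspace(0, testrange, testrange + 1, dtype=int)
    # boolean mask selects the values for which password() says "Correct"
    matches = possible[np.vectorize(password)(possible) == "Correct"]
    return matches[0] if len(matches) else None

print(cracker(100))  # prints 42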
Creating simple password cracker using numpy arrays
I'm Trying to create a (number) password cracker function using numpy arrays instead of for-loops. What can I add to my cracker function to avoid this error? (See image of code attached) Image of my code I want the cracker function to return the value in the 'possible' array that returns 'Correct' when used as the argument in the password function.
[ "You can refer to my way\ndef password(correctedpassword):\n if 13 in correctedpassword:\n return \"Correct\"\n else:\n return \"Incorrect\"\n \ndef cracker(testrange):\n possible = np.linspace(0,testrange,testrange+1)\n return password(possible)\n\nOutput when call function cracker(100):\n'Correct'\n\nOutput when call function cracker(12):\n'Incorrect'\n\n", "Please do not post pictures of your code, for future reference. Paste and format it so that another user can copy it and help you easily.\n\nWhat you need is to apply the function password() on every element of the array possible, and to return the index where the value is correct. The simplest way would be to do it with a loop:\ndef cracker(testrange):\n possible = np.linspace(0, testrange, testrange + 1, dtype=int)\n results = [p for p in possible if password(p) == \"Correct\"]\n if len(results):\n return results[0]\n\n\nAlternatively, you can also use numpy's vectorize function:\ndef cracker(testrange):\n possible = np.linspace(0, testrange, testrange + 1, dtype=int)\n results = possible[np.vectorize(password)(possible) == \"Correct\"]\n if len(results):\n return results[0]\n\nExplanation\nSimply put, the function you had, password, takes a single number, checks it, and returns a single output. But you need to do this not for just a single number, but a whole array of numbers (possible). np.vectorize helps you do exactly this; it is no different from a for loop. I will further break down the steps in results = possible[np.vectorize(password)(possible) == \"Correct\"]:\ncheck_passwords = np.vectorize(password)\n# check_passwords is now a new function which will work on an array.\n# Basically it is like running the old function password() multiple\n# times inside a loop\n\noutput = check_passwords(possible)\n# array containing ['Incorrect', 'Incorrect', ... , 'Correct', ... , 'Incorrect']\n\nposition = output == 'Correct'\n# array containing [False, False, ... , True, ... , False]\n\nresult = possible[output]\n# gives the elements of possible which was 'Correct'\n\nreturn result[0]\n\n" ]
[ 0, 0 ]
[]
[]
[ "numpy", "python", "python_3.x" ]
stackoverflow_0074508661_numpy_python_python_3.x.txt
Q: Selenium: trying to upload two files but three or more files have been uploaded I tried to add a photo to Facebook Marketplace with Selenium in Python like this: driver.find_element(By.XPATH, '//input[@type="file"]').send_keys('C:/image.jpg') When I try to send one photo, one photo is sent. The problem is when I try to send two or more photos like this: driver.get('https://www.facebook.com/marketplace/create/item') for ko in range(0, 2): driver.find_element(By.XPATH, '//input[@type="file"]').send_keys('C:/img_from_url.jpg') three photos are sent. I don't know where the problem is; when I send two photos I want exactly two photos to be sent. This is my simple code, please help me, any answer will be appreciated. A: This seems to be a bug. When uploading a file with the send_keys() method in a loop, the file is uploaded twice. I.e. if you perform send_keys 2 times, 4 files will be uploaded; for 3 iterations, 6 files will be uploaded, etc. Currently I see no solution for this issue. F.e. adding a delay inside the loop doesn't resolve it. I see a similar question was already asked, but no solution was given there either.
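One possible workaround (a sketch, untested against Facebook's current markup): when the file input has the multiple attribute, Selenium accepts several paths in a single send_keys call, separated by newlines, which avoids the loop entirely:

from selenium.webdriver.common.by import By

# driver is the existing webdriver instance from the question;
# the two paths are hypothetical
files = ["C:/image1.jpg", "C:/image2.jpg"]

upload = driver.find_element(By.XPATH, '//input[@type="file"]')
upload.send_keys("\n".join(files))  # one call, exactly these two files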
Selenium: trying to upload two files but three or more files have been uploaded
I tried to add photo to Facebook marketplace in here with selenium python like this: driver.find_element(By.XPATH, '//input[@type="file"]').send_keys('C:/image.jpg') when I try to send one photo, one photo have been sent, the problem is when I try to send two or more photo like this: driver.get('https://www.facebook.com/marketplace/create/item') for ko in range(0, 2): driver.find_element(By.XPATH, '//input[@type="file"]').send_keys('C:/img_from_url.jpg') three photos have been sent, I don't know where is the problem, so i want where I send two photo, only two photo have been sent, this is my simple code, please help me, any answer will be appreciated
[ "This seems to be a bug.\nWhen uploading a file with a send_keys() method in a loop the file is being uploaded twice.\nI.e. if you performing send keys 2 times the file file be uploaded 4 time, for 3 iterations 6 files will be uploaded etc.\nCurrently I see no solution for this issue. F.e. adding a delay inside the loop doesn't resolve it.\nI see similar question was already asked but no solution was given there too.\n" ]
[ 1 ]
[]
[]
[ "file_upload", "python", "selenium", "selenium_webdriver" ]
stackoverflow_0074508781_file_upload_python_selenium_selenium_webdriver.txt
Q: How to add numbers 1 to 8 on the left and on the right Hi, I have a small problem: I don't know how to add numbering from 1 to 8 on the right and on the left of this program. Here is the list, but how do I add the numbers on the left and on the right? I did this with the letters at the top and bottom. Here is my code sachy = [[0, 1, 0, 1, 0, 1, 0, 1], [1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 2, 0, 2, 0, 2, 0, 2], [2, 0, 2, 0, 2, 0, 2, 0], [0, 2, 0, 2, 0, 2, 0, 2]] poradi = ["a", "b", "c", "d", "e", "f", "g", "h"] poradi_2 = [1, 2, 3, 4, 5, 6, 7, 8] for prvek in poradi: print(prvek, end=" ") print() print() for seznam in sachy: for prvek in seznam: print(prvek, end=" ") print(end="\n", ) print() for prvek in poradi: print(prvek, end=" ") I tried to write another list with the order from 1 to 8 into seznam, but it always gets multiplied by 8 because sachy is 8x8. A: You need to pair the row indices with the row itself; also use " ".join() for shorter code print(" ", " ".join(poradi), "\n") for idx, seznam in zip(poradi_2, sachy): print(idx, " ".join(map(str, seznam)), idx) print("\n ", " ".join(poradi), "\n\n") a b c d e f g h 1 0 1 0 1 0 1 0 1 1 2 1 0 1 0 1 0 1 0 2 3 0 1 0 1 0 1 0 1 3 4 0 0 0 0 0 0 0 0 4 5 0 0 0 0 0 0 0 0 5 6 0 2 0 2 0 2 0 2 6 7 2 0 2 0 2 0 2 0 7 8 0 2 0 2 0 2 0 2 8 a b c d e f g h
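An equivalent variant with enumerate(), which makes the separate poradi_2 list unnecessary (same output as above):

print(" ", " ".join(poradi), "\n")

# enumerate(..., start=1) yields (1, row1), (2, row2), ...
for idx, seznam in enumerate(sachy, start=1):
    print(idx, " ".join(map(str, seznam)), idx)

print("\n ", " ".join(poradi), "\n\n")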
How to add numbers 1 to 8 on the left and on the right
Hi I have small problem I dont know how to add oder like from 1 to number 8 on the right and on the left of this program.Here is the list but How to add numbers on the left and on the righ. I did this with letters up and down Here is my code sachy = [[0, 1, 0, 1, 0, 1, 0, 1], [1, 0, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 2, 0, 2, 0, 2, 0, 2], [2, 0, 2, 0, 2, 0, 2, 0], [0, 2, 0, 2, 0, 2, 0, 2]] poradi = ["a", "b", "c", "d", "e", "f", "g", "h"] poradi_2 = [1, 2, 3, 4, 5, 6, 7, 8] for prvek in poradi: print(prvek, end=" ") print() print() for seznam in sachy: for prvek in seznam: print(prvek, end=" ") print(end="\n", ) print() for prvek in poradi: print(prvek, end=" ") I try to write another list of order from 1 to 8 into the seznam but will always multiple by 8 becaouse of sachy that are 8x8.
[ "You need to pair the row indices with the row itself, also use \" \".join() for shorted code\nprint(\" \", \" \".join(poradi), \"\\n\")\n\nfor idx, seznam in zip(poradi_2, sachy):\n print(idx, \" \".join(map(str, seznam)), idx)\n\nprint(\"\\n \", \" \".join(poradi), \"\\n\\n\")\n\n a b c d e f g h\n\n1 0 1 0 1 0 1 0 1 1\n2 1 0 1 0 1 0 1 0 2\n3 0 1 0 1 0 1 0 1 3\n4 0 0 0 0 0 0 0 0 4\n5 0 0 0 0 0 0 0 0 5\n6 0 2 0 2 0 2 0 2 6\n7 2 0 2 0 2 0 2 0 7\n8 0 2 0 2 0 2 0 2 8\n\n a b c d e f g h\n\n" ]
[ 0 ]
[]
[]
[ "list", "numbers", "python" ]
stackoverflow_0074509279_list_numbers_python.txt
Q: How can I access and manage iterables inside each pandas.DataFrame column? I have the following JSON file: { "IMG1.tif": { "0": [ 100, 192, [ 129, 42, 32 ] ], "1": [ 299, 208, [ 133, 42, 24 ] ] }, "IMG2.tif": { "0": [ 100, 207, [ 128, 41, 34 ] ], "1": [ 299, 192, [ 81, 25, 26 ] ] } } I'm reading into a dataframe with df = pd.read_json('img_data.json', orient = 'columns'). I find that this is a clear and logical way to store the information I want to store, but I want to access each of the values for each column and be able to iterate across/work with them. For example, in this case, these values are coordinates. I'd like to, in the most convenient and natural way possible, be able to access the x, y or z axis value(s) for every coordinate in each column, i.e. (something like): >>> df["IMG1.tif"][0,:] 0 100 1 299 or even filter across the whole dataframe: >>> get_y_values(df) IMG1.tif IMG2.tif 0 192 207 1 208 192 I also accept suggestions on how to change the way the data is stored (it may be necessary), but I don't think I can store values outside lists because of the way they're obtained - meaning that, as you can see, "IMG.1.tif": { "0": [100, 192, [129, 42, 32]] ... each 3-set of coordinates in the dataframe is shown inside a list. In case some of you are curious or confused, z axis values are just RGB values. At some point I will need to transform them into grayscale inside the database, too: >>> do_grayscale(df) # example values IMG1.tif IMG2.tif 0 [100, 192, 61] [100, 207, 87] 1 [299, 208, 122] [299, 192, 94] Added: one of the alternative ways to have the original data stored, albeit with sacrifices in the original code, would be something like this: x y z image_name 0 100 192 [129, 42, 32] IMG1.tif 1 299 208 [133, 42, 24] IMG1.tif 2 100 207 [128, 41, 34] IMG2.tif 3 299 192 [81, 25, 26] IMG2.tif A: I'd suggest building a dataframe with multiindex columns: df = df.T # first transpose your df df_out = pd.concat([ pd.DataFrame(df[col].tolist(), index=df.index, columns=pd.MultiIndex.from_tuples(zip([col]*3, ["x", "y", "z"])) ) for col in df.columns ], axis=1 ) This will give you the following df: 0 1 x y z x y z IMG1.tif 100 192 [129, 42, 32] 299 208 [133, 42, 24] IMG2.tif 100 207 [128, 41, 34] 299 192 [81, 25, 26] You can then access any element of your frame with the locmethod. For instance: df_out.loc['IMG1.tif', (0, "y") # returns 192 df_out.loc['IMG1.tif', ([0, 1], "x")] # returns a series with 100 and 299 df_out.loc[:, ([0, 1], "y")] # will get you all y values (granted you have only 0 and 1... edit accordingly) Edit: if 0 and 1 are not relevant as index and you want the structure of your last example: df_out = pd.concat([ pd.DataFrame(df[col].tolist(), columns=["x", "y", "z"]).assign(image_name=col) for col in df.columns ]).reset_index(drop=True) (Edit 2: directly iterating over df columns) Output: x y z image_name 0 100 192 [129, 42, 32] IMG1.tif 1 299 208 [133, 42, 24] IMG1.tif 2 100 207 [128, 41, 34] IMG2.tif 3 299 192 [81, 25, 26] IMG2.tif
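For the do_grayscale step mentioned in the question, a possible sketch built on the flat x/y/z layout above; the luminosity weights are the common ITU-R 601 ones, which is an assumption about what grayscale should mean here:

def do_grayscale(df):
    out = df.copy()
    # collapse each [r, g, b] list in z to one gray value
    out["z"] = out["z"].apply(
        lambda rgb: round(0.299 * rgb[0] + 0.587 * rgb[1] + 0.114 * rgb[2]))
    return out

# do_grayscale(df_out)["z"] then holds scalars, e.g. [129, 42, 32] -> 67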
How can I access and manage iterables inside each pandas.DataFrame column?
I have the following JSON file: { "IMG1.tif": { "0": [ 100, 192, [ 129, 42, 32 ] ], "1": [ 299, 208, [ 133, 42, 24 ] ] }, "IMG2.tif": { "0": [ 100, 207, [ 128, 41, 34 ] ], "1": [ 299, 192, [ 81, 25, 26 ] ] } } I'm reading into a dataframe with df = pd.read_json('img_data.json', orient = 'columns'). I find that this is a clear and logical way to store the information I want to store, but I want to access each of the values for each column and be able to iterate across/work with them. For example, in this case, these values are coordinates. I'd like to, in the most convenient and natural way possible, be able to access the x, y or z axis value(s) for every coordinate in each column, i.e. (something like): >>> df["IMG1.tif"][0,:] 0 100 1 299 or even filter across the whole dataframe: >>> get_y_values(df) IMG1.tif IMG2.tif 0 192 207 1 208 192 I also accept suggestions on how to change the way the data is stored (it may be necessary), but I don't think I can store values outside lists because of the way they're obtained - meaning that, as you can see, "IMG.1.tif": { "0": [100, 192, [129, 42, 32]] ... each 3-set of coordinates in the dataframe is shown inside a list. In case some of you are curious or confused, z axis values are just RGB values. At some point I will need to transform them into grayscale inside the database, too: >>> do_grayscale(df) # example values IMG1.tif IMG2.tif 0 [100, 192, 61] [100, 207, 87] 1 [299, 208, 122] [299, 192, 94] Added: one of the alternative ways to have the original data stored, albeit with sacrifices in the original code, would be something like this: x y z image_name 0 100 192 [129, 42, 32] IMG1.tif 1 299 208 [133, 42, 24] IMG1.tif 2 100 207 [128, 41, 34] IMG2.tif 3 299 192 [81, 25, 26] IMG2.tif
[ "I'd suggest building a dataframe with multiindex columns:\ndf = df.T # first transpose your df\n\ndf_out = pd.concat([\n pd.DataFrame(df[col].tolist(), index=df.index,\n columns=pd.MultiIndex.from_tuples(zip([col]*3, [\"x\", \"y\", \"z\"]))\n ) for col in df.columns\n], axis=1\n)\n\nThis will give you the following df:\n 0 1 \n x y z x y z\nIMG1.tif 100 192 [129, 42, 32] 299 208 [133, 42, 24]\nIMG2.tif 100 207 [128, 41, 34] 299 192 [81, 25, 26]\n\nYou can then access any element of your frame with the locmethod. For instance:\ndf_out.loc['IMG1.tif', (0, \"y\") # returns 192\ndf_out.loc['IMG1.tif', ([0, 1], \"x\")] # returns a series with 100 and 299\ndf_out.loc[:, ([0, 1], \"y\")] # will get you all y values (granted you have only 0 and 1... edit accordingly)\n\nEdit: if 0 and 1 are not relevant as index and you want the structure of your last example:\ndf_out = pd.concat([\n pd.DataFrame(df[col].tolist(), columns=[\"x\", \"y\", \"z\"]).assign(image_name=col)\n for col in df.columns\n]).reset_index(drop=True)\n\n(Edit 2: directly iterating over df columns)\nOutput:\n x y z image_name\n0 100 192 [129, 42, 32] IMG1.tif\n1 299 208 [133, 42, 24] IMG1.tif\n2 100 207 [128, 41, 34] IMG2.tif\n3 299 192 [81, 25, 26] IMG2.tif\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074509039_dataframe_pandas_python.txt
Q: Python transform data long to wide I'm looking to transform some data in Python. Originally, in column 1 there are various identifiers (A to E in this example) associated with towns in column 2. There is a separate row for each identifier and town association. There can be any number of identifier-to-town associations. I'd like to end up with ONE row per identifier and with all the associated towns going horizontally, separated by commas. Tried using long to wide but having difficulty in doing the above; appreciate any suggestions. Thank you A: One way to do it is using groupby. For example, you can group by column 1 and apply a function that returns the list of unique values for each group (i.e. each code). import numpy as np import pandas as pd df = pd.DataFrame({ 'col1': 'A A A A B B C C C D E E E E E'.split(' '), 'col2': ['Accrington', 'Acle', 'Suffolk', 'Hampshire', 'Lincolnshire', 'Derbyshire', 'Aldershot', 'Alford', 'Cumbria', 'Hampshire', 'Bath', 'Alston', 'Greater Manchester', 'Northumberland', 'Cumbria'], }) def get_towns(town_list): return ', '.join(np.unique(town_list)) df.groupby('col1')['col2'].apply(get_towns) And the result is: col1 A Accrington, Acle, Hampshire, Suffolk B Derbyshire, Lincolnshire C Aldershot, Alford, Cumbria D Hampshire E Alston, Bath, Cumbria, Greater Manchester, Nor... Name: col2, dtype: object Note: the last line also contains Cumbria, unlike your expected results, as this value appears with the code E as well. I guess that was a typo in your question... A: Another option is to use .groupby with aggregate because conceptually, this is not a pivoting operation but, well, an aggregation (concatenation) of values. This solution is quite similar to Luca Clissa's answer, but it uses the pandas api instead of numpy. >>> df.groupby("col1").col2.agg(list) col1 A [Accrington, Acle, Suffolk, Hampshire] B [Lincolnshire, Derbyshire] C [Aldershot, Alford, Cumbria] D [Hampshire] E [Bath, Alston, Greater Manchester, Northumberl... Name: col2, dtype: object That gives you cells of lists; if you need strings, add a .str.join(", "): >>> df.groupby("col1").col2.agg(list).str.join(", ") col1 A Accrington, Acle, Suffolk, Hampshire B Lincolnshire, Derbyshire C Aldershot, Alford, Cumbria D Hampshire E Bath, Alston, Greater Manchester, Northumberla... Name: col2, dtype: object If you want col1 as a normal column instead of an index, add a .reset_index() at the end.
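And if a proper two-column frame is wanted rather than an indexed Series, a reset_index on either answer's result does it (the towns column name is my own choice):

out = df.groupby("col1")["col2"].agg(", ".join).reset_index(name="towns")
print(out)  # one row per identifier, e.g. A -> "Accrington, Acle, Suffolk, Hampshire"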
Python transform data long to wide
I'm looking to transform some data in Python. Originally, in column 1 there are various identifiers (A to E in this example) associated with towns in column 2. There is a separate row for each identifier and town association. There can be any number of identifier to town associations. I'd like to end up with ONE row per identifier and with all the associated towns going horizontally separated by commas. Tried using long to wide but having difficulty in doing the above, appreciate any suggestions. Thank you
[ "One way to do it is using gruopby. For example, you can group by Column 1 and apply a function that returns the list of unique values for each group (i.e. each code).\nimport numpy as np\nimport pandas as pd\ndf = pd.DataFrame({\n 'col1': 'A A A A B B C C C D E E E E E'.split(' '),\n 'col2': ['Accrington', 'Acle', 'Suffolk', 'Hampshire', 'Lincolnshire',\n 'Derbyshire', 'Aldershot', 'Alford', 'Cumbria', 'Hampshire', 'Bath',\n 'Alston', 'Greater Manchester', 'Northumberland', 'Cumbria'],\n})\n\ndef get_towns(town_list):\n return ', '.join(np.unique(town_list))\n\ndf.groupby('col1')['col2'].apply(get_towns)\n\n\nAnd the result is:\ncol1\nA Accrington, Acle, Hampshire, Suffolk\nB Derbyshire, Lincolnshire\nC Aldershot, Alford, Cumbria\nD Hampshire\nE Alston, Bath, Cumbria, Greater Manchester, Nor...\nName: col2, dtype: object\n\nNote: the last line contains also Cumbria, differently from you expected results as this value appears also with the code E. I guess that was a typo in your question...\n", "Another option is to use .groupby with aggregate because conceptually, this is not a pivoting operation but, well, an aggregation (concatenation) of values. This solution is quite similar to Luca Clissa's answer, but it uses the pandas api instead of numpy.\n>>> df.groupby(\"col1\").col2.agg(list)\ncol1\nA [Accrington, Acle, Suffolk, Hampshire]\nB [Lincolnshire, Derbyshire]\nC [Aldershot, Alford, Cumbria]\nD [Hampshire]\nE [Bath, Alston, Greater Manchester, Northumberl...\nName: col2, dtype: object\n\nThat gives you cells of lists; if you need strings, add a .str.join(\", \"):\n>>> df.groupby(\"col1\").col2.agg(list).str.join(\", \")\ncol1\nA Accrington, Acle, Suffolk, Hampshire\nB Lincolnshire, Derbyshire\nC Aldershot, Alford, Cumbria\nD Hampshire\nE Bath, Alston, Greater Manchester, Northumberla...\nName: col2, dtype: object\n\nIf you want col1 as a normal column instead of an index, add a .reset_index() at the end.\n" ]
[ 2, 2 ]
[]
[]
[ "python" ]
stackoverflow_0074508861_python.txt
Q: Python, Unicode, and the Windows console When I try to print a Unicode string in a Windows console, I get an error: UnicodeEncodeError: 'charmap' codec can't encode character .... I assume this is because the Windows console does not accept Unicode-only characters. What's the best way around this? Is there any way I can make Python automatically print a ? instead of failing in this situation? Edit: I'm using Python 2.5. Note: @LasseV.Karlsen's answer with the checkmark is sort of outdated (from 2008). Please use the solutions/answers/suggestions below with care!! @JFSebastian's answer is more relevant as of today (6 Jan 2016). A: Update: Python 3.6 implements PEP 528: Change Windows console encoding to UTF-8: the default console on Windows will now accept all Unicode characters. Internally, it uses the same Unicode API as the win-unicode-console package mentioned below. print(unicode_string) should just work now. I get a UnicodeEncodeError: 'charmap' codec can't encode character... error. The error means that the Unicode characters you are trying to print can't be represented using the current (chcp) console character encoding. The codepage is often an 8-bit encoding such as cp437 that can represent only ~0x100 characters out of ~1M Unicode characters: >>> u"\N{EURO SIGN}".encode('cp437') Traceback (most recent call last): ... UnicodeEncodeError: 'charmap' codec can't encode character '\u20ac' in position 0: character maps to <undefined> I assume this is because the Windows console does not accept Unicode-only characters. What's the best way around this? The Windows console does accept Unicode characters and it can even display them (BMP only) if the corresponding font is configured. The WriteConsoleW() API should be used, as suggested in @Daira Hopwood's answer. It can be called transparently, i.e., you don't need to and should not modify your scripts if you use the win-unicode-console package: T:\> py -m pip install win-unicode-console T:\> py -m run your_script.py See What's the deal with Python 3.4, Unicode, different languages and Windows? Is there any way I can make Python automatically print a ? instead of failing in this situation? If it is enough to replace all unencodable characters with ? in your case then you could set the PYTHONIOENCODING envvar: T:\> set PYTHONIOENCODING=:replace T:\> python3 -c "print(u'[\N{EURO SIGN}]')" [?] In Python 3.6+, the encoding specified by the PYTHONIOENCODING envvar is ignored for interactive console buffers unless the PYTHONLEGACYWINDOWSIOENCODING envvar is set to a non-empty string. A: Note: This answer is sort of outdated (from 2008). Please use the solution below with care!! Here is a page that details the problem and a solution (search the page for the text Wrapping sys.stdout into an instance): PrintFails - Python Wiki Here's a code excerpt from that page: $ python -c 'import sys, codecs, locale; print sys.stdout.encoding; \ sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout); \ line = u"\u0411\n"; print type(line), len(line); \ sys.stdout.write(line); print line' UTF-8 <type 'unicode'> 2 Б Б $ python -c 'import sys, codecs, locale; print sys.stdout.encoding; \ sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout); \ line = u"\u0411\n"; print type(line), len(line); \ sys.stdout.write(line); print line' | cat None <type 'unicode'> 2 Б Б There's some more information on that page, well worth a read. A: Update: On Python 3.6 or later, printing Unicode strings to the console on Windows just works. So, upgrade to recent Python and you're done.
At this point I recommend using 2to3 to update your code to Python 3.x if needed, and just dropping support for Python 2.x. Note that there has been no security support for any version of Python before 3.7 (including Python 2.7) since December 2021. If you really still need to support earlier versions of Python (including Python 2.7), you can use https://github.com/Drekin/win-unicode-console , which is based on, and uses the same APIs as the code in the answer that was previously linked here. (That link does include some information on Windows font configuration but I doubt it still applies to Windows 8 or later.) Note: despite other plausible-sounding answers that suggest changing the code page to 65001, that did not work prior to Python 3.8. (It does kind-of work since then, but as pointed out above, you don't need to do so for Python 3.6+ anyway.) Also, changing the default encoding using sys.setdefaultencoding is (still) not a good idea. A: If you're not interested in getting a reliable representation of the bad character(s) you might use something like this (working with python >= 2.6, including 3.x): from __future__ import print_function import sys def safeprint(s): try: print(s) except UnicodeEncodeError: if sys.version_info >= (3,): print(s.encode('utf8').decode(sys.stdout.encoding)) else: print(s.encode('utf8')) safeprint(u"\N{EM DASH}") The bad character(s) in the string will be converted in a representation which is printable by the Windows console. A: The below code will make Python output to console as UTF-8 even on Windows. The console will display the characters well on Windows 7 but on Windows XP it will not display them well, but at least it will work and most important you will have a consistent output from your script on all platforms. You'll be able to redirect the output to a file. Below code was tested with Python 2.6 on Windows. #!/usr/bin/python # -*- coding: UTF-8 -*- import codecs, sys reload(sys) sys.setdefaultencoding('utf-8') print sys.getdefaultencoding() if sys.platform == 'win32': try: import win32console except: print "Python Win32 Extensions module is required.\n You can download it from https://sourceforge.net/projects/pywin32/ (x86 and x64 builds are available)\n" exit(-1) # win32console implementation of SetConsoleCP does not return a value # CP_UTF8 = 65001 win32console.SetConsoleCP(65001) if (win32console.GetConsoleCP() != 65001): raise Exception ("Cannot set console codepage to 65001 (UTF-8)") win32console.SetConsoleOutputCP(65001) if (win32console.GetConsoleOutputCP() != 65001): raise Exception ("Cannot set console output codepage to 65001 (UTF-8)") #import sys, codecs sys.stdout = codecs.getwriter('utf8')(sys.stdout) sys.stderr = codecs.getwriter('utf8')(sys.stderr) print "This is an Е乂αmp١ȅ testing Unicode support using Arabic, Latin, Cyrillic, Greek, Hebrew and CJK code points.\n" A: Just enter this code in command line before executing python script: chcp 65001 & set PYTHONIOENCODING=utf-8 A: Like Giampaolo Rodolà's answer, but even more dirty: I really, really intend to spend a long time (soon) understanding the whole subject of encodings and how they apply to Windoze consoles, For the moment I just wanted sthg which would mean my program would NOT CRASH, and which I understood ... and also which didn't involve importing too many exotic modules (in particular I'm using Jython, so half the time a Python module turns out not in fact to be available). 
def pr(s): try: print(s) except UnicodeEncodeError: for c in s: try: print( c, end='') except UnicodeEncodeError: print( '?', end='') NB "pr" is shorter to type than "print" (and quite a bit shorter to type than "safeprint")...! A: Kind of related on the answer by J. F. Sebastian, but more direct. If you are having this problem when printing to the console/terminal, then do this: >set PYTHONIOENCODING=UTF-8 A: For Python 2 try: print unicode(string, 'unicode-escape') For Python 3 try: import os string = "002 Could've Would've Should've" os.system('echo ' + string) Or try win-unicode-console: pip install win-unicode-console py -mrun your_script.py A: TL;DR: print(yourstring.encode('ascii','replace').decode('ascii')) I ran into this myself, working on a Twitch chat (IRC) bot. (Python 2.7 latest) I wanted to parse chat messages in order to respond... msg = s.recv(1024).decode("utf-8") but also print them safely to the console in a human-readable format: print(msg.encode('ascii','replace').decode('ascii')) This corrected the issue of the bot throwing UnicodeEncodeError: 'charmap' errors and replaced the unicode characters with ?. A: The cause of your problem is NOT the Win console not willing to accept Unicode (as it does this since I guess Win2k by default). It is the default system encoding. Try this code and see what it gives you: import sys sys.getdefaultencoding() if it says ascii, there's your cause ;-) You have to create a file called sitecustomize.py and put it under python path (I put it under /usr/lib/python2.5/site-packages, but that is differen on Win - it is c:\python\lib\site-packages or something), with the following contents: import sys sys.setdefaultencoding('utf-8') and perhaps you might want to specify the encoding in your files as well: # -*- coding: UTF-8 -*- import sys,time Edit: more info can be found in excellent the Dive into Python book A: Python 3.6 windows7: There is several way to launch a python you could use the python console (which has a python logo on it) or the windows console (it's written cmd.exe on it). I could not print utf8 characters in the windows console. Printing utf-8 characters throw me this error: OSError: [winError 87] The paraneter is incorrect Exception ignored in: (_io-TextIOwrapper name='(stdout)' mode='w' ' encoding='utf8') OSError: [WinError 87] The parameter is incorrect After trying and failing to understand the answer above I discovered it was only a setting problem. Right click on the top of the cmd console windows, on the tab font chose lucida console. A: Nowadays, the Windows console does not encounter this error, unless you redirect the output. Here is an example Python script scratch_1.py: s = "∞" print(s) If you run the script as follows, everything works as intended: python scratch_1.py ∞ However, if you run the following, then you get the same error as in the question: python scratch_1.py > temp.txt Traceback (most recent call last): File "C:\Users\Wok\AppData\Roaming\JetBrains\PyCharmCE2022.2\scratches\scratch_1.py", line 3, in <module> print(s) File "C:\Users\Wok\AppData\Local\Programs\Python\Python311\Lib\encodings\cp1252.py", line 19, in encode return codecs.charmap_encode(input,self.errors,encoding_table)[0] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ UnicodeEncodeError: 'charmap' codec can't encode character '\u221e' in position 0: character maps to <undefined> To solve this issue with the suggestion present in the original question, i.e. 
by replacing the erroneous characters with question marks ?, one can proceed as follows: s = "∞" try: print(s) except UnicodeEncodeError: output_str = s.encode("ascii", errors="replace").decode("ascii") print(output_str) It is important: to call decode(), so that the type of the output is str instead of bytes, with the same encoding, here "ascii", to avoid the creation of mojibake.
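A minimal, self-contained sketch that combines the fallback pattern shown in the answers above: try a normal print first, and fall back to a lossy re-encode. It assumes Python 3; the fallback encoding is read from sys.stdout, and the ? substitutions come from errors="replace", so nothing here is specific to any one console codepage.
import sys

def safe_print(s):
    # Print normally when the console encoding can represent the text.
    try:
        print(s)
    except UnicodeEncodeError:
        # Re-encode with '?' substitutions for unencodable characters,
        # then decode back so we pass str (not bytes) to print().
        encoding = sys.stdout.encoding or "ascii"
        print(s.encode(encoding, errors="replace").decode(encoding))

safe_print(u"[\N{EURO SIGN}]")  # prints [€] on UTF-8 consoles, [?] elsewhere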
Python, Unicode, and the Windows console
When I try to print a Unicode string in a Windows console, I get an error . UnicodeEncodeError: 'charmap' codec can't encode character .... I assume this is because the Windows console does not accept Unicode-only characters. What's the best way around this? Is there any way I can make Python automatically print a ? instead of failing in this situation? Edit: I'm using Python 2.5. Note: @LasseV.Karlsen answer with the checkmark is sort of outdated (from 2008). Please use the solutions/answers/suggestions below with care!! @JFSebastian answer is more relevant as of today (6 Jan 2016).
[ "Update: Python 3.6 implements PEP 528: Change Windows console encoding to UTF-8: the default console on Windows will now accept all Unicode characters. Internally, it uses the same Unicode API as the win-unicode-console package mentioned below. print(unicode_string) should just work now.\n\n\nI get a UnicodeEncodeError: 'charmap' codec can't encode character... error.\n\nThe error means that Unicode characters that you are trying to print can't be represented using the current (chcp) console character encoding. The codepage is often 8-bit encoding such as cp437 that can represent only ~0x100 characters from ~1M Unicode characters:\n>>> u\"\\N{EURO SIGN}\".encode('cp437')\nTraceback (most recent call last):\n...\nUnicodeEncodeError: 'charmap' codec can't encode character '\\u20ac' in position 0:\ncharacter maps to \n\nI assume this is because the Windows console does not accept Unicode-only characters. What's the best way around this?\n\nWindows console does accept Unicode characters and it can even display them (BMP only) if the corresponding font is configured. WriteConsoleW() API should be used as suggested in @Daira Hopwood's answer. It can be called transparently i.e., you don't need to and should not modify your scripts if you use win-unicode-console package:\nT:\\> py -m pip install win-unicode-console\nT:\\> py -m run your_script.py\n\nSee What's the deal with Python 3.4, Unicode, different languages and Windows?\n\nIs there any way I can make Python\nautomatically print a ? instead of failing in this situation?\n\nIf it is enough to replace all unencodable characters with ? in your case then you could set PYTHONIOENCODING envvar:\nT:\\> set PYTHONIOENCODING=:replace\nT:\\> python3 -c \"print(u'[\\N{EURO SIGN}]')\"\n[?]\n\nIn Python 3.6+, the encoding specified by PYTHONIOENCODING envvar is ignored for interactive console buffers unless PYTHONLEGACYWINDOWSIOENCODING envvar is set to a non-empty string.\n", "Note: This answer is sort of outdated (from 2008). Please use the solution below with care!!\n\nHere is a page that details the problem and a solution (search the page for the text Wrapping sys.stdout into an instance):\nPrintFails - Python Wiki\nHere's a code excerpt from that page:\n$ python -c 'import sys, codecs, locale; print sys.stdout.encoding; \\\n sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout); \\\n line = u\"\\u0411\\n\"; print type(line), len(line); \\\n sys.stdout.write(line); print line'\n UTF-8\n <type 'unicode'> 2\n Б\n Б\n\n $ python -c 'import sys, codecs, locale; print sys.stdout.encoding; \\\n sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout); \\\n line = u\"\\u0411\\n\"; print type(line), len(line); \\\n sys.stdout.write(line); print line' | cat\n None\n <type 'unicode'> 2\n Б\n Б\n\nThere's some more information on that page, well worth a read.\n", "Update: On Python 3.6 or later, printing Unicode strings to the console on Windows just works.\nSo, upgrade to recent Python and you're done. At this point I recommend using 2to3 to update your code to Python 3.x if needed, and just dropping support for Python 2.x. Note that there has been no security support for any version of Python before 3.7 (including Python 2.7) since December 2021.\nIf you really still need to support earlier versions of Python (including Python 2.7), you can use https://github.com/Drekin/win-unicode-console , which is based on, and uses the same APIs as the code in the answer that was previously linked here. 
(That link does include some information on Windows font configuration but I doubt it still applies to Windows 8 or later.)\nNote: despite other plausible-sounding answers that suggest changing the code page to 65001, that did not work prior to Python 3.8. (It does kind-of work since then, but as pointed out above, you don't need to do so for Python 3.6+ anyway.) Also, changing the default encoding using sys.setdefaultencoding is (still) not a good idea.\n", "If you're not interested in getting a reliable representation of the bad character(s) you might use something like this (working with python >= 2.6, including 3.x):\nfrom __future__ import print_function\nimport sys\n\ndef safeprint(s):\n try:\n print(s)\n except UnicodeEncodeError:\n if sys.version_info >= (3,):\n print(s.encode('utf8').decode(sys.stdout.encoding))\n else:\n print(s.encode('utf8'))\n\nsafeprint(u\"\\N{EM DASH}\")\n\nThe bad character(s) in the string will be converted in a representation which is printable by the Windows console.\n", "The below code will make Python output to console as UTF-8 even on Windows. \nThe console will display the characters well on Windows 7 but on Windows XP it will not display them well, but at least it will work and most important you will have a consistent output from your script on all platforms. You'll be able to redirect the output to a file.\nBelow code was tested with Python 2.6 on Windows.\n\n#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport codecs, sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nprint sys.getdefaultencoding()\n\nif sys.platform == 'win32':\n try:\n import win32console \n except:\n print \"Python Win32 Extensions module is required.\\n You can download it from https://sourceforge.net/projects/pywin32/ (x86 and x64 builds are available)\\n\"\n exit(-1)\n # win32console implementation of SetConsoleCP does not return a value\n # CP_UTF8 = 65001\n win32console.SetConsoleCP(65001)\n if (win32console.GetConsoleCP() != 65001):\n raise Exception (\"Cannot set console codepage to 65001 (UTF-8)\")\n win32console.SetConsoleOutputCP(65001)\n if (win32console.GetConsoleOutputCP() != 65001):\n raise Exception (\"Cannot set console output codepage to 65001 (UTF-8)\")\n\n#import sys, codecs\nsys.stdout = codecs.getwriter('utf8')(sys.stdout)\nsys.stderr = codecs.getwriter('utf8')(sys.stderr)\n\nprint \"This is an Е乂αmp١ȅ testing Unicode support using Arabic, Latin, Cyrillic, Greek, Hebrew and CJK code points.\\n\"\n\n", "Just enter this code in command line before executing python script:\nchcp 65001 & set PYTHONIOENCODING=utf-8\n\n", "Like Giampaolo Rodolà's answer, but even more dirty: I really, really intend to spend a long time (soon) understanding the whole subject of encodings and how they apply to Windoze consoles, \nFor the moment I just wanted sthg which would mean my program would NOT CRASH, and which I understood ... and also which didn't involve importing too many exotic modules (in particular I'm using Jython, so half the time a Python module turns out not in fact to be available).\ndef pr(s):\n try:\n print(s)\n except UnicodeEncodeError:\n for c in s:\n try:\n print( c, end='')\n except UnicodeEncodeError:\n print( '?', end='')\n\nNB \"pr\" is shorter to type than \"print\" (and quite a bit shorter to type than \"safeprint\")...!\n", "Kind of related on the answer by J. F. 
Sebastian, but more direct.\nIf you are having this problem when printing to the console/terminal, then do this:\n>set PYTHONIOENCODING=UTF-8\n\n", "For Python 2 try:\nprint unicode(string, 'unicode-escape')\n\nFor Python 3 try:\nimport os\nstring = \"002 Could've Would've Should've\"\nos.system('echo ' + string)\n\nOr try win-unicode-console:\npip install win-unicode-console\npy -mrun your_script.py\n\n", "TL;DR:\nprint(yourstring.encode('ascii','replace').decode('ascii'))\n\n\nI ran into this myself, working on a Twitch chat (IRC) bot. (Python 2.7 latest)\nI wanted to parse chat messages in order to respond...\nmsg = s.recv(1024).decode(\"utf-8\")\n\nbut also print them safely to the console in a human-readable format:\nprint(msg.encode('ascii','replace').decode('ascii'))\n\nThis corrected the issue of the bot throwing UnicodeEncodeError: 'charmap' errors and replaced the unicode characters with ?.\n", "The cause of your problem is NOT the Win console not willing to accept Unicode (as it does this since I guess Win2k by default). It is the default system encoding. Try this code and see what it gives you:\nimport sys\nsys.getdefaultencoding()\n\nif it says ascii, there's your cause ;-)\nYou have to create a file called sitecustomize.py and put it under python path (I put it under /usr/lib/python2.5/site-packages, but that is differen on Win - it is c:\\python\\lib\\site-packages or something), with the following contents:\nimport sys\nsys.setdefaultencoding('utf-8')\n\nand perhaps you might want to specify the encoding in your files as well:\n# -*- coding: UTF-8 -*-\nimport sys,time\n\nEdit: more info can be found in excellent the Dive into Python book\n", "Python 3.6 windows7: There is several way to launch a python you could use the python console (which has a python logo on it) or the windows console (it's written cmd.exe on it). \nI could not print utf8 characters in the windows console. Printing utf-8 characters throw me this error:\nOSError: [winError 87] The paraneter is incorrect \nException ignored in: (_io-TextIOwrapper name='(stdout)' mode='w' ' encoding='utf8') \nOSError: [WinError 87] The parameter is incorrect \n\nAfter trying and failing to understand the answer above I discovered it was only a setting problem. Right click on the top of the cmd console windows, on the tab font chose lucida console.\n", "Nowadays, the Windows console does not encounter this error, unless you redirect the output.\nHere is an example Python script scratch_1.py:\ns = \"∞\"\n\nprint(s)\n\nIf you run the script as follows, everything works as intended:\npython scratch_1.py\n\n∞\n\nHowever, if you run the following, then you get the same error as in the question:\npython scratch_1.py > temp.txt\n\nTraceback (most recent call last):\n File \"C:\\Users\\Wok\\AppData\\Roaming\\JetBrains\\PyCharmCE2022.2\\scratches\\scratch_1.py\", line 3, in <module>\n print(s)\n File \"C:\\Users\\Wok\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\encodings\\cp1252.py\", line 19, in encode\n return codecs.charmap_encode(input,self.errors,encoding_table)[0]\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nUnicodeEncodeError: 'charmap' codec can't encode character '\\u221e' in position 0: character maps to <undefined>\n\n\nTo solve this issue with the suggestion present in the original question, i.e. 
by replacing the erroneous characters with question marks ?, one can proceed as follows:\ns = \"∞\"\n\ntry:\n print(s)\nexcept UnicodeEncodeError:\n output_str = s.encode(\"ascii\", errors=\"replace\").decode(\"ascii\")\n\n print(output_str)\n\nIt is important:\n\nto call decode(), so that the type of the output is str instead of bytes,\nwith the same encoding, here \"ascii\", to avoid the creation of mojibake.\n\n" ]
[ 86, 39, 29, 11, 10, 6, 5, 2, 2, 2, 1, 1, 0 ]
[ "James Sulak asked,\n\nIs there any way I can make Python automatically print a ? instead of failing in this situation?\n\nOther solutions recommend we attempt to modify the Windows environment or replace Python's print() function. The answer below comes closer to fulfilling Sulak's request.\nUnder Windows 7, Python 3.5 can be made to print Unicode without throwing a UnicodeEncodeError as follows:\n    In place of:\n   print(text)\n    substitute:\n    print(str(text).encode('utf-8'))\nInstead of throwing an exception, Python now displays unprintable Unicode characters as \\xNN hex codes, e.g.:\n  Halmalo n\\xe2\\x80\\x99\\xc3\\xa9tait plus qu\\xe2\\x80\\x99un point noir\nInstead of\n  Halmalo n’était plus qu’un point noir\nGranted, the latter is preferable ceteris paribus, but otherwise the former is completely accurate for diagnostic messages. Because it displays Unicode as literal byte values the former may also assist in diagnosing encode/decode problems.\nNote: The str() call above is needed because otherwise encode() causes Python to reject a Unicode character as a tuple of numbers.\n", "The issue is with windows default encoding being set to cp1252, and need to be set to utf-8. (check PEP)\nCheck default encoding using:\nimport locale \nlocale.getpreferredencoding()\n\nYou can override locale settings\nimport os\nif os.name == \"nt\":\n import _locale\n _locale._gdl_bak = _locale._getdefaultlocale\n _locale._getdefaultlocale = (lambda *args: (_locale._gdl_bak()[0], 'utf8'))\n\nreferenced code from stack link\n" ]
[ -1, -1 ]
[ "python", "unicode" ]
stackoverflow_0000005419_python_unicode.txt
Q: "assert" statement with or without parentheses Here are four simple invocations of assert: >>> assert 1==2 Traceback (most recent call last): File "<stdin>", line 1, in ? AssertionError >>> assert 1==2, "hi" Traceback (most recent call last): File "<stdin>", line 1, in ? AssertionError: hi >>> assert(1==2) Traceback (most recent call last): File "<stdin>", line 1, in ? AssertionError >>> assert(1==2, "hi") Note that the last one does not raise an error. What is the difference between calling assert with or without parenthesis that causes this behavior? My practice is to use parenthesis, but the above suggests that I should not. A: The last assert would have given you a warning (SyntaxWarning: assertion is always true, perhaps remove parentheses?) if you ran it through a full interpreter, not through IDLE. Because assert is a keyword and not a function, you are actually passing in a tuple as the first argument and leaving off the second argument. Recall that non-empty tuples evaluate to True, and since the assertion message is optional, you've essentially called assert True when you wrote assert(1==2, "hi"). A: If you put the parenthesis in there because you wanted a multi-line assert, then an alternative is to put a backslash at the end of the line like this: foo = 7 assert foo == 8, \ "derp should be 8, it is " + str(foo) Prints: AssertionError: "derp should be 8, it is 7 Why does this python assert have to be different from everything else: I think the pythonic ideology is that a program should self-correct without having to worry about the special flag to turn on asserts. The temptation to turn off asserts is too great, and thus it's being deprecated. I share your annoyance that the python assert has unique syntax relative to all other python programming constructs, and this syntax has yet again changed from python2 to python3 and again changed from python 3.4 to 3.6. Making assert statements not backward compatible from any version to any other version. It's a tap on the shoulder that assert is a 3rd class citizen, it will be totally removed in python4, and certainly again in Python 8.1. A: You can break assert statement without \ like this: foo = 7 assert foo == 8, ( 'derp should be 8, it is ' + str(foo)) Or if you have even longer message: foo = 7 assert foo == 8, ( 'Lorem Ipsum is simply dummy text of the printing and typesetting ' 'industry. Lorem Ipsum has been the industry\'s standard dummy text ' 'ever since the 1500s' ) A: assert 1==2, "hi" is parsed as assert 1==2, "hi" with "hi" as the second parameter for the keyword. Hence why it properly gives an error. assert(1==2) is parsed as assert (1==2) which is identical to assert 1==2, because parens around a single item don't create a tuple unless there's a trailing comma e.g. (1==2,). assert(1==2, "hi") is parsed as assert (1==2, "hi"), which doesn't give an error because a non-empty tuple (False, "hi") isn't a false value, and there is no second parameter supplied to the keyword. You shouldn't use parentheses because assert is not a function in Python - it's a keyword. 
A: The following is cited from the Python docs: Assert statements are a convenient way to insert debugging assertions into a program: assert_stmt ::= "assert" expression ["," expression] The simple form, assert expression, is equivalent to if __debug__: if not expression: raise AssertionError The extended form, assert expression1, expression2, is equivalent to if __debug__: if not expression1: raise AssertionError(expression2) So when you're using parentheses here, you're using the simple form, and the expression is evaluated as a tuple, which is always True when cast to bool. A: An assert statement with or without parentheses, as shown below, behaves the same: assert (x == 3) assert x == 3 And other statements such as if, while, for and del with or without parentheses, as shown below, also behave the same: if (x == "Hello"): if x == "Hello": while (x == 3): while x == 3: for (x) in (fruits): for x in fruits: del (x) del x In addition, most example Python code I've seen so far doesn't use parentheses for assert, if, while, for and del statements, so I prefer not using parentheses for them.
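A short runnable sketch tying the answers together; the variable foo is introduced purely for illustration:
# A parenthesized pair is a tuple, and non-empty tuples are truthy,
# so this assertion can never fail (recent CPython emits a
# SyntaxWarning pointing this out):
assert (1 == 2, "hi")   # passes: the tuple (False, "hi") is truthy

# For a long message, parenthesize only the message expression:
foo = 7
assert foo == 7, (
    "foo should be 7, it is " + str(foo)
)

# The correct failing form: condition first, message after the comma.
assert 1 == 2, "hi"     # raises AssertionError: hi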
"assert" statement with or without parentheses
Here are four simple invocations of assert: >>> assert 1==2 Traceback (most recent call last): File "<stdin>", line 1, in ? AssertionError >>> assert 1==2, "hi" Traceback (most recent call last): File "<stdin>", line 1, in ? AssertionError: hi >>> assert(1==2) Traceback (most recent call last): File "<stdin>", line 1, in ? AssertionError >>> assert(1==2, "hi") Note that the last one does not raise an error. What is the difference between calling assert with or without parenthesis that causes this behavior? My practice is to use parenthesis, but the above suggests that I should not.
[ "The last assert would have given you a warning (SyntaxWarning: assertion is always true, perhaps remove parentheses?) if you ran it through a full interpreter, not through IDLE. Because assert is a keyword and not a function, you are actually passing in a tuple as the first argument and leaving off the second argument.\nRecall that non-empty tuples evaluate to True, and since the assertion message is optional, you've essentially called assert True when you wrote assert(1==2, \"hi\").\n", "If you put the parenthesis in there because you wanted a multi-line assert, then an alternative is to put a backslash at the end of the line like this:\nfoo = 7\nassert foo == 8, \\\n \"derp should be 8, it is \" + str(foo)\n\nPrints: \nAssertionError: \"derp should be 8, it is 7\n\nWhy does this python assert have to be different from everything else:\nI think the pythonic ideology is that a program should self-correct without having to worry about the special flag to turn on asserts. The temptation to turn off asserts is too great, and thus it's being deprecated.\nI share your annoyance that the python assert has unique syntax relative to all other python programming constructs, and this syntax has yet again changed from python2 to python3 and again changed from python 3.4 to 3.6. \n Making assert statements not backward compatible from any version to any other version.\nIt's a tap on the shoulder that assert is a 3rd class citizen, it will be totally removed in python4, and certainly again in Python 8.1.\n", "You can break assert statement without \\ like this:\nfoo = 7\nassert foo == 8, (\n 'derp should be 8, it is ' + str(foo))\n\nOr if you have even longer message:\nfoo = 7\nassert foo == 8, (\n 'Lorem Ipsum is simply dummy text of the printing and typesetting '\n 'industry. Lorem Ipsum has been the industry\\'s standard dummy text '\n 'ever since the 1500s'\n)\n\n", "assert 1==2, \"hi\" is parsed as assert 1==2, \"hi\" with \"hi\" as the second parameter for the keyword. Hence why it properly gives an error.\nassert(1==2) is parsed as assert (1==2) which is identical to assert 1==2, because parens around a single item don't create a tuple unless there's a trailing comma e.g. 
(1==2,).\nassert(1==2, \"hi\") is parsed as assert (1==2, \"hi\"), which doesn't give an error because a non-empty tuple (False, \"hi\") isn't a false value, and there is no second parameter supplied to the keyword.\nYou shouldn't use parentheses because assert is not a function in Python - it's a keyword.\n", "Following is cited from the python doc\n\nAssert statements are a convenient way to insert debugging assertions into a program:\nassert_stmt ::= \"assert\" expression [\",\" expression] \n\nThe simple form, assert expression, is equivalent to\n\nif __debug__:\n if not expression: raise AssertionError\n\nThe extended form, assert expression1, expression2, is equivalent to\n\nif __debug__:\n if not expression1: raise AssertionError(expression2)\n\nSo when you're using parenthesis here, you're using the simple form, and the expression is evaluated as a tuple, which is always True when being casted to bool\n", "assert statement with or without parentheses as shown below are the same:\nassert (x == 3)\n\nassert x == 3\n\n\nAnd, other statements such as if, while, for and del with or without parentheses as shown below are also the same:\n\nif (x == \"Hello\"):\n\nif x == \"Hello\":\n\n\nwhile (x == 3):\n\nwhile x == 3:\n\n\nfor (x) in (fruits):\n\nfor x in fruits:\n\n\n\ndel (x)\n\ndel x\n\n\nIn addition, basically, most example python code which I've seen so far doesn't use parentheses for assert, if, while, for and del statements so I prefer not using parentheses for them.\n" ]
[ 153, 47, 26, 19, 1, 0 ]
[]
[]
[ "assert", "parentheses", "python", "statements" ]
stackoverflow_0003112171_assert_parentheses_python_statements.txt
Q: loop through nested dictionary in python and display key value pair I am a beginner in Python and I came up against this problem that I can't seem to solve. I have the following dictionary stats = {1: {"Player": "Derrick Henry", "yards": 870, "TD": 9}, 2: {"Player": "Nick Chubb", "Yards": 841, "TD": 10}, 3: {"Player": "Saquon Barkley", "Yards": 779, "TD": 5}} I want to loop through the dictionary and display the values as shown below Player1 Player=Derrick Henry yards=870 TD=9 player 2 Player=Nnikki Chubb yards=770 TD=10 player3 Player=Nikki Chubb yards=770 TD=10 I tried the following code stats = {1: {"Player": "Derrick Henry", "Yards": 870, "TD": 9}, 2: {"Player": "Nick Chubb", "Yards": 841, "TD": 10}, 3: {"Player": "Saquon Barkley", "Yards": 779, "TD": 5}} for key, value in stats.items(): print(value) for x, y,z in value.items(): print("Player {}".format(key)) #IF Player if x == "Player": print("Player = {}".format(x)) #IF YARDS if y == "Yards": print("Yards = {}".format(y)) #IF YARDS if z == "TD": print("yards = {}".format(y)) Any help will be appreciated. Thank you A: Don't you see the useless logic here: if a variable is something, you write that same thing manually in a string; just use it directly if x == "Player": print("Player = {}".format(x)) if y == "Yards": print("Yards = {}".format(y)) if z == "TD": print("TD = {}".format(y)) Also, you did well to use .items the first time, but misuse it the second time: it iterates over pairs, so it will always yield 2 variables, not 3 for key, props in stats.items(): print(f"Player{key}") for prop_key, prop_value in props.items(): print(f"{prop_key}={prop_value}") A: You kinda haven't really decided yet if you want to iterate over the nested dict or not. To iterate over it, check azro's answer. But what you are attempting is not iterating, so you can just write: print("Player = {}".format(value["Player"])) print("Yards = {}".format(value["Yards"])) print("TD = {}".format(value["TD"])) Or, as the print statements are all the same, you could loop over the keys you want to print: for key in ["Player", "Yards", "TD"]: print("{} = {}".format(key, value[key]))
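Putting the two answers together, a minimal sketch is shown below. It assumes the keys are normalized to "Yards" (the first entry in the question uses lowercase "yards"), and it prints a "PlayerN" heading followed by one key=value line per field:
stats = {
    1: {"Player": "Derrick Henry", "Yards": 870, "TD": 9},
    2: {"Player": "Nick Chubb", "Yards": 841, "TD": 10},
    3: {"Player": "Saquon Barkley", "Yards": 779, "TD": 5},
}

for number, props in stats.items():
    # Outer loop: one heading per player entry.
    print("Player{}".format(number))
    # Inner loop: .items() yields (key, value) pairs, so two names suffice.
    for key, value in props.items():
        print("{}={}".format(key, value))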
loop through nested dictionary in python and display key value pair
I am a beginner in Python and I came up against this problem that I can't seem to solve. I have the following dictionary stats = {1: {"Player": "Derrick Henry", "yards": 870, "TD": 9}, 2: {"Player": "Nick Chubb", "Yards": 841, "TD": 10}, 3: {"Player": "Saquon Barkley", "Yards": 779, "TD": 5}} I want to loop through the dictionary and display the values as shown below Player1 Player=Derrick Henry yards=870 TD=9 player 2 Player=Nnikki Chubb yards=770 TD=10 player3 Player=Nikki Chubb yards=770 TD=10 I tried the following code stats = {1: {"Player": "Derrick Henry", "Yards": 870, "TD": 9}, 2: {"Player": "Nick Chubb", "Yards": 841, "TD": 10}, 3: {"Player": "Saquon Barkley", "Yards": 779, "TD": 5}} for key, value in stats.items(): print(value) for x, y,z in value.items(): print("Player {}".format(key)) #IF Player if x == "Player": print("Player = {}".format(x)) #IF YARDS if y == "Yards": print("Yards = {}".format(y)) #IF YARDS if z == "TD": print("yards = {}".format(y)) Any help will be appreciated. Thank you
[ "Don't you see here the useless logic : if a variable is something, you write manualmy that thing in a string, just use it directly\nif x == \"Player\":\n print(\"Player = {}\".format(x))\nif y == \"Yards\":\n print(\"Yards = {}\".format(y))\nif z == \"TD\":\n print(\"TD = {}\".format(y))\n\n\nAlso you did well use .items first time, but misuses it the second time, it iterate over pair, so it'll always yields 2 variable , not 3\nfor key, props in stats.items():\n print(f\"Player{key}\")\n for prop_key, prop_value in props.items():\n print(f\"{prop_key}={prop_value}\")\n\n", "You kinda haven't really decided yet, if you want to iterate over the nested dict or not. To iterate over it, check azro's answer. But what you are attempting is not iterating, so you can just write:\nprint(\"Player = {}\".format(value[\"Player\"]))\nprint(\"Yards = {}\".format(value[\"Yards\"]))\nprint(\"TD = {}\".format(value[\"TD\"]))\n\nOr, as the print statements are all the same, you could loop over the keys you want to print:\nfor key in [\"Player\", \"Yards\", \"TD\"]:\n print(\"{} = {}\".format(key, value[key])\n\n" ]
[ 1, 0 ]
[]
[]
[ "dictionary", "python" ]
stackoverflow_0074509308_dictionary_python.txt
Q: PyQt QTableView resizeRowsToContents not completely resize on initialisation I have a minimum example here of a QTableView widget that displays a long string that I want word wrapped when I start the app. from PyQt6.QtWidgets import ( QMainWindow, QTableView, QHeaderView, QApplication, ) from PyQt6.QtCore import ( Qt, QEvent, QAbstractTableModel, QSize, QEvent ) import sys text = """A long string which needs word wrapping to fully display. A long string which needs word wrapping to fully display. A long string which needs word wrapping to fully display.""" class MainWindow(QMainWindow): def __init__(self): super().__init__() self.table = QTableView() header = self.table.horizontalHeader() header.setSectionResizeMode(QHeaderView.ResizeMode.Stretch) self.table.horizontalHeader().sectionResized.connect(self.table.resizeRowsToContents) self.model = TableModel([[text] for i in range(50)]) self.table.setModel(self.model) self.setCentralWidget(self.table) self.table.resizeRowsToContents() def changeEvent(self, event): if event.type() == QEvent.Type.WindowStateChange: self.table.resizeRowsToContents() return super(MainWindow, self).changeEvent(event) class TableModel(QAbstractTableModel): def __init__(self, data): super().__init__() self._data = data def data(self, index, role): if role == Qt.ItemDataRole.DisplayRole: return self._data[index.row()][index.column()] def rowCount(self, index): return len(self._data) def columnCount(self, index): return len(self._data[0]) app = QApplication(sys.argv) app.lastWindowClosed.connect(app.quit) w = MainWindow() w.show() app.exec() When I run the above I get this but when I resize the window manually just slightly, I get what I'm expecting How would I get the second image as the state when the app is started? I thought calling self.table.resizeRowsToContents() in the __init__ method would do it. Another question is, why does self.table.horizontalHeader().sectionResized.connect(self.table.resizeRowsToContents) work upon resizing when resizeRowsToContents() does not work in the __init__ method? A: why does self.table.horizontalHeader().sectionResized.connect(self.table.resizeRowsToContents) work upon resizing when resizeRowsToContents() does not work in the init method? Because the window isn't rendered yet, that's why the QTableView doesn't know yet how big the text is in order to resize the rows. How would I get the second image as the state when the app is started? I thought calling self.table.resizeRowsToContents() in the init method would do it. You could separate the population of the table from the init method, or delay it, until Your widget is rendered, preferably inside the class itself, but you can do something like this: # ... app = QApplication(sys.argv) app.lastWindowClosed.connect(app.quit) w = MainWindow() w.show() w.table.resizeRowsToContents() # I just added this line app.exec()
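A variation on the answer that keeps the fix inside the class: schedule the resize with a zero-delay single-shot timer in __init__, so it runs on the next event-loop pass, after the window has been shown and the stretched column widths are known. This is only a sketch against the code above:
from PyQt6.QtCore import QTimer

# In MainWindow.__init__, after self.setCentralWidget(self.table):
# the callback fires once the event loop is running, i.e. after
# the table has been laid out with its final column widths.
QTimer.singleShot(0, self.table.resizeRowsToContents)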
PyQt QTableView resizeRowsToContents not completely resize on initialisation
I have a minimum example here of a QTableView widget that displays a long string that I want word wrapped when I start the app. from PyQt6.QtWidgets import ( QMainWindow, QTableView, QHeaderView, QApplication, ) from PyQt6.QtCore import ( Qt, QEvent, QAbstractTableModel, QSize, QEvent ) import sys text = """A long string which needs word wrapping to fully display. A long string which needs word wrapping to fully display. A long string which needs word wrapping to fully display.""" class MainWindow(QMainWindow): def __init__(self): super().__init__() self.table = QTableView() header = self.table.horizontalHeader() header.setSectionResizeMode(QHeaderView.ResizeMode.Stretch) self.table.horizontalHeader().sectionResized.connect(self.table.resizeRowsToContents) self.model = TableModel([[text] for i in range(50)]) self.table.setModel(self.model) self.setCentralWidget(self.table) self.table.resizeRowsToContents() def changeEvent(self, event): if event.type() == QEvent.Type.WindowStateChange: self.table.resizeRowsToContents() return super(MainWindow, self).changeEvent(event) class TableModel(QAbstractTableModel): def __init__(self, data): super().__init__() self._data = data def data(self, index, role): if role == Qt.ItemDataRole.DisplayRole: return self._data[index.row()][index.column()] def rowCount(self, index): return len(self._data) def columnCount(self, index): return len(self._data[0]) app = QApplication(sys.argv) app.lastWindowClosed.connect(app.quit) w = MainWindow() w.show() app.exec() When I run the above I get this but when I resize the window manually just slightly, I get what I'm expecting How would I get the second image as the state when the app is started? I thought calling self.table.resizeRowsToContents() in the __init__ method would do it. Another question is, why does self.table.horizontalHeader().sectionResized.connect(self.table.resizeRowsToContents) work upon resizing when resizeRowsToContents() does not work in the __init__ method?
[ "\nwhy does self.table.horizontalHeader().sectionResized.connect(self.table.resizeRowsToContents) work upon resizing when resizeRowsToContents() does not work in the init method?\n\nBecause the window isn't rendered yet, that's why the QTableView doesn't know yet how big the text is in order to resize the rows.\n\nHow would I get the second image as the state when the app is started? I thought calling self.table.resizeRowsToContents() in the init method would do it.\n\nYou could separate the population of the table from the init method, or delay it, until Your widget is rendered, preferably inside the class itself, but you can do something like this:\n# ...\napp = QApplication(sys.argv)\napp.lastWindowClosed.connect(app.quit)\nw = MainWindow()\nw.show()\nw.table.resizeRowsToContents() # I just added this line\napp.exec()\n\n" ]
[ 1 ]
[]
[]
[ "pyqt", "pyqt5", "pyqt6", "python", "qtableview" ]
stackoverflow_0074509116_pyqt_pyqt5_pyqt6_python_qtableview.txt
Q: simple affine encryption using python problem I'm a beginner to Python, and I'm trying to encrypt a message using basic Python LETTERS = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"] crpt = input("please enter your message") K = 3 z = "" r = "" for i in range(len(crpt)): if crpt[i] not in LETTERS: LETTERS[r] = " " print(f"new encrypted letter: {LETTERS[r]}") z += LETTERS[r] else: icrpt = LETTERS.index(crpt[i]) r = (icrpt * K) % 26 print(f"new encrypted letter: {LETTERS[r]}") z += LETTERS[r] print(f"the initial message is: {crpt}") print(f"the new encrypted message is: {z}") I realised that there is a bug where, if the message is "A A A", it only outputs "A " as if the other A's are considered spaces. I thought of making an if statement just for that case, but it doesn't seem right. A: The problem is caused by the statement LETTERS[r] = " ", and I do not understand the purpose of it. When you encrypt the first "A", r becomes 0. Then you try to encrypt a space, but there is no space character in your alphabet, so you execute the "if crpt[i] not in LETTERS" branch and wipe out your alphabet[0]. The next time you try to encrypt an "A", the result will be a blank. (Note also that if the very first character were not in LETTERS, r would still be the empty string "", and LETTERS[r] would then raise a TypeError, because lists cannot be indexed with strings.) To conclude: you should not modify your alphabet; I don't understand why you included that statement. Also, all lowercase letters in your unencrypted message won't be encrypted.
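A minimal corrected sketch following the answer's advice: the alphabet is never mutated, characters outside it pass through unchanged, and the multiplier K = 3 is kept from the question (lowercase letters are still left unencrypted, matching the original behaviour):
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
K = 3

crpt = input("please enter your message")
z = ""
for ch in crpt:
    if ch in LETTERS:
        # Multiplicative shift: map index i to (i * K) mod 26.
        z += LETTERS[(LETTERS.index(ch) * K) % 26]
    else:
        # Pass spaces and other characters through unchanged.
        z += ch

print(f"the initial message is: {crpt}")
print(f"the new encrypted message is: {z}")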
simple affine encryption using python problem
I'm a beginner to Python, and I'm trying to encrypt a message using basic Python LETTERS = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"] crpt = input("please enter your message") K = 3 z = "" r = "" for i in range(len(crpt)): if crpt[i] not in LETTERS: LETTERS[r] = " " print(f"new encrypted letter: {LETTERS[r]}") z += LETTERS[r] else: icrpt = LETTERS.index(crpt[i]) r = (icrpt * K) % 26 print(f"new encrypted letter: {LETTERS[r]}") z += LETTERS[r] print(f"the initial message is: {crpt}") print(f"the new encrypted message is: {z}") I realised that there is a bug where, if the message is "A A A", it only outputs "A " as if the other A's are considered spaces. I thought of making an if statement just for that case, but it doesn't seem right.
[ "The problem is caused by the statement LETTERS[r] = \" \" and do not understand the purpose of it.\nWhen you encrypt the first \"A\", r become 0.\nThen you try to encrypt a space but there is no space character in your alphabet. So you execute the \"if crypt ...\" code and wipe-out your alphabet[0]. Next time you will try to encrypt a \"A\", the result will be a blank.\nTo conclude:\n\nyou should not modify your alphabet. I don't understand why you included that statement.\nall lowercase letters in your unencrypted message won't be encrypted.\n\n" ]
[ 0 ]
[]
[]
[ "cryptography", "python", "python_cryptography" ]
stackoverflow_0074509056_cryptography_python_python_cryptography.txt
Q: Auto reloading python Flask app upon code changes I'm investigating how to develop a decent web app with Python. Since I don't want some high-order structures to get in my way, my choice fell on the lightweight Flask framework. Time will tell if this was the right choice. So, now I've set up an Apache server with mod_wsgi, and my test site is running fine. However, I'd like to speed up the development routine by making the site automatically reload upon any changes in py or template files I make. I see that any changes in site's .wsgi file causes reloading (even without WSGIScriptReloading On in the apache config file), but I still have to prod it manually (ie, insert extra linebreak, save). Is there some way how to cause reload when I edit some of the app's py files? Or, I am expected to use IDE that refreshes the .wsgi file for me? A: Run the flask run CLI command with debug mode enabled, which will automatically enable the reloader. As of Flask 2.2, you can pass --app and --debug options on the command line. $ flask --app main.py --debug run --app can also be set to module:app or module:create_app instead of module.py. See the docs for a full explanation. More options are available with: $ flask run --help Prior to Flask 2.2, you needed to set the FLASK_APP and FLASK_ENV=development environment variables. $ export FLASK_APP=main.py $ export FLASK_ENV=development $ flask run It is still possible to set FLASK_APP and FLASK_DEBUG=1 in Flask 2.2. A: If you are talking about test/dev environments, then just use the debug option. It will auto-reload the flask app when a code change happens. app.run(debug=True) Or, from the shell: $ export FLASK_DEBUG=1 $ flask run http://flask.palletsprojects.com/quickstart/#debug-mode A: In test/development environments The werkzeug debugger already has an 'auto reload' function available that can be enabled by doing one of the following: app.run(debug=True) or app.debug = True You can also use a separate configuration file to manage all your setup if you need be. For example I use 'settings.py' with a 'DEBUG = True' option. Importing this file is easy too; app.config.from_object('application.settings') However this is not suitable for a production environment. Production environment Personally I chose Nginx + uWSGI over Apache + mod_wsgi for a few performance reasons but also the configuration options. The touch-reload option allows you to specify a file/folder that will cause the uWSGI application to reload your newly deployed flask app. For example, your update script pulls your newest changes down and touches 'reload_me.txt' file. Your uWSGI ini script (which is kept up by Supervisord - obviously) has this line in it somewhere: touch-reload = '/opt/virtual_environments/application/reload_me.txt' I hope this helps! A: If you're running using uwsgi look at the python auto reload option: uwsgi --py-autoreload 1 Example uwsgi-dev-example.ini: [uwsgi] socket = 127.0.0.1:5000 master = true virtualenv = /Users/xxxx/.virtualenvs/sites_env chdir = /Users/xxx/site_root module = site_module:register_debug_server() callable = app uid = myuser chmod-socket = 660 log-date = true workers = 1 py-autoreload = 1 site_root/__init__.py def register_debug_server(): from werkzeug.debug import DebuggedApplication app = Flask(__name__) app.debug = True app = DebuggedApplication(app, evalex=True) return app Then run: uwsgi --ini uwsgi-dev-example.ini Note: This example also enables the debugger. I went this route to mimic production as close as possible with my nginx setup. 
Simply running the flask app with its built-in web server behind nginx would result in a bad gateway error. A: For Flask 1.0 until 2.2, the basic approach to hot re-loading is: $ export FLASK_APP=my_application $ export FLASK_ENV=development $ flask run you should use FLASK_ENV=development (not FLASK_DEBUG=1); as a safety check, you can run flask run --debugger just to make sure it's turned on; the Flask CLI will now automatically read things like FLASK_APP and FLASK_ENV if you have an .env file in the project root and have python-dotenv installed A: app.run(use_reloader=True) We can use use_reloader=True so that every time we reload the page, our code changes will be picked up. A: I got a different idea: First: pip install python-dotenv Install the python-dotenv module, which will read local preferences for your project environment. Second: Add a .flaskenv file in your project directory with the following line: FLASK_ENV=development It's done! With this config for your Flask project, when you run flask run you will see the development-mode output in your terminal. And when you edit your file, just save the change; you will see the auto-reloader restart the server for you. With more explanation: Of course you can manually hit export FLASK_ENV=development every time you need. But using a dedicated configuration file to handle the actual working environment seems like a better solution, so I strongly recommend this method I use. A: Use this method: app.run(debug=True) It will auto-reload the flask app when a code change happens. Sample code: from flask import Flask app = Flask(__name__) @app.route("/") def index(): return "Hello World" if __name__ == '__main__': app.run(debug=True) Well, if you want to save time by not reloading the webpage every time changes happen, then you can try the keyboard shortcut Ctrl + R to reload the page quickly. A: From the terminal you can simply say export FLASK_APP=app_name.py export FLASK_ENV=development flask run or in your file if __name__ == "__main__": app.run(debug=True) A: Flask applications can optionally be executed in debug mode. In this mode, two very convenient modules of the development server called the reloader and the debugger are enabled by default. When the reloader is enabled, Flask watches all the source code files of your project and automatically restarts the server when any of the files are modified. By default, debug mode is disabled. To enable it, set a FLASK_DEBUG=1 environment variable before invoking flask run: (venv) $ export FLASK_APP=hello.py for Windows use > set FLASK_APP=hello.py (venv) $ export FLASK_DEBUG=1 for Windows use > set FLASK_DEBUG=1 (venv) $ flask run * Serving Flask app "hello" * Forcing debug mode on * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) * Restarting with stat * Debugger is active! * Debugger PIN: 273-181-528 Having a server running with the reloader enabled is extremely useful during development, because every time you modify and save a source file, the server automatically restarts and picks up the change. A: Enable the reloader in flask 2.2: flask run --reload A: To achieve this in PyCharm, set the 'Environment Variables' section to: PYTHONUNBUFFERED=1; FLASK_DEBUG=1 for Flask 'run / debug configurations'. A: To help with fast automatic change in browser: pip install livereload from livereload import Server if __name__ == '__main__': server = Server(app.wsgi_app) server.serve() Next, start your server again, e.g. if your .py file is app.py: python app.py
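Tying this back to the question's Apache + mod_wsgi setup: a common workflow is to develop against Flask's built-in server with the reloader enabled, and only serve through Apache in production. A minimal sketch, with placeholder module and route names:
from flask import Flask

app = Flask(__name__)

@app.route("/")
def index():
    return "Hello, world"

if __name__ == "__main__":
    # Development only: the Werkzeug reloader restarts this server
    # whenever a watched .py file changes. Don't enable it under mod_wsgi.
    app.run(debug=True, use_reloader=True)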
Auto reloading python Flask app upon code changes
I'm investigating how to develop a decent web app with Python. Since I don't want some high-order structures to get in my way, my choice fell on the lightweight Flask framework. Time will tell if this was the right choice. So, now I've set up an Apache server with mod_wsgi, and my test site is running fine. However, I'd like to speed up the development routine by making the site automatically reload upon any changes in py or template files I make. I see that any changes in site's .wsgi file causes reloading (even without WSGIScriptReloading On in the apache config file), but I still have to prod it manually (ie, insert extra linebreak, save). Is there some way how to cause reload when I edit some of the app's py files? Or, I am expected to use IDE that refreshes the .wsgi file for me?
[ "Run the flask run CLI command with debug mode enabled, which will automatically enable the reloader. As of Flask 2.2, you can pass --app and --debug options on the command line.\n$ flask --app main.py --debug run\n\n--app can also be set to module:app or module:create_app instead of module.py. See the docs for a full explanation.\nMore options are available with:\n$ flask run --help\n\nPrior to Flask 2.2, you needed to set the FLASK_APP and FLASK_ENV=development environment variables.\n$ export FLASK_APP=main.py\n$ export FLASK_ENV=development\n$ flask run\n\nIt is still possible to set FLASK_APP and FLASK_DEBUG=1 in Flask 2.2.\n", "If you are talking about test/dev environments, then just use the debug option. It will auto-reload the flask app when a code change happens.\napp.run(debug=True)\n\nOr, from the shell:\n$ export FLASK_DEBUG=1\n$ flask run\n\nhttp://flask.palletsprojects.com/quickstart/#debug-mode\n", "In test/development environments\nThe werkzeug debugger already has an 'auto reload' function available that can be enabled by doing one of the following:\napp.run(debug=True)\n\nor\napp.debug = True\n\nYou can also use a separate configuration file to manage all your setup if you need be. For example I use 'settings.py' with a 'DEBUG = True' option. Importing this file is easy too;\napp.config.from_object('application.settings')\n\nHowever this is not suitable for a production environment.\nProduction environment\nPersonally I chose Nginx + uWSGI over Apache + mod_wsgi for a few performance reasons but also the configuration options. The touch-reload option allows you to specify a file/folder that will cause the uWSGI application to reload your newly deployed flask app.\nFor example, your update script pulls your newest changes down and touches 'reload_me.txt' file. Your uWSGI ini script (which is kept up by Supervisord - obviously) has this line in it somewhere:\ntouch-reload = '/opt/virtual_environments/application/reload_me.txt'\n\nI hope this helps! \n", "If you're running using uwsgi look at the python auto reload option:\nuwsgi --py-autoreload 1\n\nExample uwsgi-dev-example.ini:\n[uwsgi]\nsocket = 127.0.0.1:5000\nmaster = true\nvirtualenv = /Users/xxxx/.virtualenvs/sites_env\nchdir = /Users/xxx/site_root\nmodule = site_module:register_debug_server()\ncallable = app\nuid = myuser\nchmod-socket = 660\nlog-date = true\nworkers = 1\npy-autoreload = 1\n\nsite_root/__init__.py\ndef register_debug_server():\n from werkzeug.debug import DebuggedApplication\n\n app = Flask(__name__)\n app.debug = True\n app = DebuggedApplication(app, evalex=True)\n return app\n\nThen run:\nuwsgi --ini uwsgi-dev-example.ini\n\nNote: This example also enables the debugger.\nI went this route to mimic production as close as possible with my nginx setup. 
Simply running the flask app with it's built in web server behind nginx it would result in a bad gateway error.\n", "For Flask 1.0 until 2.2, the basic approach to hot re-loading is:\n$ export FLASK_APP=my_application\n$ export FLASK_ENV=development\n$ flask run\n\n\nyou should use FLASK_ENV=development (not FLASK_DEBUG=1)\nas a safety check, you can run flask run --debugger just to make sure it's turned on\nthe Flask CLI will now automatically read things like FLASK_APP and FLASK_ENV if you have an .env file in the project root and have python-dotenv installed\n\n", "app.run(use_reloader=True)\n\nwe can use this, use_reloader so every time we reload the page our code changes will be updated.\n", "I got a different idea:\nFirst:\npip install python-dotenv\n\nInstall the python-dotenv module, which will read local preference for your project environment.\nSecond:\nAdd .flaskenv file in your project directory. Add following code: \nFLASK_ENV=development\n\nIt's done!\nWith this config for your Flask project, when you run flask run and you will see this output in your terminal:\n\nAnd when you edit your file, just save the change. You will see auto-reload is there for you:\n\nWith more explanation:\nOf course you can manually hit export FLASK_ENV=development every time you need. But using different configuration file to handle the actual working environment seems like a better solution, so I strongly recommend this method I use.\n", "Use this method:\napp.run(debug=True)\n\nIt will auto-reload the flask app when a code change happens.\nSample code:\nfrom flask import Flask\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return \"Hello World\"\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\nWell, if you want save time not reloading the webpage everytime when changes happen, then you can try the keyboard shortcut Ctrl + R to reload the page quickly.\n", "From the terminal you can simply say\nexport FLASK_APP=app_name.py\nexport FLASK_ENV=development\nflask run\n\nor in your file\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n", "Flask applications can optionally be executed in debug mode. In this mode, two very convenient modules of the development server called the reloader and the debugger are enabled by default.\nWhen the reloader is enabled, Flask watches all the source code files of your project and automatically restarts the server when any of the files are modified.\nBy default, debug mode is disabled. 
To enable it, set a FLASK_DEBUG=1 environment variable before invoking flask run:\n(venv) $ export FLASK_APP=hello.py for Windows use > set FLASK_APP=hello.py\n\n(venv) $ export FLASK_DEBUG=1 for Windows use > set FLASK_DEBUG=1\n\n(venv) $ flask run\n\n* Serving Flask app \"hello\"\n* Forcing debug mode on\n* Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)\n* Restarting with stat\n* Debugger is active!\n* Debugger PIN: 273-181-528\n\nHaving a server running with the reloader enabled is extremely useful during development, because every time you modify and save a source file, the server automatically restarts and picks up the change.\n", "Enable the reloader in flask 2.2:\nflask run --reload\n\n", "To achieve this in PyCharm set 'Environment Variables' section to: \nPYTHONUNBUFFERED=1;\nFLASK_DEBUG=1\n\nFor Flask 'run / debug configurations'.\n", "To help with fast automatic change in browser:\npip install livereload\nfrom livereload import Server\n\nif __name__ == '__main__':\n server = Server(app.wsgi_app)\n server.serve()\n\nNext, Start your server again:\neg. your .py file is app.py\npython app.py\n" ]
[ 455, 299, 59, 26, 21, 15, 11, 11, 9, 3, 3, 1, 1 ]
[]
[]
[ "apache", "flask", "python" ]
stackoverflow_0016344756_apache_flask_python.txt
Q: why does not pyfirmata import? I just wanted to make Python and Arduino work together. I saw a tutorial that showed that we need a library called "Pyfirmata" to do it. When I type "pip install pyfirmata" in the command prompt, it shows that the library is already installed, but when I type "import pyfirmata" in Python it shows an error that the library does not exist. Please help me if you can. A: It works fine for me; check whether your IDE is using the same version of Python you installed pyfirmata with.
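A quick way to confirm the interpreter mismatch the answer describes, using only the standard library:
import sys

print(sys.executable)  # the interpreter your IDE is actually running
print(sys.path)        # where this interpreter looks for packages

# Then install pyfirmata into exactly that interpreter from a shell:
#     /full/path/to/python -m pip install pyfirmata
# (on Windows, for example:  py -m pip install pyfirmata)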
Why does pyfirmata not import?
I just wanted to make Python and Arduino work together. I saw a tutorial that showed that we need a library called "pyfirmata" to do it. When I type "pip install pyfirmata" in the command prompt, it shows that the library is already installed, but when I type "import pyfirmata" in Python it shows an error that the library does not exist. Please help me if you can.
[ "It works fine for me, check if your ide is using the same version of python you installed Pyfirmata with\n" ]
[ 0 ]
[]
[]
[ "arduino", "pyfirmata", "python" ]
stackoverflow_0074509458_arduino_pyfirmata_python.txt
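To make the answer above actionable: the usual cause is that pip installs into one interpreter while the IDE runs another. A minimal check, assuming nothing about the asker's setup beyond standard CPython; the install path in the comment is a placeholder:

# run this inside the IDE that fails to import pyfirmata
import sys
print(sys.executable)   # path of the interpreter the IDE actually uses

# then, in the command prompt, install into exactly that interpreter, e.g.:
#   C:\path\to\that\python.exe -m pip install pyfirmata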
Q: How to fix a Discord chatbot that doesn't reply to messages This is my Python code for the Discord chatbot that I want to create:
import discord
import os
from dotenv import load_dotenv
from neuralintents import GenericAssistant

intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)

chatbot = GenericAssistant('intents.json')
chatbot.train_model()
chatbot.save_model()

load_dotenv()
TOKEN = os.getenv('TOKEN')

@client.event
async def on_ready():
    print(f'We have logged in as {client.user}')

@client.event
async def on_message(message):
    if message.author == client.user:
        return
    if message.content.startswith("!"):
        response = chatbot.request(message.content[2:])
        await message.channel.send(response)

client.run(TOKEN)

And when I run the code, these errors occur:
ERROR discord.client Ignoring exception in on_message
Traceback (most recent call last):
  File "anaconda3\lib\site-packages\discord\client.py", line 409, in _run_event
    await coro(*args, **kwargs)
  File "chatbot.py", line 27, in on_message
    await message.channel.send(responses)
  File "anaconda3\lib\site-packages\discord\abc.py", line 1538, in send
    data = await state.http.send_message(channel.id, params=params)
  File "anaconda3\lib\site-packages\discord\http.py", line 744, in request
    raise HTTPException(response, data)
discord.errors.HTTPException: 400 Bad Request (error code: 50006): Cannot send an empty message

How can I solve these errors? The output should be like:
i: !hi
bot: hello
 A: response is an empty string. It then tries to send an empty string, yielding the error.
Edit: It seems you might want to look up the discord.py docs instead of using an alpha third-party library which provides nothing of value as of now.
How to fix a Discord chatbot that doesn't reply to messages
This is my Python code for the Discord chatbot that I want to create:
import discord
import os
from dotenv import load_dotenv
from neuralintents import GenericAssistant

intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)

chatbot = GenericAssistant('intents.json')
chatbot.train_model()
chatbot.save_model()

load_dotenv()
TOKEN = os.getenv('TOKEN')

@client.event
async def on_ready():
    print(f'We have logged in as {client.user}')

@client.event
async def on_message(message):
    if message.author == client.user:
        return
    if message.content.startswith("!"):
        response = chatbot.request(message.content[2:])
        await message.channel.send(response)

client.run(TOKEN)

And when I run the code, these errors occur:
ERROR discord.client Ignoring exception in on_message
Traceback (most recent call last):
  File "anaconda3\lib\site-packages\discord\client.py", line 409, in _run_event
    await coro(*args, **kwargs)
  File "chatbot.py", line 27, in on_message
    await message.channel.send(responses)
  File "anaconda3\lib\site-packages\discord\abc.py", line 1538, in send
    data = await state.http.send_message(channel.id, params=params)
  File "anaconda3\lib\site-packages\discord\http.py", line 744, in request
    raise HTTPException(response, data)
discord.errors.HTTPException: 400 Bad Request (error code: 50006): Cannot send an empty message

How can I solve these errors? The output should be like:
i: !hi
bot: hello
[ "response is an empty string. It then tries to send an empty string, yeilding the error.\nEdit: It seems you might want to look up discord.py docs instead of using an alpha third party library which provides nothing of value as of now.\n" ]
[ 1 ]
[]
[]
[ "discord", "discord.py", "python" ]
stackoverflow_0074509531_discord_discord.py_python.txt
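A minimal sketch of the guard the answer suggests: check the response before sending so the bot never hits error 50006. One incidental note on the question's code: message.content[2:] drops two characters, but "!" is a one-character prefix, so "!hi" becomes "i"; [1:] is likely what was intended. The fallback text below is just an example.

@client.event
async def on_message(message):
    if message.author == client.user:
        return
    if message.content.startswith("!"):
        response = chatbot.request(message.content[1:])  # "!hi" -> "hi"
        if response:  # never send an empty string (HTTP 400, code 50006)
            await message.channel.send(response)
        else:
            await message.channel.send("Sorry, I don't have a reply for that.")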
Q: Updating values inside a python list ItemList = [ {'name': 'item', 'item_code': '473', 'price': 0}, {'name': 'item', 'item_code': '510', 'price': 0}, {'name': 'item', 'item_code': '384', 'price': 0}, ] data_1 = '510' data_2 = 200 def update_item(data_1, data_2): for a in ItemList: if a['item_code'] == data_1: update_price = append(a['price'].data_2) return True I want to update the price by using the function update_item. It fails at update_price = append(a['price'].data_2) A: You can assign the value to the dictionary, with: def update_item(data_1, data_2): for a in ItemList: if a['item_code'] == data_1: a['price'] = data_2 return A: we can also use the dict update() method to solve this task: def update_item(data_1, data_2): for sub in ItemList: if data_1 in sub.values(): sub.update({'price': data_2})
Updating values inside a python list
ItemList = [ {'name': 'item', 'item_code': '473', 'price': 0}, {'name': 'item', 'item_code': '510', 'price': 0}, {'name': 'item', 'item_code': '384', 'price': 0}, ] data_1 = '510' data_2 = 200 def update_item(data_1, data_2): for a in ItemList: if a['item_code'] == data_1: update_price = append(a['price'].data_2) return True I want to update the price by using the function update_item. It fails at update_price = append(a['price'].data_2)
[ "You can assign the value to the dictionary, with:\ndef update_item(data_1, data_2):\n for a in ItemList:\n if a['item_code'] == data_1:\n a['price'] = data_2\n return\n", "we can also use the dict update() method to solve this task:\ndef update_item(data_1, data_2):\n for sub in ItemList:\n if data_1 in sub.values():\n sub.update({'price': data_2})\n\n" ]
[ 1, 1 ]
[]
[]
[ "django", "list", "python" ]
stackoverflow_0074508389_django_list_python.txt
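A short usage sketch of the first answer's version, with a True/False return added for illustration, showing that the update happens in place:

def update_item(data_1, data_2):
    for a in ItemList:
        if a['item_code'] == data_1:
            a['price'] = data_2
            return True
    return False

print(update_item('510', 200))  # True
print(ItemList[1])              # {'name': 'item', 'item_code': '510', 'price': 200}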
Q: Selenium presence_of_element_located look for children of an element I was wondering if it's possible to look for children of an element with the presence_of_element_located function. I know I could just use the entire path, but that would make my code more confusing due to it's nature. My code would look something like this (much more complicated but this is the important bit): currentEl = driver.find_element(By.XPATH, ("//*[@id='2']")) func(currentEl) def func(currentEl): #Wait for the element to appear WebDriverWait(driver, 10, poll_frequency=0.001, ignored_exceptions = (StaleElementReferenceException, NoSuchElementException)).until( EC.presence_of_element_located((currentEl, (By.CLASS_NAME, "class"))) ) return So basically I'm trying to wait for the child element with the class name "class" of the WebElement named currentEl. I'we tried many things and I'd hate to resort to just using the entire path. A: Child element with class name className can be located by relative XPath .//*[contains(@class,'className')] or with relative CSS Selector .className. So, I think your code can be modified to be currentEl = driver.find_element(By.XPATH, ("//*[@id='2']")) func(currentEl) def func(currentEl): #Wait for the element to appear WebDriverWait(driver, 10, poll_frequency=0.001, ignored_exceptions = (StaleElementReferenceException, NoSuchElementException)).until( EC.presence_of_element_located((currentEl, By.XPATH, ".//*[contains(@class,'className')]"))) return Or currentEl = driver.find_element(By.XPATH, ("//*[@id='2']")) func(currentEl) def func(currentEl): #Wait for the element to appear WebDriverWait(driver, 10, poll_frequency=0.001, ignored_exceptions = (StaleElementReferenceException, NoSuchElementException)).until( EC.presence_of_element_located((currentEl, By.CSS_SELECTOR, ".className"))) return UPD I'm not sure the structure above will work. What you can do is to pass correct locator to existing expected_conditions, as following: locator1 = (By.XPATH, "//*[@id='2']//*[contains(@class,'className')]") locator2 = (By.CSS_SELECTOR, "#2 .className") def wait_for_child_element(locator): WebDriverWait(driver, 30).wait.until(EC.presence_of_element_located(locator)) #call the method above with arguments: wait_for_child_element(locator1) wait_for_child_element(locator2)
Selenium presence_of_element_located look for children of an element
I was wondering if it's possible to look for children of an element with the presence_of_element_located function. I know I could just use the entire path, but that would make my code more confusing due to its nature. My code would look something like this (much more complicated, but this is the important bit):
currentEl = driver.find_element(By.XPATH, ("//*[@id='2']"))
func(currentEl)

def func(currentEl):
    #Wait for the element to appear
    WebDriverWait(driver, 10, poll_frequency=0.001, ignored_exceptions = (StaleElementReferenceException, NoSuchElementException)).until(
        EC.presence_of_element_located((currentEl, (By.CLASS_NAME, "class")))
    )
    return

So basically I'm trying to wait for the child element with the class name "class" of the WebElement named currentEl. I've tried many things and I'd hate to resort to just using the entire path.
[ "Child element with class name className can be located by relative XPath .//*[contains(@class,'className')] or with relative CSS Selector .className.\nSo, I think your code can be modified to be\ncurrentEl = driver.find_element(By.XPATH, (\"//*[@id='2']\"))\nfunc(currentEl)\n\ndef func(currentEl):\n #Wait for the element to appear\n WebDriverWait(driver, 10, poll_frequency=0.001, ignored_exceptions = (StaleElementReferenceException, NoSuchElementException)).until(\n EC.presence_of_element_located((currentEl, By.XPATH, \".//*[contains(@class,'className')]\")))\n return\n\nOr\ncurrentEl = driver.find_element(By.XPATH, (\"//*[@id='2']\"))\nfunc(currentEl)\n\ndef func(currentEl):\n #Wait for the element to appear\n WebDriverWait(driver, 10, poll_frequency=0.001, ignored_exceptions = (StaleElementReferenceException, NoSuchElementException)).until(\n EC.presence_of_element_located((currentEl, By.CSS_SELECTOR, \".className\")))\n return\n\nUPD\nI'm not sure the structure above will work.\nWhat you can do is to pass correct locator to existing expected_conditions, as following:\nlocator1 = (By.XPATH, \"//*[@id='2']//*[contains(@class,'className')]\")\nlocator2 = (By.CSS_SELECTOR, \"#2 .className\")\n\ndef wait_for_child_element(locator):\n WebDriverWait(driver, 30).wait.until(EC.presence_of_element_located(locator))\n\n#call the method above with arguments:\nwait_for_child_element(locator1)\nwait_for_child_element(locator2)\n\n" ]
[ 0 ]
[]
[]
[ "css_selectors", "python", "selenium", "selenium_chromedriver", "xpath" ]
stackoverflow_0074509419_css_selectors_python_selenium_selenium_chromedriver_xpath.txt
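One more variant worth noting for this record: WebDriverWait.until accepts any callable that takes the driver, so you can wait on a child of an already-located WebElement directly, without building an absolute locator. Also, the UPD snippet's WebDriverWait(driver, 30).wait.until(...) should read WebDriverWait(driver, 30).until(...). A minimal sketch, where ".className" is a placeholder selector and driver comes from the question's context:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait

def wait_for_child(driver, parent, timeout=10):
    # find_element raises NoSuchElementException until the child exists;
    # WebDriverWait ignores that exception by default and keeps polling
    return WebDriverWait(driver, timeout).until(
        lambda d: parent.find_element(By.CSS_SELECTOR, ".className")
    )

currentEl = driver.find_element(By.XPATH, "//*[@id='2']")
child = wait_for_child(driver, currentEl)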
Q: Adding a new column in one DataFrame where values are based from a second DataFrame I have two DataFrames, df_a is the DataFrame we want to manipulate. I want to add a new column but the values are found in a second DataFrame with a similar column name. Let me expound. df_a contains _ | Code | Speed | Velocity | 0 | DA | 23 | 22 | 1 | ES | 23 | 22 | 2 | DA | 23 | 22 | 3 | GA | 23 | 22 | 4 | NU | 23 | 22 | df_b contains _ | Code | Name | 0 | DA | DinoAero | 1 | ES | Espeed | 2 | GA | GeoArk | 3 | NU | NewUnicorn | I want to merge or concatenate these two DataFrames that the result should look like this: _ | Code | Name | Speed | Velocity | 0 | DA | DinoAero | 23 | 22 | 1 | ES | Espeed | 23 | 22 | 2 | DA | DinoAero | 23 | 22 | 3 | GA | GeoArk | 23 | 22 | 4 | NU | NewUnicorn | 23 | 22 | A: You just want to pd.merge() (which is similar to a SQL join). In your case: new_df = pd.merge(df_a,df_b,how='left',on='Code') new_df = new_df[['Code','Name','Speed','Velocity']] # if you want to re-arrange the columns in your order
Adding a new column in one DataFrame where values are based from a second DataFrame
I have two DataFrames, df_a is the DataFrame we want to manipulate. I want to add a new column but the values are found in a second DataFrame with a similar column name. Let me expound. df_a contains _ | Code | Speed | Velocity | 0 | DA | 23 | 22 | 1 | ES | 23 | 22 | 2 | DA | 23 | 22 | 3 | GA | 23 | 22 | 4 | NU | 23 | 22 | df_b contains _ | Code | Name | 0 | DA | DinoAero | 1 | ES | Espeed | 2 | GA | GeoArk | 3 | NU | NewUnicorn | I want to merge or concatenate these two DataFrames that the result should look like this: _ | Code | Name | Speed | Velocity | 0 | DA | DinoAero | 23 | 22 | 1 | ES | Espeed | 23 | 22 | 2 | DA | DinoAero | 23 | 22 | 3 | GA | GeoArk | 23 | 22 | 4 | NU | NewUnicorn | 23 | 22 |
[ "You just want to pd.merge() (which is similar to a SQL join).\nIn your case:\nnew_df = pd.merge(df_a,df_b,how='left',on='Code')\nnew_df = new_df[['Code','Name','Speed','Velocity']] # if you want to re-arrange the columns in your order\n\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "jupyter_notebook", "pandas", "python" ]
stackoverflow_0074509515_dataframe_jupyter_notebook_pandas_python.txt
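An alternative that fits the "add a new column" framing of the question above, assuming the Code values in df_b are unique: Series.map with an index lookup, which avoids the merge-and-reorder step.

df_a['Name'] = df_a['Code'].map(df_b.set_index('Code')['Name'])
df_a = df_a[['Code', 'Name', 'Speed', 'Velocity']]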
Q: How do I add operator precedence to a lark grammar for FOL with Equality? How do I modify this grammar so it matches parenthesis that are further away? ?wff: compound_wff ?compound_wff: biconditional_wff ?biconditional_wff: conditional_wff (SPACE? BICONDITIONAL_SYMBOL SPACE? biconditional_wff)* ?conditional_wff: disjunctive_wff (SPACE? CONDITIONAL_SYMBOL SPACE? conditional_wff)* ?disjunctive_wff: conjunctive_wff (SPACE? DISJUNCTION_SYMBOL SPACE? disjunctive_wff)* ?conjunctive_wff: negated_wff (SPACE? CONJUNCTION_SYMBOL SPACE? conjunctive_wff)* ?negated_wff: (NEGATION_SYMBOL SPACE)* atomic_wff ?atomic_wff: predicate | term EQUAL_TO term | quantified_wff* LEFT_PARENTHESIS SPACE? wff SPACE? RIGHT_PARENTHESIS ?term: function | NAME | VARIABLE ?predicate: PREDICATE_NAME [LEFT_PARENTHESIS term (COMMA term)* RIGHT_PARENTHESIS] ?function: FUNCTION_NAME LEFT_PARENTHESIS term (COMMA SPACE? term)* RIGHT_PARENTHESIS ?quantified_wff: curly_quantifiers | quantifiers ?curly_quantifiers: quantifier_symbol LEFT_CURLY_BRACE VARIABLE (COMMA SPACE? VARIABLE)* RIGHT_CURLY_BRACE SPACE? ?quantifiers: quantifier_symbol SPACE? VARIABLE (COMMA SPACE? VARIABLE)* SPACE? SPACE: /\s+/ COMMA: "," EQUAL_TO: "=" LEFT_PARENTHESIS: "(" RIGHT_PARENTHESIS: ")" LEFT_CURLY_BRACE: "{" RIGHT_CURLY_BRACE: "}" quantifier_symbol: UNIVERSAL_QUANTIFIER_SYMBOL | EXISTENTIAL_QUANTIFIER_SYMBOL UNIVERSAL_QUANTIFIER_SYMBOL: "\\forall" | "∀" EXISTENTIAL_QUANTIFIER_SYMBOL: "\\exists" | "∃" NAME: /[a-t]/ | /[a-t]_[1-9]\d*/ VARIABLE: /[u-z]/ | /[u-z]_[1-9]\d*/ PREDICATE_NAME: /[A-HJ-Z]/ | /[A-HJ-Z]_[1-9]\d*/ FUNCTION_NAME: /[a-z]/ | /[a-z]_[1-9]\d*/ NEGATION_SYMBOL: "\\neg" | "\\lnot" | "¬" CONJUNCTION_SYMBOL: "\\wedge" | "\\land" | "∧" DISJUNCTION_SYMBOL: "\\vee" | "\\lor" | "∨" CONDITIONAL_SYMBOL: "\\rightarrow" | "\\Rightarrow" | "\\Longrightarrow" | "\\implies" | "→" | "⇒" BICONDITIONAL_SYMBOL: "\\leftrightarrow" | "\\iff" | "↔" | "⇔" I'm trying to parse this using my grammar: Which in LaTeX is: \exists{x} \forall{y} (P(f(x, y)) \vee \forall{z}(V(z) \iff \neg R(a) \wedge B(a))) I followed the calculator example and modified my original grammar to add operator precedence which resulted in this. But it's no longer accepting the input string. I'm getting this error: lark.exceptions.UnexpectedCharacters: No terminal matches '\' in the current parser context, at line 1 col 35 \exists{x} \forall{y} (P(f(x, y)) \vee \forall{z}(V(z) \iff \neg R(a) \wed ^ Expected one of: * RIGHT_PARENTHESIS Ideally I want to force requiring parenthesis wherever possible except in the case of in front of a negated atomic_wff without parenthesis. This is to make sure only one parse tree is produced even on explicit ambiguity setting. How do I resolve this issue? Edit 1 I want to clarify that operator precedence for the same operators should be right associative. So P(a) ∧ Q(a) ∧ R(a) will resolve as P(a) ∧ (Q(a) ∧ R(a)) Edit 2 I have made the lark grammar easier to debug using proper terminals. It now parses the long latex equation, but is still ambiguous for simpler inputs like P(a) ∧ Q(a) ∧ R(a) which produces two parse trees. I still don't know what I am doing wrong. The grammar is doing right recursion and still failing to produce a single parse tree. Edit 3 My latest attempted solution works in every case except for giving negation a higher precedence. Any idea? ?wff: compound_wff compound_wff: biconditional_wff biconditional_wff: conditional_wff (SPACE? BICONDITIONAL_SYMBOL SPACE? conditional_wff)* conditional_wff: disjunctive_wff (SPACE? 
CONDITIONAL_SYMBOL SPACE? disjunctive_wff)* disjunctive_wff: conjunctive_wff (SPACE? DISJUNCTION_SYMBOL SPACE? conjunctive_wff)* conjunctive_wff: negated_wff (SPACE? CONJUNCTION_SYMBOL SPACE? negated_wff)* negated_wff: (NEGATION_SYMBOL SPACE?)* atomic_wff atomic_wff: predicate | term EQUAL_TO term | quantified_wff* LEFT_PARENTHESIS SPACE? wff SPACE? RIGHT_PARENTHESIS term: function | NAME | VARIABLE predicate: PREDICATE_NAME [LEFT_PARENTHESIS term (COMMA term)* RIGHT_PARENTHESIS] function: FUNCTION_NAME LEFT_PARENTHESIS term (COMMA term)* RIGHT_PARENTHESIS quantified_wff: curly_quantifiers | quantifiers curly_quantifiers: quantifier_symbol LEFT_CURLY_BRACE VARIABLE (COMMA SPACE? VARIABLE)* RIGHT_CURLY_BRACE SPACE? quantifiers: quantifier_symbol SPACE? VARIABLE (COMMA SPACE? VARIABLE)* SPACE? SPACE: /\s+/ COMMA: /,\s*/ EQUAL_TO: /\s*=\s*/ LEFT_PARENTHESIS: "(" RIGHT_PARENTHESIS: ")" LEFT_CURLY_BRACE: "{" RIGHT_CURLY_BRACE: "}" quantifier_symbol: UNIVERSAL_QUANTIFIER_SYMBOL | EXISTENTIAL_QUANTIFIER_SYMBOL UNIVERSAL_QUANTIFIER_SYMBOL: "\\forall" | "∀" EXISTENTIAL_QUANTIFIER_SYMBOL: "\\exists" | "∃" NAME: /[a-t]/ | /[a-t]_[1-9]\d*/ VARIABLE: /[u-z]/ | /[u-z]_[1-9]\d*/ PREDICATE_NAME: /[A-HJ-Z]/ | /[A-HJ-Z]_[1-9]\d*/ FUNCTION_NAME: /[a-z]/ | /[a-z]_[1-9]\d*/ NEGATION_SYMBOL: "\\neg" | "\\lnot" | "¬" CONJUNCTION_SYMBOL: "\\wedge" | "\\land" | "∧" DISJUNCTION_SYMBOL: "\\vee" | "\\lor" | "∨" CONDITIONAL_SYMBOL: "\\rightarrow" | "\\Rightarrow" | "\\Longrightarrow" | "\\implies" | "→" | "⇒" BICONDITIONAL_SYMBOL: "\\leftrightarrow" | "\\iff" | "↔" | "⇔" %ignore SPACE %ignore COMMA %ignore LEFT_CURLY_BRACE %ignore RIGHT_CURLY_BRACE So it fails to to produce a single parse tree for ¬P(a) ∧ Q(b). A: There are a couple of problems with this grammar. 1. Erroneous whitespace handling In the cascading precedence rules, each rule imposes the requirement for an additional SPACE following the leftmost symbol, even if the repetition is null. So in the cascade, these SPACEs add up as each cascading level adds it's own SPACE. (That's unsatisfiable, because your SPACE token matches any number of consecutive spaces.) So instead of: ?biconditional: conditional SPACE (biconditional_symbol SPACE conditional)* ?conditional: disjunction SPACE (conditional_symbol SPACE disjunction)* ?disjunction: conjunction SPACE (disjunction_symbol SPACE conjunction)* ?conjunction: negation SPACE (conjunction_symbol SPACE negation)* you need to move the SPACE inside the repetition group: ?biconditional: conditional (SPACE biconditional_symbol SPACE conditional)* ?conditional: disjunction (SPACE conditional_symbol SPACE disjunction)* ?disjunction: conjunction (SPACE disjunction_symbol SPACE conjunction)* ?conjunction: negation (SPACE conjunction_symbol SPACE negation)* (I have some doubts about the first two rules. Unlike disjunction and conjunction, implication is not associative, so a formula with two implication operators, P \implies Q \implies R, needs to be disambiguated, either with parentheses or precedence rules. \iff is associative, but I think P \iff Q \iff R is also a bit dubious. Personally, I'd change the repetition operator to an optionality operator in both of those rules.) But, frankly, I think you'd be better off just ignoring whitespace, as in the calculator example you claim to have based this grammar on, by removing all the SPACE tokens and adding: %import common.WS %ignore WS (or use WS_INLINE instead if you'd prefer your grammar to treat newlines as significant.) 2. 
The grammar requires parentheses where they shouldn't be needed. (Awaiting clarification from OP)
How do I add operator precedence to a lark grammar for FOL with Equality?
How do I modify this grammar so it matches parenthesis that are further away? ?wff: compound_wff ?compound_wff: biconditional_wff ?biconditional_wff: conditional_wff (SPACE? BICONDITIONAL_SYMBOL SPACE? biconditional_wff)* ?conditional_wff: disjunctive_wff (SPACE? CONDITIONAL_SYMBOL SPACE? conditional_wff)* ?disjunctive_wff: conjunctive_wff (SPACE? DISJUNCTION_SYMBOL SPACE? disjunctive_wff)* ?conjunctive_wff: negated_wff (SPACE? CONJUNCTION_SYMBOL SPACE? conjunctive_wff)* ?negated_wff: (NEGATION_SYMBOL SPACE)* atomic_wff ?atomic_wff: predicate | term EQUAL_TO term | quantified_wff* LEFT_PARENTHESIS SPACE? wff SPACE? RIGHT_PARENTHESIS ?term: function | NAME | VARIABLE ?predicate: PREDICATE_NAME [LEFT_PARENTHESIS term (COMMA term)* RIGHT_PARENTHESIS] ?function: FUNCTION_NAME LEFT_PARENTHESIS term (COMMA SPACE? term)* RIGHT_PARENTHESIS ?quantified_wff: curly_quantifiers | quantifiers ?curly_quantifiers: quantifier_symbol LEFT_CURLY_BRACE VARIABLE (COMMA SPACE? VARIABLE)* RIGHT_CURLY_BRACE SPACE? ?quantifiers: quantifier_symbol SPACE? VARIABLE (COMMA SPACE? VARIABLE)* SPACE? SPACE: /\s+/ COMMA: "," EQUAL_TO: "=" LEFT_PARENTHESIS: "(" RIGHT_PARENTHESIS: ")" LEFT_CURLY_BRACE: "{" RIGHT_CURLY_BRACE: "}" quantifier_symbol: UNIVERSAL_QUANTIFIER_SYMBOL | EXISTENTIAL_QUANTIFIER_SYMBOL UNIVERSAL_QUANTIFIER_SYMBOL: "\\forall" | "∀" EXISTENTIAL_QUANTIFIER_SYMBOL: "\\exists" | "∃" NAME: /[a-t]/ | /[a-t]_[1-9]\d*/ VARIABLE: /[u-z]/ | /[u-z]_[1-9]\d*/ PREDICATE_NAME: /[A-HJ-Z]/ | /[A-HJ-Z]_[1-9]\d*/ FUNCTION_NAME: /[a-z]/ | /[a-z]_[1-9]\d*/ NEGATION_SYMBOL: "\\neg" | "\\lnot" | "¬" CONJUNCTION_SYMBOL: "\\wedge" | "\\land" | "∧" DISJUNCTION_SYMBOL: "\\vee" | "\\lor" | "∨" CONDITIONAL_SYMBOL: "\\rightarrow" | "\\Rightarrow" | "\\Longrightarrow" | "\\implies" | "→" | "⇒" BICONDITIONAL_SYMBOL: "\\leftrightarrow" | "\\iff" | "↔" | "⇔" I'm trying to parse this using my grammar: Which in LaTeX is: \exists{x} \forall{y} (P(f(x, y)) \vee \forall{z}(V(z) \iff \neg R(a) \wedge B(a))) I followed the calculator example and modified my original grammar to add operator precedence which resulted in this. But it's no longer accepting the input string. I'm getting this error: lark.exceptions.UnexpectedCharacters: No terminal matches '\' in the current parser context, at line 1 col 35 \exists{x} \forall{y} (P(f(x, y)) \vee \forall{z}(V(z) \iff \neg R(a) \wed ^ Expected one of: * RIGHT_PARENTHESIS Ideally I want to force requiring parenthesis wherever possible except in the case of in front of a negated atomic_wff without parenthesis. This is to make sure only one parse tree is produced even on explicit ambiguity setting. How do I resolve this issue? Edit 1 I want to clarify that operator precedence for the same operators should be right associative. So P(a) ∧ Q(a) ∧ R(a) will resolve as P(a) ∧ (Q(a) ∧ R(a)) Edit 2 I have made the lark grammar easier to debug using proper terminals. It now parses the long latex equation, but is still ambiguous for simpler inputs like P(a) ∧ Q(a) ∧ R(a) which produces two parse trees. I still don't know what I am doing wrong. The grammar is doing right recursion and still failing to produce a single parse tree. Edit 3 My latest attempted solution works in every case except for giving negation a higher precedence. Any idea? ?wff: compound_wff compound_wff: biconditional_wff biconditional_wff: conditional_wff (SPACE? BICONDITIONAL_SYMBOL SPACE? conditional_wff)* conditional_wff: disjunctive_wff (SPACE? CONDITIONAL_SYMBOL SPACE? disjunctive_wff)* disjunctive_wff: conjunctive_wff (SPACE? 
DISJUNCTION_SYMBOL SPACE? conjunctive_wff)* conjunctive_wff: negated_wff (SPACE? CONJUNCTION_SYMBOL SPACE? negated_wff)* negated_wff: (NEGATION_SYMBOL SPACE?)* atomic_wff atomic_wff: predicate | term EQUAL_TO term | quantified_wff* LEFT_PARENTHESIS SPACE? wff SPACE? RIGHT_PARENTHESIS term: function | NAME | VARIABLE predicate: PREDICATE_NAME [LEFT_PARENTHESIS term (COMMA term)* RIGHT_PARENTHESIS] function: FUNCTION_NAME LEFT_PARENTHESIS term (COMMA term)* RIGHT_PARENTHESIS quantified_wff: curly_quantifiers | quantifiers curly_quantifiers: quantifier_symbol LEFT_CURLY_BRACE VARIABLE (COMMA SPACE? VARIABLE)* RIGHT_CURLY_BRACE SPACE? quantifiers: quantifier_symbol SPACE? VARIABLE (COMMA SPACE? VARIABLE)* SPACE? SPACE: /\s+/ COMMA: /,\s*/ EQUAL_TO: /\s*=\s*/ LEFT_PARENTHESIS: "(" RIGHT_PARENTHESIS: ")" LEFT_CURLY_BRACE: "{" RIGHT_CURLY_BRACE: "}" quantifier_symbol: UNIVERSAL_QUANTIFIER_SYMBOL | EXISTENTIAL_QUANTIFIER_SYMBOL UNIVERSAL_QUANTIFIER_SYMBOL: "\\forall" | "∀" EXISTENTIAL_QUANTIFIER_SYMBOL: "\\exists" | "∃" NAME: /[a-t]/ | /[a-t]_[1-9]\d*/ VARIABLE: /[u-z]/ | /[u-z]_[1-9]\d*/ PREDICATE_NAME: /[A-HJ-Z]/ | /[A-HJ-Z]_[1-9]\d*/ FUNCTION_NAME: /[a-z]/ | /[a-z]_[1-9]\d*/ NEGATION_SYMBOL: "\\neg" | "\\lnot" | "¬" CONJUNCTION_SYMBOL: "\\wedge" | "\\land" | "∧" DISJUNCTION_SYMBOL: "\\vee" | "\\lor" | "∨" CONDITIONAL_SYMBOL: "\\rightarrow" | "\\Rightarrow" | "\\Longrightarrow" | "\\implies" | "→" | "⇒" BICONDITIONAL_SYMBOL: "\\leftrightarrow" | "\\iff" | "↔" | "⇔" %ignore SPACE %ignore COMMA %ignore LEFT_CURLY_BRACE %ignore RIGHT_CURLY_BRACE So it fails to to produce a single parse tree for ¬P(a) ∧ Q(b).
[ "There are a couple of problems with this grammar.\n1. Erroneous whitespace handling\nIn the cascading precedence rules, each rule imposes the requirement for an additional SPACE following the leftmost symbol, even if the repetition is null. So in the cascade, these SPACEs add up as each cascading level adds it's own SPACE. (That's unsatisfiable, because your SPACE token matches any number of consecutive spaces.)\nSo instead of:\n?biconditional: conditional SPACE (biconditional_symbol SPACE conditional)*\n?conditional: disjunction SPACE (conditional_symbol SPACE disjunction)*\n?disjunction: conjunction SPACE (disjunction_symbol SPACE conjunction)*\n?conjunction: negation SPACE (conjunction_symbol SPACE negation)*\n\nyou need to move the SPACE inside the repetition group:\n?biconditional: conditional (SPACE biconditional_symbol SPACE conditional)*\n?conditional: disjunction (SPACE conditional_symbol SPACE disjunction)*\n?disjunction: conjunction (SPACE disjunction_symbol SPACE conjunction)*\n?conjunction: negation (SPACE conjunction_symbol SPACE negation)*\n\n(I have some doubts about the first two rules. Unlike disjunction and conjunction, implication is not associative, so a formula with two implication operators, P \\implies Q \\implies R, needs to be disambiguated, either with parentheses or precedence rules. \\iff is associative, but I think P \\iff Q \\iff R is also a bit dubious. Personally, I'd change the repetition operator to an optionality operator in both of those rules.)\nBut, frankly, I think you'd be better off just ignoring whitespace, as in the calculator example you claim to have based this grammar on, by removing all the SPACE tokens and adding:\n%import common.WS\n%ignore WS\n\n(or use WS_INLINE instead if you'd prefer your grammar to treat newlines as significant.)\n2. The grammar requires parentheses where they shouldn't be needed.\n(Awaiting clarification from OP)\n" ]
[ 0 ]
[]
[]
[ "bnf", "grammar", "lark_parser", "parsing", "python" ]
stackoverflow_0074507340_bnf_grammar_lark_parser_parsing_python.txt
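A minimal self-contained sketch of the "ignore whitespace" advice from the answer above, using a cut-down two-level precedence cascade rather than the asker's full FOL grammar (the grammar below is illustrative only):

from lark import Lark

grammar = r"""
?start: disjunction
?disjunction: conjunction (DISJUNCTION conjunction)*
?conjunction: atom (CONJUNCTION atom)*
?atom: NAME "(" NAME ")" | "(" disjunction ")"
NAME: /[A-Za-z]/
CONJUNCTION: "∧"
DISJUNCTION: "∨"
%import common.WS
%ignore WS
"""

parser = Lark(grammar)
# one unambiguous tree, with no SPACE tokens threaded through the rules
print(parser.parse("P(a) ∧ Q(a) ∧ R(a)").pretty())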
Q: Python - Use multiple str.startswith() in a for loop get their specific values The below function parses multiple csv files in a directory and takes out values using str.startwith(). It works find using 'firstline.startswith('TrakPro')' and 'txt.startswith('Serial')'. However, when I add a third str.startwith() i.e. txt2.startswith('Test'), nothing prints out, no error, appears to ignore it. What do I need to change? Basically I want to add multiple str.startwith() in the for loop pulling out various key words after the ':". def get_csv_file_list(root): for r, d, f in os.walk(root): for file in f: if file.endswith('.csv'): path = os.path.join(r, file) dir_name = path.split(os.path.sep)[-2] file_name = os.path.basename(file) try: with open(path) as k: firstline = k.readline() if firstline.startswith('TrakPro'): file_list.append(path) file_list.append(dir_name) file_list.append(file_name) txt = 'Serial Number:' if txt.startswith('Serial'): for row in list(k)[3:4]: file_list.append(row[15:26]) txt2 = 'Test Name:' if txt2.startswith('Test'): for rows in list(k)[4:5]: print(rows) file_list.append(row[11:]) The csv looks like this: TrakPro Version 5.2.0.0 ASCII Data File Instrument Name:,SidePak Model Number:,TK0W02 Serial Number:,120k2136005 Test Name:,13270 Start Date:,04/17/2021 Start Time:,01:53:29 Duration (dd:hh:mm:ss):,00:07:13:00 Log Interval (mm:ss):,01:00 Number of points:,433 Description:, So far I have tried the above code, I expected to print out values in the 'Test Name' line of the csv sample. The function does not print out anything, no error. Tks A: To print only the value of the line that starts with Test Name: you can use following code: with open("your_file.csv", "r") as f_in: for line in map(str.strip, f_in): if line.startswith("Test Name:"): _, value = line.split(",", maxsplit=1) print(value) Prints: 13270 A: As I said in my comment, you are consuming k as you go. To see what is going on, run the following: file_name = "./abc.csv" file_list=[] with open(file_name) as k: firstline = k.readline() # Consume first line if firstline.startswith('TrakPro'): print("First line:\n", firstline) file_list.append(file_name) txt = list(k) # here k is the file contents minus the first line print("Contents of list\n", txt) if str(txt[2]).startswith('Serial'): file_list.append(str(txt[2])[15:26]) if str(txt[3]).startswith('Test'): file_list.append(str(txt[3].strip())[11:]) print("\nResult:\n",file_list) Where the contents of abc.csv are: TrakPro Version 5.2.0.0 ASCII Data File Instrument Name:,SidePak Model Number:,TK0W02 Serial Number:,120k2136005 Test Name:,13270 Start Date:,04/17/2021 Start Time:,01:53:29 Duration (dd:hh:mm:ss):,00:07:13:00 Log Interval (mm:ss):,01:00 Number of points:,433 Description:, The result should be: First line: TrakPro Version 5.2.0.0 ASCII Data File Contents of list ['Instrument Name:,SidePak\n', 'Model Number:,TK0W02\n', 'Serial Number:,120k2136005\n', 'Test Name:,13270\n', 'Start Date:,04/17/2021\n', 'Start Time:,01:53:29\n', 'Duration (dd:hh:mm:ss):,00:07:13:00\n', 'Log Interval (mm:ss):,01:00\n', 'Number of points:,433\n', 'Description:,\n'] Result: ['./abc.csv', '120k2136005', '13270']
Python - Use multiple str.startswith() in a for loop get their specific values
The below function parses multiple csv files in a directory and takes out values using str.startswith(). It works fine using 'firstline.startswith('TrakPro')' and 'txt.startswith('Serial')'. However, when I add a third str.startswith(), i.e. txt2.startswith('Test'), nothing prints out and there is no error; it appears to ignore it. What do I need to change? Basically I want to add multiple str.startswith() calls in the for loop, pulling out various key words after the ':'.
def get_csv_file_list(root):
    for r, d, f in os.walk(root):
        for file in f:
            if file.endswith('.csv'):
                path = os.path.join(r, file)
                dir_name = path.split(os.path.sep)[-2]
                file_name = os.path.basename(file)
                try:
                    with open(path) as k:
                        firstline = k.readline()
                        if firstline.startswith('TrakPro'):
                            file_list.append(path)
                            file_list.append(dir_name)
                            file_list.append(file_name)
                            txt = 'Serial Number:'
                            if txt.startswith('Serial'):
                                for row in list(k)[3:4]:
                                    file_list.append(row[15:26])
                            txt2 = 'Test Name:'
                            if txt2.startswith('Test'):
                                for rows in list(k)[4:5]:
                                    print(rows)
                                    file_list.append(row[11:])

The csv looks like this:
TrakPro Version 5.2.0.0 ASCII Data File
Instrument Name:,SidePak
Model Number:,TK0W02
Serial Number:,120k2136005
Test Name:,13270
Start Date:,04/17/2021
Start Time:,01:53:29
Duration (dd:hh:mm:ss):,00:07:13:00
Log Interval (mm:ss):,01:00
Number of points:,433
Description:,

So far I have tried the above code; I expected it to print out the values in the 'Test Name' line of the csv sample. The function does not print out anything, and there is no error. Tks
[ "To print only the value of the line that starts with Test Name: you can use following code:\nwith open(\"your_file.csv\", \"r\") as f_in:\n for line in map(str.strip, f_in):\n if line.startswith(\"Test Name:\"):\n _, value = line.split(\",\", maxsplit=1)\n print(value)\n\nPrints:\n13270\n\n", "As I said in my comment, you are consuming k as you go.\nTo see what is going on, run the following:\nfile_name = \"./abc.csv\"\nfile_list=[]\nwith open(file_name) as k:\n firstline = k.readline() # Consume first line\n if firstline.startswith('TrakPro'):\n print(\"First line:\\n\", firstline)\n file_list.append(file_name)\n txt = list(k) # here k is the file contents minus the first line\n print(\"Contents of list\\n\", txt)\n if str(txt[2]).startswith('Serial'):\n file_list.append(str(txt[2])[15:26])\n if str(txt[3]).startswith('Test'):\n file_list.append(str(txt[3].strip())[11:])\nprint(\"\\nResult:\\n\",file_list)\n\nWhere the contents of abc.csv are:\nTrakPro Version 5.2.0.0 ASCII Data File\nInstrument Name:,SidePak\nModel Number:,TK0W02\nSerial Number:,120k2136005\nTest Name:,13270\nStart Date:,04/17/2021\nStart Time:,01:53:29\nDuration (dd:hh:mm:ss):,00:07:13:00\nLog Interval (mm:ss):,01:00\nNumber of points:,433\nDescription:,\n\nThe result should be:\nFirst line:\n TrakPro Version 5.2.0.0 ASCII Data File\n\nContents of list\n ['Instrument Name:,SidePak\\n', 'Model Number:,TK0W02\\n', 'Serial Number:,120k2136005\\n', 'Test Name:,13270\\n', 'Start Date:,04/17/2021\\n', 'Start Time:,01:53:29\\n', 'Duration (dd:hh:mm:ss):,00:07:13:00\\n', 'Log Interval (mm:ss):,01:00\\n', 'Number of points:,433\\n', 'Description:,\\n']\n\nResult:\n ['./abc.csv', '120k2136005', '13270']\n\n" ]
[ 2, 0 ]
[]
[]
[ "csv", "python", "startswith", "string" ]
stackoverflow_0074496853_csv_python_startswith_string.txt
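A compact variant of the same idea as the answers above, scanning the file once and collecting several "Key:,value" header fields by prefix, splitting on the comma instead of hard-coded slice offsets (the field names are taken from the sample CSV; the dict keys are just example labels):

wanted = {"Serial Number:": "serial", "Test Name:": "test_name", "Start Date:": "date"}

def read_header_fields(path):
    fields = {}
    with open(path) as f:
        for line in f:
            for prefix, key in wanted.items():
                if line.startswith(prefix):
                    fields[key] = line.split(",", 1)[1].strip()
    return fields

print(read_header_fields("abc.csv"))
# {'serial': '120k2136005', 'test_name': '13270', 'date': '04/17/2021'}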
Q: FastAPI returns "Error 422: Unprocessable entity" when I send multipart form data with JavaScript Fetch API I have some issue with using Fetch API JavaScript method when sending some simple formData like so: function register() { var formData = new FormData(); var textInputName = document.getElementById('textInputName'); var sexButtonActive = document.querySelector('#buttonsMW > .btn.active'); var imagesInput = document.getElementById('imagesInput'); formData.append('name', textInputName.value); if (sexButtonActive != null){ formData.append('sex', sexButtonActive.html()) } else { formData.append('sex', ""); } formData.append('images', imagesInput.files[0]); fetch('/user/register', { method: 'POST', data: formData, }) .then(response => response.json()); } document.querySelector("form").addEventListener("submit", register); And on the server side (FastAPI): @app.post("/user/register", status_code=201) def register_user(name: str = Form(...), sex: str = Form(...), images: List[UploadFile] = Form(...)): try: print(name) print(sex) print(images) return "OK" except Exception as err: print(err) print(traceback.format_exc()) return "Error" After clicking on the submit button I get Error 422: Unprocessable entity. So, if I'm trying to add header Content-Type: multipart/form-data, it also doesn't help cause I get another Error 400: Bad Request. I want to understand what I am doing wrong, and how to process formData without such errors? A: The 422 response body will contain an error message about which field(s) is missing or doesn’t match the expected format. Since you haven't provided that (please do so), my guess is that the error is triggered due to how you defined the images parameter in your endpoint. Since images is expected to be a List of File(s), you should instead define it using the File type instead of Form. For example: images: List[UploadFile] = File(...) ^^^^ When using UploadFile, you don't have to use File() in the default value of the parameter. Hence, the below should also work: images: List[UploadFile] Additionally, in the frontend, make sure to use the body (not data) parameter in the fetch() function to pass the FormData object (see example in MDN Web Docs). For instance: fetch('/user/register', { method: 'POST', body: formData, }) .then(res => {... Please have a look at this answer, as well as this answer, which provide working examples on how to upload multiple files and form data to a FastAPI backend, using Fetch API in the frontend. As for manually specifying the Content-Type when sending multipart/form-data, you don't have to (and shouldn't) do that—please take a look at this answer for more details. A: So, I found that I has error in this part of code: formData.append('images', imagesInput.files[0]); Right way to upload multiple files is: for (const image of imagesInput.files) { formData.append('images', image); } Also, we should use File in FastAPI method arguments images: List[UploadFile] = File(...) (instead of Form) and change data to body in JS method. It's not an error, cause after method called, we get right type of data, for example: Name: Bob Sex: Man Images: [<starlette.datastructures.UploadFile object at 0x7fe07abf04f0>]
FastAPI returns "Error 422: Unprocessable entity" when I send multipart form data with JavaScript Fetch API
I have some issue with using Fetch API JavaScript method when sending some simple formData like so: function register() { var formData = new FormData(); var textInputName = document.getElementById('textInputName'); var sexButtonActive = document.querySelector('#buttonsMW > .btn.active'); var imagesInput = document.getElementById('imagesInput'); formData.append('name', textInputName.value); if (sexButtonActive != null){ formData.append('sex', sexButtonActive.html()) } else { formData.append('sex', ""); } formData.append('images', imagesInput.files[0]); fetch('/user/register', { method: 'POST', data: formData, }) .then(response => response.json()); } document.querySelector("form").addEventListener("submit", register); And on the server side (FastAPI): @app.post("/user/register", status_code=201) def register_user(name: str = Form(...), sex: str = Form(...), images: List[UploadFile] = Form(...)): try: print(name) print(sex) print(images) return "OK" except Exception as err: print(err) print(traceback.format_exc()) return "Error" After clicking on the submit button I get Error 422: Unprocessable entity. So, if I'm trying to add header Content-Type: multipart/form-data, it also doesn't help cause I get another Error 400: Bad Request. I want to understand what I am doing wrong, and how to process formData without such errors?
[ "The 422 response body will contain an error message about which field(s) is missing or doesn’t match the expected format. Since you haven't provided that (please do so), my guess is that the error is triggered due to how you defined the images parameter in your endpoint. Since images is expected to be a List of File(s), you should instead define it using the File type instead of Form. For example:\nimages: List[UploadFile] = File(...)\n ^^^^ \n\nWhen using UploadFile, you don't have to use File() in the default value of the parameter. Hence, the below should also work:\nimages: List[UploadFile]\n\nAdditionally, in the frontend, make sure to use the body (not data) parameter in the fetch() function to pass the FormData object (see example in MDN Web Docs). For instance:\nfetch('/user/register', {\n method: 'POST',\n body: formData,\n })\n .then(res => {...\n\nPlease have a look at this answer, as well as this answer, which provide working examples on how to upload multiple files and form data to a FastAPI backend, using Fetch API in the frontend. As for manually specifying the Content-Type when sending multipart/form-data, you don't have to (and shouldn't) do that—please take a look at this answer for more details.\n", "So, I found that I has error in this part of code:\nformData.append('images', imagesInput.files[0]);\n\nRight way to upload multiple files is:\nfor (const image of imagesInput.files) {\n formData.append('images', image);\n}\n\nAlso, we should use File in FastAPI method arguments images: List[UploadFile] = File(...) (instead of Form) and change data to body in JS method. It's not an error, cause after method called, we get right type of data, for example:\nName: Bob\nSex: Man\nImages: [<starlette.datastructures.UploadFile object at 0x7fe07abf04f0>]\n\n" ]
[ 1, 0 ]
[]
[]
[ "fastapi", "fetch", "fetch_api", "javascript", "python" ]
stackoverflow_0074507306_fastapi_fetch_fetch_api_javascript_python.txt
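Putting the two fixes from this record together (body= instead of data= in fetch, and File(...) for the uploads), a minimal sketch of the endpoint side; the echoed dict is only for illustration:

from typing import List
from fastapi import FastAPI, File, Form, UploadFile

app = FastAPI()

@app.post("/user/register", status_code=201)
async def register_user(
    name: str = Form(...),
    sex: str = Form(...),
    images: List[UploadFile] = File(...),
):
    # python-multipart must be installed for Form/File parsing
    return {"name": name, "sex": sex, "images": [f.filename for f in images]}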
Q: How do I generate a vector from a pandas dataframe? Below is a screenshot of a csv file. I want to generate a vector of growth rates by using pandas. 1 The growth rate is defined as log(this year/previous year) in my case. Thank you very much! A: Welcome to SO. This should work but you should read up on how to ask good questions: import numpy as np import pandas as pd np.random.seed(42) YearCode = np.arange(1970, 1975) df = pd.DataFrame(np.random.rand(5, 3), columns = ['VariableCode', 'Region', 'AggValue']) df['YearCode'] = YearCode shifted = df['YearCode'].shift(1) df['logValue'] = df['YearCode']/shifted df['logValue'] = np.log(df['logValue']) print(df) VariableCode Region AggValue YearCode logValue 0 0.374540 0.950714 0.731994 1970 NaN 1 0.598658 0.156019 0.155995 1971 0.000507 2 0.058084 0.866176 0.601115 1972 0.000507 3 0.708073 0.020584 0.969910 1973 0.000507 4 0.832443 0.212339 0.181825 1974 0.000507 How it Works: Essentially you shift the 'YearCode' by 1 and divide the original column by the shifted one.
How do I generate a vector from a pandas dataframe?
Below is a screenshot of a CSV file (screenshot not included here). I want to generate a vector of growth rates using pandas. The growth rate is defined as log(this year / previous year) in my case. Thank you very much!
[ "Welcome to SO. This should work but you should read up on how to ask good questions:\nimport numpy as np\nimport pandas as pd\n\nnp.random.seed(42)\nYearCode = np.arange(1970, 1975)\ndf = pd.DataFrame(np.random.rand(5, 3), columns = ['VariableCode', 'Region', 'AggValue'])\ndf['YearCode'] = YearCode\nshifted = df['YearCode'].shift(1)\ndf['logValue'] = df['YearCode']/shifted\ndf['logValue'] = np.log(df['logValue'])\nprint(df)\n VariableCode Region AggValue YearCode logValue\n0 0.374540 0.950714 0.731994 1970 NaN\n1 0.598658 0.156019 0.155995 1971 0.000507\n2 0.058084 0.866176 0.601115 1972 0.000507\n3 0.708073 0.020584 0.969910 1973 0.000507\n4 0.832443 0.212339 0.181825 1974 0.000507\n\nHow it Works:\nEssentially you shift the 'YearCode' by 1 and divide the original column by the shifted one.\n" ]
[ 0 ]
[]
[]
[ "dataframe", "pandas", "python", "quantitative_finance" ]
stackoverflow_0074509473_dataframe_pandas_python_quantitative_finance.txt
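One caveat on the snippet above: it takes the log ratio of YearCode, whereas the question defines the growth rate on the values themselves. Assuming the rate should be computed on the AggValue column, the same shift trick (or an equivalent log-diff) gives:

import numpy as np

df['growth'] = np.log(df['AggValue'] / df['AggValue'].shift(1))
# equivalently, since log(a/b) = log(a) - log(b):
df['growth'] = np.log(df['AggValue']).diff()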
Q: Every time I run this function it is slower and slower I am trying to do code a simple game in python using tkinter where a block jumps over obstacles, however I got stuck on the jumping part. Every time I call the jump function it jumps slower and slower, and I don't know the reason. Ty in advance. import time import tkinter import random bg = "white" f = 2 k=0 t = 0.01 groundLevel = 550 root = tkinter.Tk() root.geometry("1000x600") canvas = tkinter.Canvas(root,width = 1000,height = 1000,bg = bg) canvas.pack(fill= tkinter.BOTH, expand= True) posX = 50 posY= 530 startButton = tkinter.Button(canvas,text=" Start ") def startPlayer(xx,yy): canvas.create_rectangle(xx-20,yy-22,xx+20,yy+18,fill = "orange") return(xx,yy) def move(x,y,x2,y2,direction,fill,outline): global f #direction 0 = up #direction 1 = down #direction 2 = left #direction 3 = right if direction == 0: canvas.create_rectangle(x,y,x2,y2,fill="cyan",outline="cyan") canvas.create_rectangle(x,y-f,x2,y2-f,fill=fill,outline=outline) if direction == 1: canvas.create_rectangle(x,y,x2,y2,fill="cyan",outline="cyan") canvas.create_rectangle(x,y+f,x2,y2+f,fill=fill,outline=outline) if direction == 2: canvas.create_rectangle(x,y,x2,y2,fill="cyan",outline="cyan") canvas.create_rectangle(x,y,x2,y2,fill=fill,outline=outline) if direction == 3: canvas.create_rectangle(x,y,x2,y2,fill="cyan",outline="cyan") canvas.create_rectangle(x,y,x2,y2,fill=fill,outline=outline) def playerJump(): global groundLevel, f, k,posX,posY,t while k != 1: move(posX-20,posY-22,posX+20,posY+18,direction = 0, fill = "orange",outline = "black") posY -= 2 canvas.update() if (posY) == 480: k = 1 time.sleep(t) k = 0 while k != 1: move(posX-20,posY-22,posX+20,posY+18,direction = 1, fill = "orange",outline = "black") posY += 2 canvas.update() if (posY) == 530: k = 1 time.sleep(t) k = 0 def start(): canvas.create_rectangle(0,0,1000,600,fill="cyan") canvas.create_line(0,550,1000,550,width = 3) startButton.destroy() startPlayer(50,530) startGameButton = tkinter.Button(canvas, text ="Go!",command = playerJump) startGameButton.place(x = 35, y=400) return(startGameButton) def resetButton(): global startGameButton startGameButton.destroy() startGameButton = tkinter.Button(canvas, text ="Go!",command = playerJump) startGameButton.place(x = 35, y=400) startImage = tkinter.PhotoImage(file="C:/Users/marti/OneDrive/Desktop/Wheel finder/startSign.png") canvas.create_rectangle(0,0,1000,1000,fill="green") startButton.config(image = startImage,command = start) startButton.place(x = 130, y= 25) canvas.create_rectangle(300,400,700,500,fill="#113B08",outline = "black",width = 3) canvas.create_text(500,450,text = "By: --------", font = "Arial 30",fill ="white") I shrinking the sleep time every time it runs so its faster, but that is only a temporary solution and it didn't even work. A: Problem with your code is you are always adding new items into your canvas. When you jump you update orange rectangle and repaint its old place. However they stack top of each other and handling too many elements makes slower your program. We create player and return it to main function. def startPlayer(xx,yy): player=canvas.create_rectangle(xx-20,yy-22,xx+20,yy+18,fill = "orange") return player This is new start function. Check that we get player and send to playerjump function. 
def start(): canvas.create_rectangle(0,0,1000,600,fill="cyan") canvas.create_line(0,550,1000,550,width = 3) startButton.destroy() player = startPlayer(50,530) startGameButton = tkinter.Button(canvas, text ="Go!",command = lambda :playerJump(player)) startGameButton.place(x = 35, y=400) return(startGameButton) And this is playerjump function.We get player and sent to move function. def playerJump(player): global groundLevel, f, k,posX,posY,t while k != 1: move(posX-20,posY-22,posX+20,posY+18,direction = 0, fill = "orange",outline = "black",player=player) posY -= 2 canvas.update() if (posY) == 480: k = 1 time.sleep(t) k = 0 while k != 1: move(posX-20,posY-22,posX+20,posY+18,direction = 1, fill = "orange",outline = "black",player=player) posY += 2 canvas.update() if (posY) == 530: k = 1 time.sleep(t) k = 0 Except move lines, I didn't change anything in this function. Okay now let's check key part. def move(x,y,x2,y2,direction,fill,outline,player): global f #direction 0 = up #direction 1 = down #direction 2 = left #direction 3 = right if direction == 0: canvas.coords(player,x,y-f,x2,y2-f) if direction == 1: canvas.coords(player,x,y+f,x2,y2+f) if direction == 2: canvas.coords(player,x,y,x2,y2) if direction == 3: canvas.coords(player,x,y,x2,y2) look that instead of creating new rectangles,we updated existing one. which is much more stable Also you forgot adding root.mainloop in your code snippet.
Every time I run this function it is slower and slower
I am trying to do code a simple game in python using tkinter where a block jumps over obstacles, however I got stuck on the jumping part. Every time I call the jump function it jumps slower and slower, and I don't know the reason. Ty in advance. import time import tkinter import random bg = "white" f = 2 k=0 t = 0.01 groundLevel = 550 root = tkinter.Tk() root.geometry("1000x600") canvas = tkinter.Canvas(root,width = 1000,height = 1000,bg = bg) canvas.pack(fill= tkinter.BOTH, expand= True) posX = 50 posY= 530 startButton = tkinter.Button(canvas,text=" Start ") def startPlayer(xx,yy): canvas.create_rectangle(xx-20,yy-22,xx+20,yy+18,fill = "orange") return(xx,yy) def move(x,y,x2,y2,direction,fill,outline): global f #direction 0 = up #direction 1 = down #direction 2 = left #direction 3 = right if direction == 0: canvas.create_rectangle(x,y,x2,y2,fill="cyan",outline="cyan") canvas.create_rectangle(x,y-f,x2,y2-f,fill=fill,outline=outline) if direction == 1: canvas.create_rectangle(x,y,x2,y2,fill="cyan",outline="cyan") canvas.create_rectangle(x,y+f,x2,y2+f,fill=fill,outline=outline) if direction == 2: canvas.create_rectangle(x,y,x2,y2,fill="cyan",outline="cyan") canvas.create_rectangle(x,y,x2,y2,fill=fill,outline=outline) if direction == 3: canvas.create_rectangle(x,y,x2,y2,fill="cyan",outline="cyan") canvas.create_rectangle(x,y,x2,y2,fill=fill,outline=outline) def playerJump(): global groundLevel, f, k,posX,posY,t while k != 1: move(posX-20,posY-22,posX+20,posY+18,direction = 0, fill = "orange",outline = "black") posY -= 2 canvas.update() if (posY) == 480: k = 1 time.sleep(t) k = 0 while k != 1: move(posX-20,posY-22,posX+20,posY+18,direction = 1, fill = "orange",outline = "black") posY += 2 canvas.update() if (posY) == 530: k = 1 time.sleep(t) k = 0 def start(): canvas.create_rectangle(0,0,1000,600,fill="cyan") canvas.create_line(0,550,1000,550,width = 3) startButton.destroy() startPlayer(50,530) startGameButton = tkinter.Button(canvas, text ="Go!",command = playerJump) startGameButton.place(x = 35, y=400) return(startGameButton) def resetButton(): global startGameButton startGameButton.destroy() startGameButton = tkinter.Button(canvas, text ="Go!",command = playerJump) startGameButton.place(x = 35, y=400) startImage = tkinter.PhotoImage(file="C:/Users/marti/OneDrive/Desktop/Wheel finder/startSign.png") canvas.create_rectangle(0,0,1000,1000,fill="green") startButton.config(image = startImage,command = start) startButton.place(x = 130, y= 25) canvas.create_rectangle(300,400,700,500,fill="#113B08",outline = "black",width = 3) canvas.create_text(500,450,text = "By: --------", font = "Arial 30",fill ="white") I shrinking the sleep time every time it runs so its faster, but that is only a temporary solution and it didn't even work.
[ "Problem with your code is you are always adding new items into your canvas. When you jump you update orange rectangle and repaint its old place. However they stack top of each other and handling too many elements makes slower your program.\nWe create player and return it to main function.\ndef startPlayer(xx,yy):\n player=canvas.create_rectangle(xx-20,yy-22,xx+20,yy+18,fill = \"orange\")\n return player\n\nThis is new start function. Check that we get player and send to playerjump function.\ndef start():\n canvas.create_rectangle(0,0,1000,600,fill=\"cyan\")\n canvas.create_line(0,550,1000,550,width = 3)\n startButton.destroy()\n player = startPlayer(50,530)\n startGameButton = tkinter.Button(canvas, text =\"Go!\",command = lambda :playerJump(player))\n startGameButton.place(x = 35, y=400)\n return(startGameButton)\n\nAnd this is playerjump function.We get player and sent to move function.\ndef playerJump(player):\n global groundLevel, f, k,posX,posY,t\n while k != 1:\n move(posX-20,posY-22,posX+20,posY+18,direction = 0, fill = \"orange\",outline = \"black\",player=player)\n posY -= 2\n canvas.update()\n if (posY) == 480:\n k = 1\n time.sleep(t)\n k = 0\n while k != 1:\n move(posX-20,posY-22,posX+20,posY+18,direction = 1, fill = \"orange\",outline = \"black\",player=player)\n posY += 2\n canvas.update()\n if (posY) == 530:\n k = 1\n time.sleep(t)\n k = 0\n\nExcept move lines, I didn't change anything in this function.\nOkay now let's check key part.\ndef move(x,y,x2,y2,direction,fill,outline,player):\n global f\n #direction 0 = up\n #direction 1 = down\n #direction 2 = left\n #direction 3 = right\n if direction == 0:\n canvas.coords(player,x,y-f,x2,y2-f)\n if direction == 1:\n canvas.coords(player,x,y+f,x2,y2+f)\n if direction == 2:\n canvas.coords(player,x,y,x2,y2)\n if direction == 3:\n canvas.coords(player,x,y,x2,y2)\n\nlook that instead of creating new rectangles,we updated existing one. which is much more stable\nAlso you forgot adding root.mainloop in your code snippet.\n" ]
[ 0 ]
[]
[]
[ "python", "sleep", "time", "tkinter", "while_loop" ]
stackoverflow_0074509121_python_sleep_time_tkinter_while_loop.txt
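A small footnote to the accepted fix above: when the movement is a fixed offset rather than absolute coordinates, tkinter's Canvas also offers move, which shifts an existing item by (dx, dy) and likewise avoids the create-and-repaint churn that was slowing the loop down:

canvas.move(player, 0, -2)   # jump step: shift the existing rectangle up 2 px
canvas.update()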
Q: What does '*' mean in 'from math import *'? I made some code like this, and I learned that 'from math import *' imports every member of the math module, but I don't know what the '*' means. The result is different from what I expected: I thought the answer of 'd*e' would be 16, the answer of 'd**e' would be 64, and so sqrt(d**e) would be 8. I searched Google but I still don't know the meaning of '*'.
d = 8
e = 2
from math import *
print(d*e)
print(d**e)
sqrt(d ** e)

But the result was 16.88210319127114. What does '*' mean? A: * is a wildcard that loads all of the public names (functions and constants) from that module into your local namespace.
What does '*' mean in 'from math import *'?
I made some code like this, and I learned that 'from math import *' imports every member of the math module, but I don't know what the '*' means. The result is different from what I expected: I thought the answer of 'd*e' would be 16, the answer of 'd**e' would be 64, and so sqrt(d**e) would be 8. I searched Google but I still don't know the meaning of '*'.
d = 8
e = 2
from math import *
print(d*e)
print(d**e)
sqrt(d ** e)

But the result was 16.88210319127114. What does '*' mean?
[ "* is a wildcard that loads all of the functions in that library into your local namespace.\n" ]
[ 1 ]
[]
[]
[ "import", "math", "python", "sqrt" ]
stackoverflow_0074509692_import_math_python_sqrt.txt
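The answer explains what '*' does, but the surprising number in the question comes from a side effect of it: math exports a constant named e (Euler's number, about 2.71828), so from math import * silently rebinds the question's e = 2. A sketch reproducing and avoiding this:

d = 8
e = 2
from math import *      # also imports math.e, overwriting e = 2

print(e)                # 2.718281828459045
print(d ** e)           # about 284.98, so sqrt(d ** e) is about 16.8821...

from math import sqrt   # safer: import only the names you need
e = 2
print(sqrt(d ** e))     # 8.0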
Q: Reading large table by chunks I have a table generated on a server and I connect to it using a presto client as follows: conn = presto.connect('hostname',port) db = "some_large_table" What I would like to do is to read in 1 chunk at a time then do my processing and append that chunk to an existing df. Ie: sql = "select column1, .. column20 limit 100" chunk_count = 0 dfs = [] for chunk in pd.read_sql_query(sql, conn, chunksize=10) : chunk_count +=1 dfs.append(chunk) print(dfs[['column1', 'column2']]) The print dfs only shows 10 rows. Which Means its not appending. A: In my query I limited the number of rows to 10. For some reason df_full.append() does not work, I changed it to df_full = df_full.append() and it works fine. sql = "select*...limit 10" df_source = pd.read_sql_query(sql, conn, chunksize=2) chunk_count = 0 df_list = [] df_full = pd.DataFrame(columns = col_names) for chunk in df_source: df_list.append(chunk) for df_item in df_list: df_full = df_full.append(df_item, ignore_index = True) print(df_full) Result: [10 rows x 38 columns] A: Appending to dataframes is really slow and should be avoided. Pandas does not do in-place appending. The dataframe is always copied to a new version. So in your code, you could do this: dfs = pd.DataFrame() #empty dataframe # then in the loop: dfs = dfs.append(chunk) And that would work. If you don't have very many chunks, then it is not bad. But as dfs grows, it will start to slow down to a crawl. I find it is best to append lod (list of dict) and then cast all at once into a dataframe, if you want one big dataframe. Thus: sql = "select column1, .. column20 limit 100" chunk_count = 0 lod = [] for chunk_df in pd.read_sql_query(sql, conn, chunksize=10) : chunk_count += 1 lod += chunk_df.to_dict(orient='records') dfs = pd.DataFrame.from_dict(lod) print(f"processed {chunk_count} chunks.\n", dfs[['column1', 'column2']]) This method does not get slower as you go, because lists can append quickly, and then the lod is converted to dfs in one shot.
Reading large table by chunks
I have a table generated on a server and I connect to it using a presto client as follows:
conn = presto.connect('hostname',port)
db = "some_large_table"

What I would like to do is to read in one chunk at a time, do my processing, and then append that chunk to an existing df, i.e.:
sql = "select column1, .. column20 limit 100"
chunk_count = 0
dfs = []

for chunk in pd.read_sql_query(sql, conn, chunksize=10) :
    chunk_count +=1
    dfs.append(chunk)

print(dfs[['column1', 'column2']]) 

The printed dfs only shows 10 rows, which means it's not appending.
[ "In my query I limited the number of rows to 10. For some reason df_full.append() does not work, I changed it to df_full = df_full.append() and it works fine. \nsql = \"select*...limit 10\"\ndf_source = pd.read_sql_query(sql, conn, chunksize=2)\nchunk_count = 0\n\ndf_list = []\ndf_full = pd.DataFrame(columns = col_names)\n\nfor chunk in df_source:\n df_list.append(chunk)\n\nfor df_item in df_list:\n df_full = df_full.append(df_item, ignore_index = True)\n\nprint(df_full)\n\nResult:\n[10 rows x 38 columns]\n\n", "Appending to dataframes is really slow and should be avoided. Pandas does not do in-place appending. The dataframe is always copied to a new version.\nSo in your code, you could do this:\ndfs = pd.DataFrame() #empty dataframe\n\n # then in the loop:\n\n dfs = dfs.append(chunk)\n\n\nAnd that would work. If you don't have very many chunks, then it is not bad. But as dfs grows, it will start to slow down to a crawl.\nI find it is best to append lod (list of dict) and then cast all at once into a dataframe, if you want one big dataframe.\nThus:\nsql = \"select column1, .. column20 limit 100\"\nchunk_count = 0\nlod = []\n\nfor chunk_df in pd.read_sql_query(sql, conn, chunksize=10) :\n chunk_count += 1\n lod += chunk_df.to_dict(orient='records')\n \ndfs = pd.DataFrame.from_dict(lod)\n\nprint(f\"processed {chunk_count} chunks.\\n\", dfs[['column1', 'column2']]) \n\nThis method does not get slower as you go, because lists can append quickly,\nand then the lod is converted to dfs in one shot.\n" ]
[ 1, 1 ]
[ "Well I can't be sure that this will work without more context but I can tell you that your issue arises because dfs is a list of data frames not a data frame... That said with this approach you will assign dfs to be equal to your first query and append subsequent querys to that result.\nsql = \"select column1, .. column20 limit 100\"\nchunk_count = 0\n\nfor chunk in pd.read_sql_query(sql, conn, chunksize=10) :\n chunk_count +=1\n try:\n dfs.append(chunk)\n except:\n dfs = chunk\n\nprint(dfs[['column1', 'column2']]) \n\n" ]
[ -1 ]
[ "pandas", "python", "python_3.x" ]
stackoverflow_0060254908_pandas_python_python_3.x.txt
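A note on the loops above (applies to both answers): DataFrame.append() was deprecated and then removed in pandas 2.0, so the usual idiom now is to collect the chunks in a plain list and concatenate once at the end. A minimal sketch, assuming conn is an open presto connection and the column names are placeholders:

import pandas as pd

sql = "select column1, column2 from some_large_table limit 100"

chunks = []
for chunk in pd.read_sql_query(sql, conn, chunksize=10):
    # ... per-chunk processing goes here ...
    chunks.append(chunk)

# One concatenation at the end avoids the quadratic cost of growing
# a DataFrame inside the loop.
df_full = pd.concat(chunks, ignore_index=True)
print(df_full[['column1', 'column2']])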
Q: Detect square symbols in a diagram image in python using OpenCV I am trying to detect the square shaped symbols in a P&ID (a diagram) image file using OpenCV. I tried following tutorials that use contours, but that method doesn't seem to work with such diagram images. Using Hough Lines I am able to mark the vertical edges of these squares, but I am not sure how to use these edges detect the squares. All squares in an image have the same dimensions, but the dimensions might not be same across different images, hence template matching didn't work for me. My code using Hough Lines: import cv2 as cv import numpy as np import math img = cv.imread('test_img.jpg') img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) img_display = img.copy() ret,thresh = cv.threshold(img_gray,250,255,cv.THRESH_BINARY) image_inverted = cv.bitwise_not(thresh) linesP = cv.HoughLinesP(image_inverted, 1, np.pi / 1, 50, None, 50, 2) if linesP is not None: for i in range(0, len(linesP)): l = linesP[i][0] length = math.sqrt((l[2] - l[0])**2 + (l[3] - l[1])**2) if length < 100: cv.line(img_display, (l[0], l[1]), (l[2], l[3]), (0,0,255), 1, cv.LINE_AA) cv.imwrite('img_display.png', img_display) Input image: Output Image: In the above code, I've set it to detect only vertical lines because it wasn't detecting horizontal lines reliably. A: If you know that the lines are horizontal or vertical you can filter them out by combining erode and dilate (the docs describe how it works). After seperating horizontal and vertical lines, you can filter them by size. At the end you can fill all remaining closed contours and again use erode/delete to exract the larger shapes. This is more reliable than using Hough Line Transform and gives you more control over what exactly is extracted. Here is a demo: import numpy as np import cv2 as cv min_length = 29 max_length = 150 # erode and dilate with rectangular kernel of given dimensions def erode_dilate(image, dim): kernel = cv.getStructuringElement(cv.MORPH_RECT, dim) result = cv.erode(image, kernel) result = cv.dilate(result, kernel) return result # get contours and filter by max_size def filter_contours(image, max_size): contours, _ = cv.findContours(image, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE) dims = [(cnt, cv.boundingRect(cnt)) for cnt in contours] contours = [cnt for cnt, (x, y, w, h) in dims if w <= max_size and h <= max_size] return contours # read image and get inverted threshold mask img = cv.imread('test_img.jpg') img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) ret, thres = cv.threshold(img_gray, 250, 255, cv.THRESH_BINARY_INV) # extract horizontal lines thres_h = erode_dilate(thres, (min_length, 1)) cv.imshow("horizontal", thres_h) # extract vertical lines thres_v = erode_dilate(thres, (1, min_length)) cv.imshow("vertical", thres_v) # filter lines by max_length and draw them back to res res = np.zeros_like(thres) cntrs_h = filter_contours(thres_h, max_length) cv.drawContours(res, cntrs_h, -1, 255, cv.FILLED) cntrs_v = filter_contours(thres_v, max_length) cv.drawContours(res, cntrs_v, -1, 255, cv.FILLED) cv.imshow("filtered horizontal + vertical", res) # fill remaining shapes cntrs = filter_contours(res, max_length) for c in cntrs: cv.drawContours(res, [c], -1, 255, cv.FILLED) cv.imshow("filled", res) # extract larger shapes res = erode_dilate(res, (min_length, min_length)) cv.imshow("squares", res) # draw contours of detected shapes on original image cntrs = filter_contours(res, max_length) cv.drawContours(img, cntrs, -1, (0, 0, 255), 2) cv.imshow("output", img) cv.waitKey(-1) 
cv.destroyAllWindows() Output:
Detect square symbols in a diagram image in python using OpenCV
I am trying to detect the square-shaped symbols in a P&ID (a diagram) image file using OpenCV. I tried following tutorials that use contours, but that method doesn't seem to work with such diagram images. Using Hough Lines I am able to mark the vertical edges of these squares, but I am not sure how to use these edges to detect the squares. All squares in an image have the same dimensions, but the dimensions might not be the same across different images, hence template matching didn't work for me. My code using Hough Lines: import cv2 as cv import numpy as np import math img = cv.imread('test_img.jpg') img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY) img_display = img.copy() ret,thresh = cv.threshold(img_gray,250,255,cv.THRESH_BINARY) image_inverted = cv.bitwise_not(thresh) linesP = cv.HoughLinesP(image_inverted, 1, np.pi / 1, 50, None, 50, 2) if linesP is not None: for i in range(0, len(linesP)): l = linesP[i][0] length = math.sqrt((l[2] - l[0])**2 + (l[3] - l[1])**2) if length < 100: cv.line(img_display, (l[0], l[1]), (l[2], l[3]), (0,0,255), 1, cv.LINE_AA) cv.imwrite('img_display.png', img_display) Input image: Output Image: In the above code, I've set it to detect only vertical lines because it wasn't detecting horizontal lines reliably.
[ "If you know that the lines are horizontal or vertical you can filter them out by combining erode and dilate (the docs describe how it works).\nAfter seperating horizontal and vertical lines, you can filter them by size. At the end you can fill all remaining closed contours and again use erode/delete to exract the larger shapes.\nThis is more reliable than using Hough Line Transform and gives you more control over what exactly is extracted.\nHere is a demo:\nimport numpy as np\nimport cv2 as cv\n\nmin_length = 29\nmax_length = 150\n\n\n# erode and dilate with rectangular kernel of given dimensions\ndef erode_dilate(image, dim):\n kernel = cv.getStructuringElement(cv.MORPH_RECT, dim)\n result = cv.erode(image, kernel)\n result = cv.dilate(result, kernel)\n return result\n\n\n# get contours and filter by max_size\ndef filter_contours(image, max_size):\n contours, _ = cv.findContours(image, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n dims = [(cnt, cv.boundingRect(cnt)) for cnt in contours]\n contours = [cnt for cnt, (x, y, w, h) in dims if w <= max_size and h <= max_size]\n return contours\n\n\n# read image and get inverted threshold mask\nimg = cv.imread('test_img.jpg')\nimg_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\nret, thres = cv.threshold(img_gray, 250, 255, cv.THRESH_BINARY_INV)\n\n# extract horizontal lines\nthres_h = erode_dilate(thres, (min_length, 1))\ncv.imshow(\"horizontal\", thres_h)\n\n# extract vertical lines\nthres_v = erode_dilate(thres, (1, min_length))\ncv.imshow(\"vertical\", thres_v)\n\n# filter lines by max_length and draw them back to res\nres = np.zeros_like(thres)\ncntrs_h = filter_contours(thres_h, max_length)\ncv.drawContours(res, cntrs_h, -1, 255, cv.FILLED)\ncntrs_v = filter_contours(thres_v, max_length)\ncv.drawContours(res, cntrs_v, -1, 255, cv.FILLED)\ncv.imshow(\"filtered horizontal + vertical\", res)\n\n# fill remaining shapes\ncntrs = filter_contours(res, max_length)\nfor c in cntrs:\n cv.drawContours(res, [c], -1, 255, cv.FILLED)\ncv.imshow(\"filled\", res)\n\n# extract larger shapes\nres = erode_dilate(res, (min_length, min_length))\ncv.imshow(\"squares\", res)\n\n# draw contours of detected shapes on original image\ncntrs = filter_contours(res, max_length)\ncv.drawContours(img, cntrs, -1, (0, 0, 255), 2)\n\ncv.imshow(\"output\", img)\ncv.waitKey(-1)\n\ncv.destroyAllWindows()\n\nOutput:\n\n" ]
[ 1 ]
[]
[]
[ "hough_transform", "opencv", "python" ]
stackoverflow_0074488983_hough_transform_opencv_python.txt
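A self-contained sketch of the erode/dilate idea from the answer above, run on a synthetic binary image so it needs no input file. Eroding with a 1-pixel-tall, 15-pixel-wide rectangular kernel removes every white horizontal run shorter than 15 px; dilating with the same kernel restores the survivors to full length. The transposed kernel does the same for vertical segments.

import numpy as np
import cv2 as cv

img = np.zeros((40, 80), dtype=np.uint8)
img[10, 5:15] = 255   # short horizontal segment -> should be removed
img[20, 5:60] = 255   # long horizontal segment  -> should survive

kernel = cv.getStructuringElement(cv.MORPH_RECT, (15, 1))  # (width, height)
opened = cv.dilate(cv.erode(img, kernel), kernel)

print("short segment left:", opened[10].max())  # 0
print("long segment left: ", opened[20].max())  # 255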
Q: Reading multiple zip archive comments with python My zip file contains a lot of smaller zip files. I want to iterate through all those files, reading and printing each of their comments. I've found out that zipfile file.zip or unzip -z file.zipcan do this to a file in separate, but I'm looking for a way to go through all of them. Couldn't find anything perfect yet, but this post. However, the code is too advanced for me, and I need something very basic, to begin with :) Any ideas or information would be great, thanks! A: Not sure exactly what your looking for but here are a few ways I did it on an Ubuntu Linux machine. for i in `ls *.zip`; do unzip -l $i; done or unzip -l myzip.zip or unzip -p myzip.zip | python -c 'import zipfile,sys,StringIO;print "\n".join(zipfile.ZipFile(StringIO.StringIO(sys.stdin.read())).namelist())' A: You can use the zipfile library to iterate through your files and get their comments using zipinfo.comment import zipfile file = zipfile.ZipFile('filepath.zip') infolist = file.infolist() for info in infolist: print(info.comment) The example above prints the comment of each file in your zip file. You could loop through your zip files and print their contents comments similiarly. Check out the official zipfile documentation, its super clear. A: A short and easy way to achieve this: from zipfile import ZipFile ziplist = ZipFile('parentzip.zip').namelist() for childzip in ziplist: zip_comment = ZipFile(childzip).comment Reminder that if you want to do string based comparisons you should either encode your reference string as bytes, or convert the comment into a string. Ex: from zipfile import ZipFile paths = ['file1.zip', 'file2.zip', 'file3.zip'] bad_str = 'please ignore me' new = [] for filename in paths: zip_comment = zipfile.ZipFile(filename).comment if not zip_comment == str.encode(bad_str): new.append(filename) paths = new
Reading multiple zip archive comments with python
My zip file contains a lot of smaller zip files. I want to iterate through all those files, reading and printing each of their comments. I've found out that zipfile file.zip or unzip -z file.zip can do this for a single file, but I'm looking for a way to go through all of them. I couldn't find anything suitable yet, apart from this post. However, the code is too advanced for me, and I need something very basic to begin with :) Any ideas or information would be great, thanks!
[ "Not sure exactly what your looking for but here are a few ways I did it on an Ubuntu Linux machine.\nfor i in `ls *.zip`; do unzip -l $i; done\n\nor\nunzip -l myzip.zip\n\nor\nunzip -p myzip.zip | python -c 'import zipfile,sys,StringIO;print \"\\n\".join(zipfile.ZipFile(StringIO.StringIO(sys.stdin.read())).namelist())'\n\n", "You can use the zipfile library to iterate through your files and\nget their comments using zipinfo.comment\nimport zipfile\n\nfile = zipfile.ZipFile('filepath.zip')\n\ninfolist = file.infolist()\n for info in infolist:\n print(info.comment)\n\nThe example above prints the comment of each file in your zip file.\nYou could loop through your zip files and print their contents comments similiarly.\nCheck out the official zipfile documentation, its super clear.\n", "A short and easy way to achieve this:\nfrom zipfile import ZipFile\n\nziplist = ZipFile('parentzip.zip').namelist()\n\nfor childzip in ziplist:\n zip_comment = ZipFile(childzip).comment\n\nReminder that if you want to do string based comparisons you should either encode your reference string as bytes, or convert the comment into a string. Ex:\nfrom zipfile import ZipFile\n\npaths = ['file1.zip', 'file2.zip', 'file3.zip']\n\nbad_str = 'please ignore me'\nnew = []\nfor filename in paths:\n zip_comment = zipfile.ZipFile(filename).comment\n if not zip_comment == str.encode(bad_str):\n new.append(filename)\n\npaths = new\n\n" ]
[ 1, 1, 0 ]
[]
[]
[ "archive", "python", "zip" ]
stackoverflow_0050288127_archive_python_zip.txt
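One detail worth noting for the nested-zip case: the inner archives only exist inside the parent archive, so zipfile.ZipFile(name) on a member name will fail unless the member is first extracted. A minimal sketch (the file name 'parent.zip' is a placeholder) that reads each inner archive's comment in memory via io.BytesIO:

import io
import zipfile

with zipfile.ZipFile('parent.zip') as parent:
    for member in parent.namelist():
        if not member.endswith('.zip'):
            continue
        # Open the inner archive from bytes instead of a path on disk.
        with zipfile.ZipFile(io.BytesIO(parent.read(member))) as inner:
            # .comment is bytes; decode it for printing.
            print(member, '->', inner.comment.decode('utf-8', 'replace'))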
Q: Python pandas selecting subset of dataframe using filter condition I have a Pandas DataFrame as below (see the screenshot). I want to query the columns to find all the columns that contain 'X' for each Name. A sample output would be: (John, O, P), where O and P are the column ids against John that have the character 'X'. I tried querying the DataFrame columns using loc, but didn't get the output. Please advise on how to get the required output. A: Here is a proposition using pandas.DataFrame.apply and dict to return a dictionary where the keys are the person names and the values are the column names that fulfill the condition (is equal to "X") dico = dict(zip(df["Name"], df.eq("X").apply(lambda x: x.index[x].tolist(), axis=1))) # Output : print(dico) {'John': ['O', 'P'], 'Dave': ['M', 'P']}
Python pandas selecting subset of dataframe using filter condition
I have a Pandas DataFrame as below (see the screenshot). I want to query the columns to find all the columns that contain 'X' for each Name. A sample output would be: (John, O, P), where O and P are the column ids against John that have the character 'X'. I tried querying the DataFrame columns using loc, but didn't get the output. Please advise on how to get the required output.
[ "Here is a proposition using pandas.DataFrame.apply and dict to return a dictionnary where the keys are the person names and the values are the columns names that fulfill the condition (is equal to \"X\")\ndico= dict(zip(df[\"Name\"], df.eq(\"X\").apply(lambda x: x.index[x].tolist(), axis=1)))\n\n# Output :\nprint(dico)\n\n{'John': ['O', 'P'], 'Dave': ['M', 'P']}\n\n" ]
[ 1 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074509652_dataframe_pandas_python.txt
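A runnable toy version of the accepted idea, with made-up data shaped like the screenshot (a Name column plus letter columns). It drops the Name column before the comparison, a slight variant that keeps the name string itself from ever matching 'X':

import pandas as pd

df = pd.DataFrame({'Name': ['John', 'Dave'],
                   'M': ['', 'X'],
                   'O': ['X', ''],
                   'P': ['X', 'X']})

mask = df.drop(columns='Name').eq('X')
result = dict(zip(df['Name'],
                  mask.apply(lambda r: r.index[r].tolist(), axis=1)))
print(result)  # {'John': ['O', 'P'], 'Dave': ['M', 'P']}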
Q: Subsetting pandas dataframe with list returns an apparently incorrectly sized resultant dataframe I am attempting to subset a pandas DataFrame df with a list L that contains only the column names in the DataFrame that I am interested in. The shape of df is (207, 8440) and the length of L is 6894. When I subset my dataframe as df[L] (or df.loc[:, L]), I get a bizarre result. The expected shape of the resultant DataFrame should be (207, 6894), but instead I get (207, 7092). It seems that this should not even be possible. Can anyone explain this behavior? A: [moving from comment to answer] A pandas dataframe can have multiple columns with the exact same name. If this happens, passing a list of column names can return more columns than the size of the list. You can check if the dataframe has duplicates in the column names using {col for col in df.columns if list(df.columns).count(col) > 1} This will return a set of every column name that comes up more than once.
Subsetting pandas dataframe with list returns an apparently incorrectly sized resultant dataframe
I am attempting to subset a pandas DataFrame df with a list L that contains only the column names in the DataFrame that I am interested in. The shape of df is (207, 8440) and the length of L is 6894. When I subset my dataframe as df[L] (or df.loc[:, L]), I get a bizarre result. The expected shape of the resultant DataFrame should be (207, 6894), but instead I get (207, 7092). It seems that this should not even be possible. Can anyone explain this behavior?
[ "[moving from comment to answer]\nA pandas dataframe can have multiple columns with the exact same name. If this happens, passing a list of column names can return more columns than the size of the list.\nYou can check if the dataframe has duplicates in the column names using {col for col in df.columns if list(df.columns).count(col) > 1} This will return a set of every column that that comes up more than once.\n" ]
[ 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074509554_pandas_python.txt
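A tiny reproduction of the effect described above, plus pandas' built-in duplicate check, which is usually simpler than counting names by hand:

import pandas as pd

# Two columns share the name 'a', so selecting ['a', 'b'] returns three columns.
df = pd.DataFrame([[1, 2, 3]], columns=['a', 'a', 'b'])
print(df[['a', 'b']].shape)   # (1, 3) -- wider than the two-name list

# Built-in duplicate detection:
print(list(df.columns[df.columns.duplicated()]))   # ['a']

# One way to keep only the first occurrence of each column name:
df_unique = df.loc[:, ~df.columns.duplicated()]
print(df_unique.shape)        # (1, 2)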
Q: Reading .sav with python So, I'm trying to read a .sav file using python and turn it into a .csv. I already got my code to read .sav files, and I also checked it with a test .sav, which I managed to turn into a csv. I then went on to use the real .sav file, and not it no longer works. Here is the code: import pyreadstat df, meta = pyreadstat.read_sav('./RaceDatas.sav') df.to_csv('BloodRace.csv', index=False) Here is the error: runfile('C:/Users/usuario/Desktop/Python tests/ARK Projects/no.py', wdir='C:/Users/usuario/Desktop/Python tests/ARK Projects') Traceback (most recent call last): File "C:\Program Files\Spyder\pkgs\spyder_kernels\py3compat.py", line 356, in compat_exec exec(code, globals, locals) File "c:\users\usuario\desktop\python tests\ark projects\no.py", line 3, in <module> df, meta = pyreadstat.read_sav('./RaceDatas.sav') File "pyreadstat\pyreadstat.pyx", line 364, in pyreadstat.pyreadstat.read_sav File "pyreadstat\_readstat_parser.pyx", line 1099, in pyreadstat._readstat_parser.run_conversion File "pyreadstat\_readstat_parser.pyx", line 867, in pyreadstat._readstat_parser.run_readstat_parser File "pyreadstat\_readstat_parser.pyx", line 797, in pyreadstat._readstat_parser.check_exit_status ReadstatError: Invalid file, or file has unsupported features Does anyone know what can be the problem? I thought perhaps it was the way in which the .sav I'm trying out is structured in a weird way, but its structured like normal. The .sav itself is 451 KB I tried with a simple .sav file, and it worked like intended. I tried with the main code, and it didn't work. I'm expecting it to at least turn the .sav into a python/pandas data frame to then turn into a .csv A: Here is the Solution. I Hope it would help. import pandas as pd import numpy as np import os # Set the working directory os.chdir("C:/Users/Desktop/") # Read the .sav file df = pd.read_spss("file.sav") # Print the dataframe print(df) # Write the .csv file df.to_csv("file.csv") A: I found this... It is considered as data is in observation order, e.g. all variable values for the first observation, followed by all values for the second observation, etc. The format of the data record varies depending on the compression code in the file header record. The data portion of a .sav file can be uncompressed: code 0: compressed by bytecode i would recommend using a context manager to read the file to avoid .sav file compression weirdness. Also... below is a parameter for the pyreadstat.read_sav() method: encoding (str, optional) – Defaults to None. If set, the system will use the defined encoding instead of guessing it. It has to be an iconv-compatible name your encoding options are ['ASCII', 'EBCDIC', 'utf-8'] with pyreadstat.read_sav('./RaceDatas.sav', encoding='utf-8') as savfile: savfile.to_csv('BloodRace.csv', index=False) or data = pyreadstat.read_sav('./RaceDatas.sav', encoding='ASCII') df = data[0] meta = data[1] A: The error is pretty much clear. The sav file is corrupted. There is no reason why it should work on one but not other. Here is the parsing function: https://github.com/Roche/pyreadstat/blob/master/src/spss/readstat_sav_read.c#L1570 There are only two places where READSTAT_ERROR_PARSE is returned, so you may want to check that out.
Reading .sav with python
So, I'm trying to read a .sav file using python and turn it into a .csv. I already got my code to read .sav files, and I also checked it with a test .sav, which I managed to turn into a csv. I then went on to use the real .sav file, and now it no longer works. Here is the code: import pyreadstat df, meta = pyreadstat.read_sav('./RaceDatas.sav') df.to_csv('BloodRace.csv', index=False) Here is the error: runfile('C:/Users/usuario/Desktop/Python tests/ARK Projects/no.py', wdir='C:/Users/usuario/Desktop/Python tests/ARK Projects') Traceback (most recent call last): File "C:\Program Files\Spyder\pkgs\spyder_kernels\py3compat.py", line 356, in compat_exec exec(code, globals, locals) File "c:\users\usuario\desktop\python tests\ark projects\no.py", line 3, in <module> df, meta = pyreadstat.read_sav('./RaceDatas.sav') File "pyreadstat\pyreadstat.pyx", line 364, in pyreadstat.pyreadstat.read_sav File "pyreadstat\_readstat_parser.pyx", line 1099, in pyreadstat._readstat_parser.run_conversion File "pyreadstat\_readstat_parser.pyx", line 867, in pyreadstat._readstat_parser.run_readstat_parser File "pyreadstat\_readstat_parser.pyx", line 797, in pyreadstat._readstat_parser.check_exit_status ReadstatError: Invalid file, or file has unsupported features Does anyone know what the problem could be? I thought perhaps the .sav I'm trying was structured in a weird way, but it's structured normally. The .sav itself is 451 KB. I tried with a simple .sav file, and it worked as intended. I tried with the main code, and it didn't work. I'm expecting it to at least turn the .sav into a python/pandas data frame to then turn into a .csv
[ "Here is the Solution. I Hope it would help.\nimport pandas as pd\nimport numpy as np\nimport os\n\n# Set the working directory\nos.chdir(\"C:/Users/Desktop/\")\n\n# Read the .sav file\ndf = pd.read_spss(\"file.sav\")\n\n# Print the dataframe\nprint(df)\n\n# Write the .csv file\ndf.to_csv(\"file.csv\")\n\n", "I found this...\n\nIt is considered as data is in observation order, e.g. all variable\nvalues for the first observation, followed by all values for the\nsecond observation, etc. The format of the data record varies\ndepending on the compression code in the file header record. The data\nportion of a .sav file can be uncompressed:\ncode 0: compressed by bytecode\n\n\ni would recommend using a context manager to read the file to avoid .sav file compression weirdness.\nAlso... below is a parameter for the pyreadstat.read_sav() method:\n\nencoding (str, optional) – Defaults to None. If set, the system will\nuse the defined encoding instead of guessing it. It has to be an\niconv-compatible name\n\nyour encoding options are ['ASCII', 'EBCDIC', 'utf-8']\nwith pyreadstat.read_sav('./RaceDatas.sav', encoding='utf-8') as savfile:\n savfile.to_csv('BloodRace.csv', index=False)\n\n\nor\ndata = pyreadstat.read_sav('./RaceDatas.sav', encoding='ASCII')\ndf = data[0]\nmeta = data[1]\n\n", "The error is pretty much clear. The sav file is corrupted. There is no reason why it should work on one but not other.\nHere is the parsing function: https://github.com/Roche/pyreadstat/blob/master/src/spss/readstat_sav_read.c#L1570\nThere are only two places where READSTAT_ERROR_PARSE is returned, so you may want to check that out.\n" ]
[ 0, 0, 0 ]
[]
[]
[ "csv", "dataframe", "pandas", "python" ]
stackoverflow_0074410261_csv_dataframe_pandas_python.txt
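Two corrections worth making to the answers above, shown in one sketch: pyreadstat.read_sav returns a (DataFrame, metadata) tuple rather than a context manager, and its encoding parameter takes iconv-compatible names such as 'latin1'. The retry loop below is a debugging aid, not a guaranteed fix -- a file that ReadStat reports as invalid may simply be corrupted and need re-exporting from SPSS:

import pyreadstat

path = './RaceDatas.sav'

for enc in (None, 'utf-8', 'latin1'):
    try:
        df, meta = pyreadstat.read_sav(path, encoding=enc)
        print(f"encoding={enc!r} worked: {len(df)} rows")
        df.to_csv('BloodRace.csv', index=False)
        break
    except Exception as exc:
        print(f"encoding={enc!r} failed: {exc}")
else:
    print("Could not parse the file with any tried encoding.")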
Q: Ideas to improve language detection between Spanish and Catalan I'm working on a text mining script in python. I need to detect the language of a natural language field from the dataset. The thing is, 98% of the rows are in Spanish and Catalan. I tried using some algorithms like the stopwords one or the langdetect library, but these languages share a lot of words so they fail a lot. I'm looking for some ideas to improve this algorithm. One thought is, make a dictionary with some words that are specific to Spanish and Catalan, so if one text has any of these words, it's tagged as that language. A: Approach 1: Distinguishing characters Spanish and Catalan (note: there will be exceptions for proper names and loanwords e.g. Barça): esp_chars = "ñÑáÁýÝ" cat_chars = "çÇàÀèÈòÒ·ŀĿ" Example: sample_texts = ["El año que es abundante de poesía, suele serlo de hambre.", "Cal no abandonar mai ni la tasca ni l'esperança."] for text in sample_texts: if any(char in text for char in esp_chars): print("Spanish: {}".format(text)) elif any(char in text for char in cat_chars): print("Catalan: {}".format(text)) >>> Spanish: El año que es abundante de poesía, suele serlo de hambre. Catalan: Cal no abandonar mai ni la tasca ni l'esperança. If this isn't sufficient, you could expand this logic to search for language exclusive digraphs, letter combinations, or words: Spanish only Catalan only Words como y su con él otro com i seva amb ell altre Initial digraphs d' l' Digraphs ss tj qü l·l l.l Terminal digraphs ig Catalan letter combinations that only marginally appear in Spanish tx tg          (Es. exceptions postgrado, postgraduado, postguerra) ny          (Es. exceptions mostly prefixed in-, en-, con- + y-) ll (terminal) (Es. exceptions (loanwords): detall, nomparell) Approach 2: googletrans library You could also use the googletrans library to detect the language: from googletrans import Translator translator = Translator() for text in sample_texts: lang = translator.detect(text).lang print(lang, ":", text) >>> es : El año que es abundante de poesía, suele serlo de hambre. ca : Cal no abandonar mai ni la tasca ni l'esperança.
Ideas to improve language detection between Spanish and Catalan
I'm working on a text mining script in python. I need to detect the language of a natural language field from the dataset. The thing is, 98% of the rows are in Spanish and Catalan. I tried using some algorithms like the stopwords one or the langdetect library, but these languages share a lot of words, so they fail a lot. I'm looking for some ideas to improve this algorithm. One idea is to build a dictionary of words that are specific to either Spanish or Catalan, so that if a text contains any of these words, it is tagged as that language.
[ "Approach 1: Distinguishing characters\nSpanish and Catalan (note: there will be exceptions for proper names and loanwords e.g. Barça):\nesp_chars = \"ñÑáÁýÝ\"\ncat_chars = \"çÇàÀèÈòÒ·ŀĿ\"\n\nExample:\nsample_texts = [\"El año que es abundante de poesía, suele serlo de hambre.\",\n \"Cal no abandonar mai ni la tasca ni l'esperança.\"]\n\nfor text in sample_texts:\n if any(char in text for char in esp_chars):\n print(\"Spanish: {}\".format(text))\n elif any(char in text for char in cat_chars):\n print(\"Catalan: {}\".format(text))\n\n>>> Spanish: El año que es abundante de poesía, suele serlo de hambre.\n Catalan: Cal no abandonar mai ni la tasca ni l'esperança.\n\nIf this isn't sufficient, you could expand this logic to search for language exclusive digraphs, letter combinations, or words:\n\n\n\n\n\nSpanish only\nCatalan only\n\n\n\n\nWords\ncomo y su con él otro\ncom i seva amb ell altre\n\n\nInitial digraphs\n\nd' l'\n\n\nDigraphs\n\nss tj qü l·l l.l\n\n\nTerminal digraphs\n\nig\n\n\n\n\nCatalan letter combinations that only marginally appear in Spanish\n\ntx\ntg          (Es. exceptions postgrado, postgraduado, postguerra)\nny          (Es. exceptions mostly prefixed in-, en-, con- + y-)\nll (terminal) (Es. exceptions (loanwords): detall, nomparell)\n\n\nApproach 2: googletrans library\nYou could also use the googletrans library to detect the language:\nfrom googletrans import Translator\n\ntranslator = Translator()\n\nfor text in sample_texts:\n lang = translator.detect(text).lang\n print(lang, \":\", text)\n\n>>> es : El año que es abundante de poesía, suele serlo de hambre.\n ca : Cal no abandonar mai ni la tasca ni l'esperança.\n\n" ]
[ 1 ]
[ "DicCat = ['amb','cap','dalt','damunt','des','dintre','durant','excepte','fins','per','pro','sense','sota','llei','hi','ha','més','mes','moment','órgans', 'segóns','Article','i','per','els','amb','és','com','dels','més','seu','seva','fou','també','però','als','després','aquest','fins','any','són','hi','pel','aquesta','durant','on','part','altres','anys','ciutat','cap','des','seus','tot','estat','qual','segle','quan','ja','havia','molt','rei','nom','fer','així','li','sant','encara','pels','seves','té','partit','està','mateix','pot','nord','temps','fill','només','dues','sota','lloc','això','alguns','govern','uns','aquests','mort','nou','tots','fet','sense','frança','grup','tant','terme','fa','tenir','segons','món','regne','exèrcit','segona','abans','mentre','quals','aquestes','família','catalunya','eren','poden','diferents','nova','molts','església','major','club','estats','seua','diversos','grans','què','arribar','troba','població','poble','foren','època','haver','eleccions','diverses','tipus','riu','dia','quatre','poc','regió','exemple','batalla','altre','espanya','joan','actualment','tenen','dins','llavors','centre','algunes','important','altra','terra','antic','tenia','obres','estava','pare','qui','ara','havien','començar','història','morir','majoria','qui','ara','havien','començar','història','morir','majoria']\nDicEsp = ['los','y','bajo','con', 'entre','hacia','hasta','para','por','según','segun','sin','tras','más','mas','ley','capítulo','capitulo','título','titulo','momento','y','las','por','con','su','para','lo','como','más','pero','sus','le','me','sin','este','ya','cuando','todo','esta','son','también','fue','había','muy','años','hasta','desde','está','mi','porque','qué','sólo','yo','hay','vez','puede','todos','así','nos','ni','parte','tiene','él','uno','donde','bien','tiempo','mismo','ese','ahora','otro','después','te','otros','aunque','esa','eso','hace','otra','gobierno','tan','durante','siempre','día','tanto','ella','sí','dijo','sido','según','menos','año','antes','estado','sino','caso','nada','hacer','estaba','poco','estos','presidente','mayor','ante','unos','algo','hacia','casa','ellos','ayer','hecho','mucho','mientras','además','quien','momento','millones','esto','españa','hombre','están','pues','hoy','lugar','madrid','trabajo','otras','mejor','nuevo','decir','algunos','entonces','todas','días','debe','política','cómo','casi','toda','tal','luego','pasado','medio','estas','sea','tenía','nunca','aquí','ver','veces','embargo','partido','personas','grupo','cuenta','pueden','tienen','misma','nueva','cual','fueron','mujer','frente','josé','tras','cosas','fin','ciudad','he','social','tener','será','historia','muchos','juan','tipo','cuatro','dentro','nuestro','punto','dice','ello','cualquier','noche','aún','agua','parece','haber','situación','fuera','bajo','grandes','nuestra','ejemplo','acuerdo','habían','usted','estados','hizo','nadie','países','horas','posible','tarde','ley','importante','desarrollo','proceso','realidad','sentido','lado','mí','tu','cambio','allí','mano','eran','estar','san','número','sociedad','unas','centro','padre','gente','relación','cuerpo','incluso','través','último','madre','mis','modo','problema','cinco','carlos','hombres','información','ojos','muerte','nombre','algunas','público','mujeres','siglo','todavía','meses','mañana','esos','nosotros','hora','muchas','pueblo','alguna','dar','don','da','tú','derecho','verdad','maría','unidos','podría','sería','junto','cabeza','aquel','luis','cuanto','tierra','equipo','segundo','director','dicho','cierto','casos','mano
s','nivel','podía','familia','largo','falta','llegar','propio','ministro','cosa','primero','seguridad','hemos','mal','trata','algún','tuvo','respecto','semana','varios','real','sé','voz','paso','señor','mil','quienes','proyecto','mercado','mayoría','luz','claro','iba','éste','pesetas','orden','español','buena','quiere','aquella','programa','palabras','internacional','esas','segunda','empresa','puesto','ahí','propia','libro','igual','político','persona','últimos','ellas','total','creo','tengo','dios','española','condiciones','méxico','fuerza','solo','único','acción','amor','policía','puerta','pesar','sabe','calle','interior','tampoco','ningún','vista','campo','buen','hubiera','saber','obras','razón','niños','presencia','tema','dinero','comisión','antonio','servicio','hijo','última','ciento','estoy','hablar','dio','minutos','producción','camino','seis','quién','fondo','dirección','papel','demás','idea','especial','diferentes','dado','base','capital','ambos','europa','libertad','relaciones','espacio','medios','ir','actual','población','empresas','estudio','salud','servicios','haya','principio','siendo','cultura','anterior','alto','media','mediante','primeros','arte','paz','sector','imagen','medida','deben','datos','consejo','personal','interés','julio','grupos','miembros','ninguna','existe','cara','edad','movimiento','visto','llegó','puntos','actividad','bueno','uso','niño','difícil','joven','futuro','aquellos','mes','pronto','soy','hacía','nuevos','nuestros','estaban','posibilidad','sigue','cerca','resultados','educación','atención','gonzález','capacidad','efecto','necesario','valor','aire','investigación','siguiente','figura','central','comunidad','necesidad','serie','organizació','nuevas','calidad']\nDicEng = ['all','my','have','do','and', 'or', 'what', 'can', 'you', 'the', 'on', 'it', 'at', 'since', 'for', 'ago', 'before', 'past', 'by', 'next', 'from','with', 'wich','law','is','the','of','and','to','in','is','you','that','it','he','was','for','on','are','as','with','his','they','at','be','this','have','from','or','one','had','by','word','but','not','what','all','were','we','when','your','can','said','there','use','an','each','which','she','do','how','their','if','will','up','other','about','out','many','then','them','these','so','some','her','would','make','like','him','into','time','has','look','two','more','write','go','see','number','no','way','could','people','my','than','first','water','been','call','who','oil','its','now','find','long','down','day','did','get','come','made','may','part','may','part']\n\n\ndef WhichLanguage(text):\n Input = text.lower().split(\" \")\n CatScore = []\n EspScore = []\n EngScore = []\n\n for e in Input:\n if e in DicCat:\n CatScore.append(e)\n if e in DicEsp:\n EspScore.append(e)\n if e in DicEng:\n EngScore.append(e)\n\n if(len(EngScore) > len(EspScore)) and (len(EngScore) > len(CatScore)):\n Language ='English'\n else:\n if(len(CatScore) > len(EspScore)):\n Language ='Catala'\n else:\n Language ='Espanyol'\n print(text)\n print(\"ESP= \",len(EspScore),EspScore) \n print(\"Cat = \",len(CatScore), CatScore)\n print(\"ING= \",len(EngScore),EngScore)\n print( 'Language is =', Language)\n print(\"-----\")\n return(Language)\n\n\nprint(WhichLanguage(\"Hola bon dia\"))\n\n" ]
[ -1 ]
[ "language_detection", "python" ]
stackoverflow_0045672720_language_detection_python.txt
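A rough sketch of a third approach: score overlapping character trigrams against small per-language profiles. The seed sentences below are stand-ins -- in practice the profiles should be built from a few thousand labelled rows of the actual dataset, which captures domain vocabulary better than fixed word lists:

from collections import Counter

def trigrams(text):
    text = ' ' + text.lower() + ' '
    return Counter(text[i:i + 3] for i in range(len(text) - 2))

profiles = {
    'es': trigrams("el año que es abundante de poesía suele serlo de hambre"),
    'ca': trigrams("cal no abandonar mai ni la tasca ni l'esperança"),
}

def detect(text):
    grams = trigrams(text)
    # Counter & Counter keeps the minimum count of each shared trigram.
    scores = {lang: sum((grams & prof).values()) for lang, prof in profiles.items()}
    return max(scores, key=scores.get)

print(detect("cal abandonar la tasca"))   # expected: 'ca'
print(detect("el hambre de la poesía"))   # expected: 'es'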
Q: Send a wake on lan packet from a docker container I have a docker container running a python uwsgi app. The app sends a wake on lan broadcast packet to wake a pc in the local network. It works fine without the use of docker (normal uwsgi app directly on the server), but with docker it won't work. I exposed port 9/udp and bound it port 9 of the host system. What am I missing here? Or with other words, how can I send a wake on lan command from a docker container to the outside network? A: It seems that UDP broadcast from docker isn't being routed properly (possibly only broadcasted in the container itself, not on the host). You can't send UDP WoL messages directly, as the device you're trying to control is 'offline' it doesn't show up in your router's ARP table and thus the direct message can't be delivered. You may try setting (CLI) --network host or (compose) network_mode: host. If you feel this may compromise security (since your container's/host network are more directly 'connected') or otherwise interferes with your container; you may create/use a separated 'WoL' container. A: There's a mix of partially correct answers in the above comments. You do want to send your packet to port 9 on the host but: It's the NIC that listens on port 9, not the OS. In other words, you need to configure the system's NIC to listen for magic packets. When the NIC receives the packet (on port 9, containing its own MAC address), the NIC will start the system by sending a signal via the PCI bus. You don't need need to set up a service to listen on port 9. That's built into most NICs instead. The "sender" of the magic packet needs to be on the same network segment as the target. This means that your Docker container will need to be built with "--network host". This makes your host machine the sender (even though it's coming from a Docker container) and the Docker host must be in the same network segment as the target (i.e., their broadcast addresses match). There's no need to map port 9 between the container and the host. That said, you may run into issues with "--network host" and accessing the app if it tries to use a port that another service is already using. Experimentation is needed. You might need to configure the web server to listen on a different port.
Send a wake on lan packet from a docker container
I have a docker container running a python uwsgi app. The app sends a wake on lan broadcast packet to wake a pc in the local network. It works fine without the use of docker (normal uwsgi app directly on the server), but with docker it won't work. I exposed port 9/udp and bound it to port 9 of the host system. What am I missing here? Or in other words, how can I send a wake on lan command from a docker container to the outside network?
[ "It seems that UDP broadcast from docker isn't being routed properly (possibly only broadcasted in the container itself, not on the host).\nYou can't send UDP WoL messages directly, as the device you're trying to control is 'offline' it doesn't show up in your router's ARP table and thus the direct message can't be delivered.\nYou may try setting (CLI) --network host or (compose) network_mode: host.\nIf you feel this may compromise security (since your container's/host network are more directly 'connected') or otherwise interferes with your container; you may create/use a separated 'WoL' container.\n", "There's a mix of partially correct answers in the above comments. You do want to send your packet to port 9 on the host but:\n\nIt's the NIC that listens on port 9, not the OS. In other words, you need to configure the system's NIC to listen for magic packets. When the NIC receives the packet (on port 9, containing its own MAC address), the NIC will start the system by sending a signal via the PCI bus.\nYou don't need need to set up a service to listen on port 9. That's built into most NICs instead.\nThe \"sender\" of the magic packet needs to be on the same network segment as the target. This means that your Docker container will need to be built with \"--network host\". This makes your host machine the sender (even though it's coming from a Docker container) and the Docker host must be in the same network segment as the target (i.e., their broadcast addresses match).\nThere's no need to map port 9 between the container and the host. That said, you may run into issues with \"--network host\" and accessing the app if it tries to use a port that another service is already using. Experimentation is needed. You might need to configure the web server to listen on a different port.\n\n" ]
[ 3, 0 ]
[]
[]
[ "docker", "python", "uwsgi", "wake_on_lan" ]
stackoverflow_0033101603_docker_python_uwsgi_wake_on_lan.txt
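For completeness, a minimal sketch of building and broadcasting the magic packet from Python (the MAC address is a placeholder): 6 bytes of 0xFF followed by the target MAC repeated 16 times, sent to UDP port 9 with SO_BROADCAST set. Combined with --network host, this puts the broadcast on the host's LAN segment instead of Docker's internal bridge:

import socket

def wake(mac: str, broadcast: str = '255.255.255.255', port: int = 9) -> None:
    mac_bytes = bytes.fromhex(mac.replace(':', '').replace('-', ''))
    packet = b'\xff' * 6 + mac_bytes * 16
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        s.sendto(packet, (broadcast, port))

wake('aa:bb:cc:dd:ee:ff')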
Q: How can i access spider's file data in items file in scrapy python? FlipKart.py main spider file for scrap name, price, and link from flipkart.com import scrapy from ..items import FlipkartScraperItem class FlipkartSpider(scrapy.Spider): name = 'FlipKart' allowed_domains = ['www.flipkart.com'] start_urls = ['https://www.flipkart.com/search?q=mobile'] def parse(self, response): products = response.css('._2kHMtA') for product in products: item = FlipkartScraperItem() item['name'] = product.css('._4rR01T').get(), item['price'] = product.css('._2kHMtA ._1_WHN1').get(), item['link'] = product.css("._1fQZEK::attr('href')").get() yield item Items.py File Here I wanted to print the name variable import scrapy from scrapy.loader import ItemLoader from itemloaders.processors import TakeFirst # TakeFirst text from data from itemloaders.processors import MapCompose # For function calling from w3lib.html import remove_tags # For removing html tags def removeRupeeSymbol(value): return value.replace('₹', '').strip() class FlipkartScraperItem(scrapy.Item): # define the fields for your item here like: name = scrapy.Field(input_processor = MapCompose(remove_tags), output_processor = TakeFirst()) price = scrapy.Field(input_processor = MapCompose(remove_tags, removeRupeeSymbol), output_processor = TakeFirst()) link = scrapy.Field() print(name) I want to scrap Flipkart mobile phones data and store them in CSV with some changes in that data. I have written a function called removeRupeeSymbol to clean data and then after I want to store that data in CSV file but I am not able to access that data when I try to print those data it gives me the address of the variable instead of the data. here is the result when I print the name variable {'input_processor': <itemloaders.processors.MapCompose object at 0x000001DE10CBD290>, 'output_processor': <itemloaders.processors.TakeFirst object at 0x000001DE10CBD390>} A: To pull the desired data, you can try to implement the next working example. 
Full working code as an example: import scrapy from ..items import FlipkartScraperItem from itemloaders import ItemLoader class FlipkartSpider(scrapy.Spider): name = 'flipKart' allowed_domains = ['www.flipkart.com'] start_urls = ['https://www.flipkart.com/search?q=mobile'] def parse(self, response): products = response.css('._2kHMtA') for product in products: u = 'https://www.flipkart.com' + product.css( "._1fQZEK::attr('href')").get() loader = ItemLoader(item=FlipkartScraperItem(),selector = product) loader.add_css('name', '._4rR01T::text') loader.add_css('price', '._2kHMtA ._1_WHN1::text') loader.add_value('link', u) item = loader.load_item() yield item items.py file: import scrapy from scrapy.loader import ItemLoader from itemloaders.processors import TakeFirst # TakeFirst text from data from itemloaders.processors import MapCompose # For function calling #from w3lib.html import remove_tags # For removing html tags def removeRupeeSymbol(value): return value.replace('₹', '').strip() class FlipkartScraperItem(scrapy.Item): # define the fields for your item here like: name = scrapy.Field(output_processor = TakeFirst()) price = scrapy.Field(input_processor = MapCompose(removeRupeeSymbol), output_processor = TakeFirst()) link = scrapy.Field(output_processor = TakeFirst()) Output: 'name': 'APPLE iPhone 11 (White, 128 GB)', 'price': '44,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1-5g-rainbow-fantasy-128-gb/p/itm594222523bd8f?pid=MOBGB9TYGW5NGXVH&lid=LSTMOBGB9TYGW5NGXVHWMF5TV&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_3&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGB9TYGW5NGXVH.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1 5G (Rainbow Fantasy, 128 GB)', 'price': '15,990'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1-5g-rainbow-fantasy-128-gb/p/itm594222523bd8f?pid=MOBGB9TYFQR3FQZT&lid=LSTMOBGB9TYFQR3FQZTZ6EEUD&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_4&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGB9TYFQR3FQZT.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1 5G (Rainbow Fantasy, 128 GB)', 'price': '16,990'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1-5g-starlight-black-128-gb/p/itm594222523bd8f?pid=MOBGB9TYF7P7RNYX&lid=LSTMOBGB9TYF7P7RNYX5GJVDV&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_5&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGB9TYF7P7RNYX.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1 5G (Starlight Black, 128 GB)', 'price': '16,990'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1-5g-starlight-black-128-gb/p/itm594222523bd8f?pid=MOBGB9TYNDFYKNQ6&lid=LSTMOBGB9TYNDFYKNQ6QMGH15&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_6&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGB9TYNDFYKNQ6.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1 5G (Starlight Black, 128 GB)', 'price': '15,990'} 2022-11-20 22:15:02 
[scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1-44w-starry-sky-128-gb/p/itm2a08ebbea3689?pid=MOBGDRHVHNBBBBP5&lid=LSTMOBGDRHVHNBBBBP5SY2MJL&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_7&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGDRHVHNBBBBP5.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1 44W (Starry Sky, 128 GB)', 'price': '14,499'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1-44w-starry-sky-128-gb/p/itm2a08ebbea3689?pid=MOBGDRHVMW2UDXZY&lid=LSTMOBGDRHVMW2UDXZYVNQXYN&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_8&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGDRHVMW2UDXZY.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1 44W (Starry Sky, 128 GB)', 'price': '15,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1-44w-midnight-galaxy-128-gb/p/itm2a08ebbea3689?pid=MOBGDRHVZN29ZJF4&lid=LSTMOBGDRHVZN29ZJF4WEHAX7&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_9&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGDRHVZN29ZJF4.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1 44W (Midnight Galaxy, 128 GB)', 'price': '14,499'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1-5g-starlight-black-128-gb/p/itm594222523bd8f?pid=MOBGB9TYEDGEXQRA&lid=LSTMOBGB9TYEDGEXQRAKBELKI&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_10&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGB9TYEDGEXQRA.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1 5G (Starlight Black, 128 GB)', 'price': '19,990'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1-5g-silky-white-128-gb/p/itm594222523bd8f?pid=MOBGHNKGG77MVYBG&lid=LSTMOBGHNKGG77MVYBGQMSTRZ&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_11&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGHNKGG77MVYBG.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1 5G (Silky White, 128 GB)', 'price': '15,990'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/samsung-galaxy-f13-nightsky-green-64-gb/p/itmeadfda1bd23fa?pid=MOBGENJWF4KJTPEN&lid=LSTMOBGENJWF4KJTPENAUQVSZ&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_12&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGENJWF4KJTPEN.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'SAMSUNG Galaxy F13 (Nightsky Green, 64 GB)', 'price': '11,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 
'https://www.flipkart.com/redmi-10-midnight-black-64-gb/p/itmd93641e4ebb47?pid=MOBGC9GYEBH3GZ4E&lid=LSTMOBGC9GYEBH3GZ4ESWAKTT&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_13&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGC9GYEBH3GZ4E.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'REDMI 10 (Midnight Black, 64 GB)', 'price': '10,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/motorola-e40-carbon-gray-64-gb/p/itm0ca635007c9e2?pid=MOBG2EMWUMUFGSZE&lid=LSTMOBG2EMWUMUFGSZEJNGZMU&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_14&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBG2EMWUMUFGSZE.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'MOTOROLA e40 (Carbon Gray, 64 GB)', 'price': '9,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/motorola-g40-fusion-frosted-champagne-128-gb/p/itm78278061a0e25?pid=MOBFWSF8Q3XAHTZH&lid=LSTMOBFWSF8Q3XAHTZHU8WKTS&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_15&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBFWSF8Q3XAHTZH.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'MOTOROLA G40 Fusion (Frosted Champagne, 128 GB)', 'price': '13,499'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/redmi-10-pacific-blue-64-gb/p/itm0f2a6a2112b75?pid=MOBGC9GYCHQZK9GW&lid=LSTMOBGC9GYCHQZK9GW8N0WII&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_16&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGC9GYCHQZK9GW.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'REDMI 10 (Pacific Blue, 64 GB)', 'price': '10,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/apple-iphone-11-black-64-gb/p/itm4e5041ba101fd?pid=MOBFWQ6BXGJCEYNY&lid=LSTMOBFWQ6BXGJCEYNYZXSHRJ&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_17&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBFWQ6BXGJCEYNY.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'APPLE iPhone 11 (Black, 64 GB)', 'price': '39,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/samsung-galaxy-f13-waterfall-blue-64-gb/p/itm583ef432b2b0c?pid=MOBGENJWBPFYJSFT&lid=LSTMOBGENJWBPFYJSFT1ZY7B0&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&spotlightTagId=BestsellerId_tyy%2F4io&srno=s_1_18&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGENJWBPFYJSFT.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'SAMSUNG Galaxy F13 (Waterfall Blue, 64 GB)', 'price': '11,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 
'https://www.flipkart.com/vivo-t1x-space-blue-64-gb/p/itm9e8207e7825a9?pid=MOBGG56ZFNPMHBWE&lid=LSTMOBGG56ZFNPMHBWEB8Y2U5&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_19&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGG56ZFNPMHBWE.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1X (Space Blue, 64 GB)', 'price': '11,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1x-gravity-black-64-gb/p/itm9e8207e7825a9?pid=MOBGG56ZMXMNUCYF&lid=LSTMOBGG56ZMXMNUCYFQ5HT8S&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_20&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGG56ZMXMNUCYF.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1X (Gravity Black, 64 GB)', 'price': '11,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1-44w-midnight-galaxy-128-gb/p/itm2a08ebbea3689?pid=MOBGDRHVXFVCGS23&lid=LSTMOBGDRHVXFVCGS23RDLBPG&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_21&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGDRHVXFVCGS23.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1 44W (Midnight Galaxy, 128 GB)', 'price': '15,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/vivo-t1-44w-midnight-galaxy-128-gb/p/itm2a08ebbea3689?pid=MOBGDRHVWJUFTYQJ&lid=LSTMOBGDRHVWJUFTYQJ2SYXAC&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_22&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGDRHVWJUFTYQJ.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'vivo T1 44W (Midnight Galaxy, 128 GB)', 'price': '17,999'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/samsung-galaxy-f23-5g-aqua-blue-128-gb/p/itme54bc0c2292f4?pid=MOBGBKQF45XPEUHA&lid=LSTMOBGBKQF45XPEUHAYAHBJE&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_23&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGBKQF45XPEUHA.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'SAMSUNG Galaxy F23 5G (Aqua Blue, 128 GB)', 'price': '18,499'} 2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile> {'link': 'https://www.flipkart.com/motorola-e40-pink-clay-64-gb/p/itm5d6f2871d1bbf?pid=MOBG2EMW2ZUR4BFG&lid=LSTMOBG2EMW2ZUR4BFGEC0C0J&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_24&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBG2EMW2ZUR4BFG.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b', 'name': 'MOTOROLA e40 (Pink Clay, 64 GB)', 'price': '9,999'}
How can I access spider's file data in items file in scrapy python?
FlipKart.py main spider file to scrape name, price, and link from flipkart.com import scrapy from ..items import FlipkartScraperItem class FlipkartSpider(scrapy.Spider): name = 'FlipKart' allowed_domains = ['www.flipkart.com'] start_urls = ['https://www.flipkart.com/search?q=mobile'] def parse(self, response): products = response.css('._2kHMtA') for product in products: item = FlipkartScraperItem() item['name'] = product.css('._4rR01T').get(), item['price'] = product.css('._2kHMtA ._1_WHN1').get(), item['link'] = product.css("._1fQZEK::attr('href')").get() yield item Items.py File Here I wanted to print the name variable import scrapy from scrapy.loader import ItemLoader from itemloaders.processors import TakeFirst # TakeFirst text from data from itemloaders.processors import MapCompose # For function calling from w3lib.html import remove_tags # For removing html tags def removeRupeeSymbol(value): return value.replace('₹', '').strip() class FlipkartScraperItem(scrapy.Item): # define the fields for your item here like: name = scrapy.Field(input_processor = MapCompose(remove_tags), output_processor = TakeFirst()) price = scrapy.Field(input_processor = MapCompose(remove_tags, removeRupeeSymbol), output_processor = TakeFirst()) link = scrapy.Field() print(name) I want to scrape Flipkart mobile phone data and store it in a CSV with some changes to that data. I have written a function called removeRupeeSymbol to clean the data, and then I want to store that data in a CSV file, but I am not able to access it: when I try to print the data, it gives me the processor objects instead of the data. Here is the result when I print the name variable {'input_processor': <itemloaders.processors.MapCompose object at 0x000001DE10CBD290>, 'output_processor': <itemloaders.processors.TakeFirst object at 0x000001DE10CBD390>}
[ "To pull the desired data, you can try to implement the next working example.\nFull working code as an example:\nimport scrapy\nfrom ..items import FlipkartScraperItem\nfrom itemloaders import ItemLoader\n\nclass FlipkartSpider(scrapy.Spider):\n\n name = 'flipKart'\n allowed_domains = ['www.flipkart.com']\n start_urls = ['https://www.flipkart.com/search?q=mobile']\n\n def parse(self, response):\n products = response.css('._2kHMtA')\n\n for product in products:\n u = 'https://www.flipkart.com' + product.css( \"._1fQZEK::attr('href')\").get()\n loader = ItemLoader(item=FlipkartScraperItem(),selector = product)\n loader.add_css('name', '._4rR01T::text')\n loader.add_css('price', '._2kHMtA ._1_WHN1::text')\n loader.add_value('link', u)\n \n item = loader.load_item()\n yield item\n\nitems.py file:\nimport scrapy\nfrom scrapy.loader import ItemLoader\nfrom itemloaders.processors import TakeFirst # TakeFirst text from data\nfrom itemloaders.processors import MapCompose # For function calling\n#from w3lib.html import remove_tags # For removing html tags\n\ndef removeRupeeSymbol(value):\n return value.replace('₹', '').strip()\n\nclass FlipkartScraperItem(scrapy.Item):\n # define the fields for your item here like:\n name = scrapy.Field(output_processor = TakeFirst())\n price = scrapy.Field(input_processor = MapCompose(removeRupeeSymbol), output_processor = TakeFirst())\n link = scrapy.Field(output_processor = TakeFirst())\n\nOutput:\n'name': 'APPLE iPhone 11 (White, 128 GB)',\n 'price': '44,999'}\n2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile>\n{'link': 'https://www.flipkart.com/vivo-t1-5g-rainbow-fantasy-128-gb/p/itm594222523bd8f?pid=MOBGB9TYGW5NGXVH&lid=LSTMOBGB9TYGW5NGXVHWMF5TV&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_3&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGB9TYGW5NGXVH.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b',\n 'name': 'vivo T1 5G (Rainbow Fantasy, 128 GB)',\n 'price': '15,990'}\n2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile>\n{'link': 'https://www.flipkart.com/vivo-t1-5g-rainbow-fantasy-128-gb/p/itm594222523bd8f?pid=MOBGB9TYFQR3FQZT&lid=LSTMOBGB9TYFQR3FQZTZ6EEUD&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_4&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGB9TYFQR3FQZT.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b',\n 'name': 'vivo T1 5G (Rainbow Fantasy, 128 GB)',\n 'price': '16,990'}\n2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile>\n{'link': 'https://www.flipkart.com/vivo-t1-5g-starlight-black-128-gb/p/itm594222523bd8f?pid=MOBGB9TYF7P7RNYX&lid=LSTMOBGB9TYF7P7RNYX5GJVDV&marketplace=FLIPKART&q=mobile&store=tyy%2F4io&srno=s_1_5&otracker=search&fm=organic&iid=57f0e76e-5286-4a0b-a6b6-f37cd935bfd2.MOBGB9TYF7P7RNYX.SEARCH&ppt=None&ppn=None&ssid=hz6twhlgww0000001668960934301&qH=532c28d5412dd75b',\n 'name': 'vivo T1 5G (Starlight Black, 128 GB)',\n 'price': '16,990'}\n2022-11-20 22:15:02 [scrapy.core.scraper] DEBUG: Scraped from <200 https://www.flipkart.com/search?q=mobile>\n{'link': 
[ 1 ]
[]
[]
[ "csv", "python", "scrapy", "web_scraping" ]
stackoverflow_0074508265_csv_python_scrapy_web_scraping.txt
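As a quick illustration of why the question's print(name) shows processor objects: scrapy.Field is just a dict of metadata, and the processors only run when an ItemLoader loads values. Here is a minimal sketch exercising the same processors outside a full Scrapy project (an assumption: only the itemloaders package is installed):

# Minimal sketch: MapCompose runs each function over every extracted value,
# and TakeFirst then picks the first non-empty result.
from itemloaders.processors import MapCompose, TakeFirst

def remove_rupee_symbol(value):
    # Same cleaning idea as removeRupeeSymbol in the thread above.
    return value.replace('₹', '').strip()

price_in = MapCompose(remove_rupee_symbol)  # input processor
price_out = TakeFirst()                     # output processor

raw_values = ['₹15,990 ']                   # what add_css would have extracted
print(price_out(price_in(raw_values)))      # -> '15,990'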
Q: Pandas: filling missing values by mean in each group
This should be straightforward, but the closest thing I've found is this post: pandas: Filling missing values within a group, and I still can't solve my problem....
Suppose I have the following dataframe
df = pd.DataFrame({'value': [1, np.nan, np.nan, 2, 3, 1, 3, np.nan, 3],
                   'name': ['A','A', 'B','B','B','B', 'C','C','C']})

 name value
0 A 1
1 A NaN
2 B NaN
3 B 2
4 B 3
5 B 1
6 C 3
7 C NaN
8 C 3

and I'd like to fill in "NaN" with mean value in each "name" group, i.e.

 name value
0 A 1
1 A 1
2 B 2
3 B 2
4 B 3
5 B 1
6 C 3
7 C 3
8 C 3

I'm not sure where to go after:
grouped = df.groupby('name').mean()

Thanks a bunch.

A: One way would be to use transform:
>>> df
 name value
0 A 1
1 A NaN
2 B NaN
3 B 2
4 B 3
5 B 1
6 C 3
7 C NaN
8 C 3
>>> df["value"] = df.groupby("name").transform(lambda x: x.fillna(x.mean()))
>>> df
 name value
0 A 1
1 A 1
2 B 2
3 B 2
4 B 3
5 B 1
6 C 3
7 C 3
8 C 3

A: fillna + groupby + transform + mean
This seems intuitive:
df['value'] = df['value'].fillna(df.groupby('name')['value'].transform('mean'))

The groupby + transform syntax maps the groupwise mean to the index of the original dataframe. This is roughly equivalent to @DSM's solution, but avoids the need to define an anonymous lambda function.

A: @DSM has IMO the right answer, but I'd like to share my generalization and optimization of the question: Multiple columns to group-by and having multiple value columns:
df = pd.DataFrame(
    {
        'category': ['X', 'X', 'X', 'X', 'X', 'X', 'Y', 'Y', 'Y'],
        'name': ['A','A', 'B','B','B','B', 'C','C','C'],
        'other_value': [10, np.nan, np.nan, 20, 30, 10, 30, np.nan, 30],
        'value': [1, np.nan, np.nan, 2, 3, 1, 3, np.nan, 3],
    }
)

... gives ...
  category name other_value value
0 X A 10.0 1.0
1 X A NaN NaN
2 X B NaN NaN
3 X B 20.0 2.0
4 X B 30.0 3.0
5 X B 10.0 1.0
6 Y C 30.0 3.0
7 Y C NaN NaN
8 Y C 30.0 3.0

In this generalized case we would like to group by category and name, and impute only on value.
This can be solved as follows:
df['value'] = df.groupby(['category', 'name'])['value']\
    .transform(lambda x: x.fillna(x.mean()))

Notice the column list in the group-by clause, and that we select the value column right after the group-by. This makes the transformation only be run on that particular column. You could add it to the end, but then you will run it for all columns only to throw out all but one measure column at the end. A standard SQL query planner might have been able to optimize this, but pandas (0.19.2) doesn't seem to do this.
Performance test by increasing the dataset by doing ...
big_df = None
for _ in range(10000):
    if big_df is None:
        big_df = df.copy()
    else:
        big_df = pd.concat([big_df, df])
df = big_df

... confirms that this increases the speed proportional to how many columns you don't have to impute:
import pandas as pd
from datetime import datetime

def generate_data():
    ...

t = datetime.now()
df = generate_data()
df['value'] = df.groupby(['category', 'name'])['value']\
    .transform(lambda x: x.fillna(x.mean()))
print(datetime.now()-t)

# 0:00:00.016012

t = datetime.now()
df = generate_data()
df["value"] = df.groupby(['category', 'name'])\
    .transform(lambda x: x.fillna(x.mean()))['value']
print(datetime.now()-t)

# 0:00:00.030022

On a final note you can generalize even further if you want to impute more than one column, but not all:
df[['value', 'other_value']] = df.groupby(['category', 'name'])['value', 'other_value']\
    .transform(lambda x: x.fillna(x.mean()))

A: Shortcut:
Groupby + Apply + Lambda + Fillna + Mean
>>> df['value1']=df.groupby('name')['value'].apply(lambda x:x.fillna(x.mean()))
>>> df.isnull().sum().sum()
 0

This solution still works if you want to group by multiple columns to replace missing values.
>>> df = pd.DataFrame({'value': [1, np.nan, np.nan, 2, 3, np.nan,np.nan, 4, 3],
    'name': ['A','A', 'B','B','B','B', 'C','C','C'],'class':list('ppqqrrsss')})

>>> df['value']=df.groupby(['name','class'])['value'].apply(lambda x:x.fillna(x.mean()))

>>> df
  value name class
0 1.0 A p
1 1.0 A p
2 2.0 B q
3 2.0 B q
4 3.0 B r
5 3.0 B r
6 3.5 C s
7 4.0 C s
8 3.0 C s

A: I'd do it this way
df.loc[df.value.isnull(), 'value'] = df.groupby('group').value.transform('mean')

A: The featured high ranked answer only works for a pandas Dataframe with only two columns. If you have a more columns case use instead:
df['Crude_Birth_rate'] = df.groupby("continent").Crude_Birth_rate.transform(
    lambda x: x.fillna(x.mean()))

A: To summarize all above concerning the efficiency of the possible solution
I have a dataset with 97 906 rows and 48 columns.
I want to fill in 4 columns with the median of each group.
The column I want to group has 26 200 groups.
The first solution
start = time.time()
x = df_merged[continuous_variables].fillna(df_merged.groupby('domain_userid')[continuous_variables].transform('median'))
print(time.time() - start)
0.10429811477661133 seconds

The second solution
start = time.time()
for col in continuous_variables:
    df_merged.loc[df_merged[col].isnull(), col] = df_merged.groupby('domain_userid')[col].transform('median')
print(time.time() - start)
0.5098445415496826 seconds

The next solution I only performed on a subset since it was running too long.
start = time.time()
for col in continuous_variables:
    x = df_merged.head(10000).groupby('domain_userid')[col].transform(lambda x: x.fillna(x.median()))
print(time.time() - start)
11.685635566711426 seconds

The following solution follows the same logic as above.
start = time.time()
x = df_merged.head(10000).groupby('domain_userid')[continuous_variables].transform(lambda x: x.fillna(x.median()))
print(time.time() - start)
42.630549907684326 seconds

So it's quite important to choose the right method.
Bear in mind that I noticed once a column was not a numeric the times were going up exponentially (makes sense as I was computing the median).

A: def groupMeanValue(group):
    group['value'] = group['value'].fillna(group['value'].mean())
    return group

dft = df.groupby("name").transform(groupMeanValue)

A: I know that is an old question. But I am quite surprised by the unanimity of apply/lambda answers here.
Generally speaking, that is the second worst thing to do after iterating rows, from a timing point of view.
What I would do here is
df.loc[df['value'].isna(), 'value'] = df.groupby('name')['value'].transform('mean')

Or using fillna
df['value'] = df['value'].fillna(df.groupby('name')['value'].transform('mean'))

I've checked with timeit (because, again, unanimity for apply/lambda based solutions made me doubt my instinct). And that is indeed 2.5 times faster than the most upvoted solutions.
Pandas: filling missing values by mean in each group
This should be straightforward, but the closest thing I've found is this post: pandas: Filling missing values within a group, and I still can't solve my problem....
Suppose I have the following dataframe
df = pd.DataFrame({'value': [1, np.nan, np.nan, 2, 3, 1, 3, np.nan, 3],
                   'name': ['A','A', 'B','B','B','B', 'C','C','C']})

 name value
0 A 1
1 A NaN
2 B NaN
3 B 2
4 B 3
5 B 1
6 C 3
7 C NaN
8 C 3

and I'd like to fill in "NaN" with mean value in each "name" group, i.e.

 name value
0 A 1
1 A 1
2 B 2
3 B 2
4 B 3
5 B 1
6 C 3
7 C 3
8 C 3

I'm not sure where to go after:
grouped = df.groupby('name').mean()

Thanks a bunch.
[ "One way would be to use transform:\n>>> df\n name value\n0 A 1\n1 A NaN\n2 B NaN\n3 B 2\n4 B 3\n5 B 1\n6 C 3\n7 C NaN\n8 C 3\n>>> df[\"value\"] = df.groupby(\"name\").transform(lambda x: x.fillna(x.mean()))\n>>> df\n name value\n0 A 1\n1 A 1\n2 B 2\n3 B 2\n4 B 3\n5 B 1\n6 C 3\n7 C 3\n8 C 3\n\n", "fillna + groupby + transform + mean\nThis seems intuitive:\ndf['value'] = df['value'].fillna(df.groupby('name')['value'].transform('mean'))\n\nThe groupby + transform syntax maps the groupwise mean to the index of the original dataframe. This is roughly equivalent to @DSM's solution, but avoids the need to define an anonymous lambda function.\n", "@DSM has IMO the right answer, but I'd like to share my generalization and optimization of the question: Multiple columns to group-by and having multiple value columns:\ndf = pd.DataFrame(\n {\n 'category': ['X', 'X', 'X', 'X', 'X', 'X', 'Y', 'Y', 'Y'],\n 'name': ['A','A', 'B','B','B','B', 'C','C','C'],\n 'other_value': [10, np.nan, np.nan, 20, 30, 10, 30, np.nan, 30],\n 'value': [1, np.nan, np.nan, 2, 3, 1, 3, np.nan, 3],\n }\n)\n\n... gives ...\n category name other_value value\n0 X A 10.0 1.0\n1 X A NaN NaN\n2 X B NaN NaN\n3 X B 20.0 2.0\n4 X B 30.0 3.0\n5 X B 10.0 1.0\n6 Y C 30.0 3.0\n7 Y C NaN NaN\n8 Y C 30.0 3.0\n\nIn this generalized case we would like to group by category and name, and impute only on value.\nThis can be solved as follows:\ndf['value'] = df.groupby(['category', 'name'])['value']\\\n .transform(lambda x: x.fillna(x.mean()))\n\nNotice the column list in the group-by clause, and that we select the value column right after the group-by. This makes the transformation only be run on that particular column. You could add it to the end, but then you will run it for all columns only to throw out all but one measure column at the end. A standard SQL query planner might have been able to optimize this, but pandas (0.19.2) doesn't seem to do this.\nPerformance test by increasing the dataset by doing ...\nbig_df = None\nfor _ in range(10000):\n if big_df is None:\n big_df = df.copy()\n else:\n big_df = pd.concat([big_df, df])\ndf = big_df\n\n... 
confirms that this increases the speed proportional to how many columns you don't have to impute:\nimport pandas as pd\nfrom datetime import datetime\n\ndef generate_data():\n ...\n\nt = datetime.now()\ndf = generate_data()\ndf['value'] = df.groupby(['category', 'name'])['value']\\\n .transform(lambda x: x.fillna(x.mean()))\nprint(datetime.now()-t)\n\n# 0:00:00.016012\n\nt = datetime.now()\ndf = generate_data()\ndf[\"value\"] = df.groupby(['category', 'name'])\\\n .transform(lambda x: x.fillna(x.mean()))['value']\nprint(datetime.now()-t)\n\n# 0:00:00.030022\n\nOn a final note you can generalize even further if you want to impute more than one column, but not all:\ndf[['value', 'other_value']] = df.groupby(['category', 'name'])['value', 'other_value']\\\n .transform(lambda x: x.fillna(x.mean()))\n\n", "Shortcut:\n\nGroupby + Apply + Lambda + Fillna + Mean\n\n>>> df['value1']=df.groupby('name')['value'].apply(lambda x:x.fillna(x.mean()))\n>>> df.isnull().sum().sum()\n 0 \n\nThis solution still works if you want to group by multiple columns to replace missing values.\n>>> df = pd.DataFrame({'value': [1, np.nan, np.nan, 2, 3, np.nan,np.nan, 4, 3], \n 'name': ['A','A', 'B','B','B','B', 'C','C','C'],'class':list('ppqqrrsss')}) \n\n \n>>> df['value']=df.groupby(['name','class'])['value'].apply(lambda x:x.fillna(x.mean()))\n \n>>> df\n value name class\n 0 1.0 A p\n 1 1.0 A p\n 2 2.0 B q\n 3 2.0 B q\n 4 3.0 B r\n 5 3.0 B r\n 6 3.5 C s\n 7 4.0 C s\n 8 3.0 C s\n \n\n", "I'd do it this way\ndf.loc[df.value.isnull(), 'value'] = df.groupby('group').value.transform('mean')\n\n", "The featured high ranked answer only works for a pandas Dataframe with only two columns. If you have a more columns case use instead: \ndf['Crude_Birth_rate'] = df.groupby(\"continent\").Crude_Birth_rate.transform(\n lambda x: x.fillna(x.mean()))\n\n", "To summarize all above concerning the efficiency of the possible solution\nI have a dataset with 97 906 rows and 48 columns.\nI want to fill in 4 columns with the median of each group.\nThe column I want to group has 26 200 groups.\nThe first solution\nstart = time.time()\nx = df_merged[continuous_variables].fillna(df_merged.groupby('domain_userid')[continuous_variables].transform('median'))\nprint(time.time() - start)\n0.10429811477661133 seconds\n\nThe second solution\nstart = time.time()\nfor col in continuous_variables:\n df_merged.loc[df_merged[col].isnull(), col] = df_merged.groupby('domain_userid')[col].transform('median')\nprint(time.time() - start)\n0.5098445415496826 seconds\n\nThe next solution I only performed on a subset since it was running too long.\nstart = time.time()\nfor col in continuous_variables:\n x = df_merged.head(10000).groupby('domain_userid')[col].transform(lambda x: x.fillna(x.median()))\nprint(time.time() - start)\n11.685635566711426 seconds\n\nThe following solution follows the same logic as above.\nstart = time.time()\nx = df_merged.head(10000).groupby('domain_userid')[continuous_variables].transform(lambda x: x.fillna(x.median()))\nprint(time.time() - start)\n42.630549907684326 seconds\n\nSo it's quite important to choose the right method.\nBear in mind that I noticed once a column was not a numeric the times were going up exponentially (makes sense as I was computing the median).\n", "def groupMeanValue(group):\n group['value'] = group['value'].fillna(group['value'].mean())\n return group\n\ndft = df.groupby(\"name\").transform(groupMeanValue)\n\n", "I know that is an old question. 
But I am quite surprised by the unanimity of apply/lambda answers here.\nGenerally speaking, that is the second worst thing to do after iterating rows, from a timing point of view.\nWhat I would do here is\ndf.loc[df['value'].isna(), 'value'] = df.groupby('name')['value'].transform('mean')\n\nOr using fillna\ndf['value'] = df['value'].fillna(df.groupby('name')['value'].transform('mean'))\n\nI've checked with timeit (because, again, unanimity for apply/lambda based solutions made me doubt my instinct). And that is indeed 2.5 times faster than the most upvoted solutions.\n" ]
[ 129, 104, 27, 16, 14, 6, 4, 2, 0 ]
[ "df.fillna(df.groupby(['name'], as_index=False).mean(), inplace=True)\n\n", "You can also use \"dataframe or table_name\".apply(lambda x: x.fillna(x.mean())).\n" ]
[ -1, -1 ]
[ "fillna", "imputation", "pandas", "pandas_groupby", "python" ]
stackoverflow_0019966018_fillna_imputation_pandas_pandas_groupby_python.txt
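A runnable distillation of the transform-based answers above, kept deliberately close to the question's data (a sketch; assumes any reasonably recent pandas):

import numpy as np
import pandas as pd

df = pd.DataFrame({'value': [1, np.nan, np.nan, 2, 3, 1, 3, np.nan, 3],
                   'name': ['A', 'A', 'B', 'B', 'B', 'B', 'C', 'C', 'C']})

# transform('mean') broadcasts each group's mean back onto the original index,
# so fillna can align it row by row.
group_mean = df.groupby('name')['value'].transform('mean')
df['value'] = df['value'].fillna(group_mean)
print(df)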
Q: How to remove trailing lines when appending to a file
The problem is when I add a student record (append txt to my file) for the first time, a major blank gap is added:
username,passcode

jack,Adidas123_
man,Adidas123_
kal,Adidas123_
ll,Adidas123_

I have tried to use the .strip() function but it did not seem to help. I was expecting my csv file to appear like this:
username,passcode
jack,Adidas123_
man,Ndidas123_
kal,Mdidas123_
ll,Zdidas123_

def add_user():
    infile = open("students.csv", "a")
    username = str((input('enter your username:')))
    passcode = "Adidas123_"

    data1 = f"\n{username},{passcode}"
    data = data1.strip()  # strips white space
    infile.write(data + "\n")  # appends to new line
    print('record added successfully!')
    infile.close()

A: Your code is correct, the problem must reside in your students.csv file. You may have by mistake left a newline in the file.
Check if your students.csv is:
username,password

or
username,password


with a newline.
Hope my answer helps!

A: As mentioned by RandomCoder59, right now we can only see the add_user() function in your sample code. There is a possibility that you are adding a newline via some other method to the same file.
infile = open("students.csv", "a")

The "a" parameter here represents append mode. Please have a look into the other methods in your Python code as well. Hope this helps, happy coding.

A: It is not very elegant, but you could use your code as is and then take out the empty line at the end. Basically your final text would be a string like this:
text_file = 'username,passcode\n\njack,Adidas123_\nman,Adidas123_\nkal,Adidas123_\nll,Adidas123_'

So you can just replace the double newline with a single one:
print(text_file.replace('\n\n', '\n'))

username,passcode
jack,Adidas123_
man,Adidas123_
kal,Adidas123_
ll,Adidas123_

For a better solution you should share more details of your code...

A: I think the issue is in the original file that is being opened. You don't show its original value or how it was created. Likely, the file starts with two new lines after the headers. The strip method won't have any effect on the data that is already in the file.
Take this example code (slightly adapted from your own):
def add_user(file, in_value:str):
    passcode = "Adidas123_"

    clean_value = in_value.strip()  # strips white space
    line = f'{clean_value},{passcode}'
    file.write(line + "\n")  # appends to new line
    print('record added successfully!')
    return file

If I start with a file that has two new lines after the header:
file = io.StringIO('username,passcode\n\n')
file.seek(0, io.SEEK_END)

Then I add two values, the first having a bunch of newlines, and the second having none:
# Add a username with a bunch of newlines
first_user = add_user(file, 'username1\n\n\n')
# Add a username with no newlines
second_user = add_user(first_user, 'username2')

The file contains the following text:
'username,passcode\n\nusername1,Adidas123_\nusername2,Adidas123_\n'

So you can see the values are being correctly added but there is an issue with the original file.
How to remove trailing lines when appending to a file
The problem is when I add a student record (append txt to my file) for the first time, a major blank gap is added:
username,passcode

jack,Adidas123_
man,Adidas123_
kal,Adidas123_
ll,Adidas123_

I have tried to use the .strip() function but it did not seem to help. I was expecting my csv file to appear like this:
username,passcode
jack,Adidas123_
man,Ndidas123_
kal,Mdidas123_
ll,Zdidas123_

def add_user():
    infile = open("students.csv", "a")
    username = str((input('enter your username:')))
    passcode = "Adidas123_"

    data1 = f"\n{username},{passcode}"
    data = data1.strip()  # strips white space
    infile.write(data + "\n")  # appends to new line
    print('record added successfully!')
    infile.close()
[ "Your code is correct, the problem must reside in your students.csv file. You may have by mistake left a newline in the file.\nCheck if you students.csv is:\nusername,password\n\nor\nusername,password\n\n\nwith a newline.\nHope my answer helps!\n", "As mentioned by RandomCoder59.\nRight now we can only see add_user() function in your sample code.\nThere is a possibility of you are adding newline via some other method to the same file.\ninfile = open(\"students.csv\", \"a\")\n\n\"a\" parameter here represents append mode. Please have a look into other methods in python code aswell. Hope this helps, Happy coding.\n", "It is not very elegant, but you could use your code as is and then take the empty line at the end. Basically your final text would be a string like this:\ntext_file = 'username,passcode\\n\\njack,Adidas123_\\nman,Adidas123_\\nkal,Adidas123_\\nll,Adidas123_'\n\nSo you can just replace the double newline with a single one:\nprint(text_file.replace('\\n\\n', '\\n'))\n\nusername,passcode\njack,Adidas123_\nman,Adidas123_\nkal,Adidas123_\nll,Adidas123_\n\nFor a better solution you should share more details of your code...\n", "I think the issue is in the original file that is being opened. You don't show its original value or how it was created. Likely, the file has starts with two new lines after the headers. The strip method won't have any effect on the data that is already in the file.\nTake this example code (slightly adapted from your own):\ndef add_user(file, in_value:str):\n passcode = \"Adidas123_\"\n \n clean_value = in_value.strip() # strips white space\n line = f'{clean_value},{passcode}'\n file.write(line +\"\\n\") # appends to new line\n print('record added succesfully !')\n return file\n\nIf I start with a file that has two new lines after the header:\nfile = io.StringIO('username,passcode\\n\\n')\nfile.seek(0, io.SEEK_END)\n\nThen I add two values, the first having a bunch of newlines, and the second having none:\n# Add a username with a bunch of newlines\nfirst_user = add_user(file, 'username1\\n\\n\\n')\n# Add a username with no newlines\nsecond_user = add_user(first_user, 'username2')\n\nThe file contains the following text:\n'username,passcode\\n\\nusername1,Adidas123_\\nusername2,Adidas123_\\n\n\nSo you can see the values are being correctly added but there is an issue with the original file.\n" ]
[ 0, 0, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0074509646_python.txt
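As a complement to the answers above, a sketch using the standard csv module sidesteps hand-built newlines entirely (a hypothetical helper; same file layout as the question):

import csv

def add_user(username, passcode="Adidas123_"):
    # newline='' is the documented open() setting for csv files; writerow
    # appends exactly one terminated row, so no leading "\n" is needed.
    with open("students.csv", "a", newline="") as f:
        csv.writer(f).writerow([username.strip(), passcode])

add_user("jack")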
Q: djangocms Currently installed Django version 3.2.15 differs from the declared 3.1
I am running an AWS Bitnami Django instance. Django 3.2.15 is installed by default. The documentation recommends Django version 3.2, so all is good there. Once installed, I am having a hard time getting djangocms to create a new project. I keep getting dependency errors when I issue the command
djangocms -f -p . projectname

I received the following:
Currently installed Django version 3.2.15 differs from the declared 3.1.
Please check the given `--django-version` installer argument, your virtualenv
configuration and any package forcing a different Django version

A: I couldn't figure out why I was receiving these errors when I knew I had more recent versions installed and followed the documentation correctly. Following these steps should get the issue resolved, it is what worked for me:
cd /home/projects-folder/
rm -R myproject/
rm -R venv/ (if you used a virtualenv)
python3 -m venv venv
source venv/bin/activate

python3 -m pip install --upgrade pip
pip install django-cms
pip install djangocms-installer

If you didn't use a virtualenv LOG OUT! LOG IN! Without logging out the site-packages will not reflect changes made.
If you are using a virtualenv I highly recommend removing the env, deleting the env folder and start all over with a new virtualenv. This was definitely frustrating for me so hopefully this can be of help to someone.
djangocms Currently installed Django version 3.2.15 differs from the declared 3.1
I am running an AWS Bitnami Django instance. Django 3.2.15 is installed by default. The documentation recommends Django version 3.2, so all is good there. Once installed, I am having a hard time getting djangocms to create a new project. I keep getting dependency errors when I issue the command
djangocms -f -p . projectname

I received the following:
Currently installed Django version 3.2.15 differs from the declared 3.1.
Please check the given `--django-version` installer argument, your virtualenv
configuration and any package forcing a different Django version
[ "I couldn't figure out why I was receiving these errors when I knew I had more recent versions installed and followed the documentation correctly. Following these steps should get the issue resolved, it is what worked for me:\ncd /home/projects-folder/\nrm -R myproject/\nrm -R venv/ (if you used a virtualenv)\npython3 -m venv venv\nsource venv/bin/activate\n\npython3 -m pip install --upgrade pip\npip install django-cms\npip install djangocms-installer\n\nIf you didn't use a virtualenv LOG OUT! LOG IN! Without logging out the site-packages will not reflect changes made.\nIf you are using a virtualenv I highly recommend removing the env, deleting the env folder and start all over with a new virtualenv. This was definitely frustrating for me so hopefully this can be of help to someone.\n" ]
[ 0 ]
[]
[]
[ "django", "django_cms", "pip", "python" ]
stackoverflow_0074509904_django_django_cms_pip_python.txt
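To confirm that the rebuilt environment above is the one actually being used, a quick version check from inside the activated virtualenv can help (a sketch using only the public django API):

import django

# Should report the 3.2.x you expect; if it shows something else, the
# installer is resolving a different environment than the one you activated.
print(django.get_version())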
Q: How to init an empty np array and add one-dimensional ones to it?
I try to create an empty array into which I can add other arrays and get a matrix:
arr = np.array([])
arr = np.append(arr, [1, 2])
arr = np.append(arr, [3, 4])

As a result, I get a one-dimensional array:
array([1., 2., 3., 4.])

Expected result:
array([[1., 2.],
       [3., 4.]])

I tried to init an array as multidimensional arr = np.array([[]]), and not append but concatenate arrays arr = np.concatenate((arr, [1, 2]), axis=0). Both options did not work, the result was again a one-dimensional array. So, what's the best way to fix this and get a two-dimensional array as a result?

A: You need to start with an array with the right dimensions and append using the same dimensions. You also need to note whether you are appending rows or columns, otherwise np.append will flatten the result into a 1d array. Since you are starting with an empty array, you'll need to use a function that lets you specify the shape on creation. And then wrap the new rows in an outer list to make them 2d.
>>> arr=np.zeros((0,2), dtype=int)
>>> arr = np.append(arr, [[1,2]], axis=0)
>>> arr = np.append(arr, [[3,4]], axis=0)
>>> arr
array([[1, 2],
       [3, 4]])

As a side note, each append copies the array, which gets more and more expensive as the array grows. If you are using append to create a large array row by row, you should consider alternatives.

A: Collect the lists in a list, and do one array construction at the end:
In [687]: alist = []
     ...: alist.append([1,2])
     ...: alist.append([3,4])
     ...: arr = np.array(alist)
In [688]: alist
Out[688]: [[1, 2], [3, 4]]
In [689]: arr
Out[689]:
array([[1, 2],
       [3, 4]])

Repeated np.append and np.concatenate is inefficient, and hard to get right.
Why are you surprised that you got a 1d array? Did you skip the np.append docs? Without axis that's what it does. With axis it just does np.concatenate. And as you found, concatenate is picky about dimensions. You can join a (0,2) with a (1,2) on axis 0, but not a (1,0) with a (2,).
Imitating list methods with arrays doesn't work - unless you read the documentation carefully.
"empty array" is an ambiguous phrase. np.empty((1000,20,30)) makes a very large array of "arbitrary" values. np.array([]) makes a float dtype array with shape (0,). That has 0 elements, but so does an array with shape (10,0,1000). Lists are inherently 1d; arrays have both shape and dtype.

A: Numpy arrays have a fixed size. Therefore appending a value to an array is not possible.
There are a number of ways to achieve your goal. The simplest of them is probably just using the np.array method:
np.array([[1., 2.], [3., 4.]])

You can also create a new array of the desired size containing all zeros using np.zeros() and copy the values into this new array afterwards like this:
arr = np.zeros((2, 2))
arr[0, :] = [1, 2]
arr[1, :] = [3, 4]

Or you can stack the row arrays using np.stack:
np.stack([[1, 2], [3, 4]])

Edit
If you do not know the required size of the array at creation time you can create a two dimensional array and use np.append to create a copy of the initial array with the new value appended to it:
arr = np.zeros((0, 2))  # Create an empty array with a row length of 2
arr = np.append(arr, [[1, 2]], axis=0)
arr = np.append(arr, [[3, 4]], axis=0)

The double square brackets are crucial because this makes the array two dimensional.
How to init an empty np array and add one-dimensional ones to it?
I try to create an empty array into which I can add other arrays and get a matrix:
arr = np.array([])
arr = np.append(arr, [1, 2])
arr = np.append(arr, [3, 4])

As a result, I get a one-dimensional array:
array([1., 2., 3., 4.])

Expected result:
array([[1., 2.],
       [3., 4.]])

I tried to init an array as multidimensional arr = np.array([[]]), and not append but concatenate arrays arr = np.concatenate((arr, [1, 2]), axis=0). Both options did not work, the result was again a one-dimensional array. So, what's the best way to fix this and get a two-dimensional array as a result?
[ "You need to start with an array with the right dimensions and append using the same dimensions. You also need to note whether you are appending rows or columns, otherwise np.append will flatten the result into a 1d array. Since you are starting with an empty array, you'll need to use a function that lets you specify the shape on creation. And then wrap the new rows in an outer list to make them 2d.\n>>> arr=np.zeros((0,2), dtype=int)\n>>> arr = np.append(arr, [[1,2]], axis=0)\n>>> arr = np.append(arr, [[3,4]], axis=0)\n>>> arr\narray([[1, 2],\n [3, 4]])\n\nAs a side note, each append copies the array, which gets more and more expensive as the array grows. If you are using append to create a large array row by row, you should consider alternatives.\n", "Collect the lists in a list, and do one array construction at the end:\nIn [687]: alist = []\n ...: alist.append([1,2])\n ...: alist.append([3,4])\n ...: arr = np.array(alist) \nIn [688]: alist\nOut[688]: [[1, 2], [3, 4]] \nIn [689]: arr\nOut[689]: \narray([[1, 2],\n [3, 4]])\n\nRepeated np.append and np.concatenate is inefficient, and hard to get right.\nWhy are you surprised that you got a 1d array. Did you skip the np.append docs? Without axis that's what is does. With axis it just does np.concatenate. And as you found concatenate is picky about dimensions. You can join a (0,2) with a (1,2) on axis 0, but not a (1,0) with a (2,).\nImitating list methods with arrays doesn't work - unless you the read documentation carefully.\n\"empty array\" is an ambiguous phrase. np.empty((1000,20,30)) makes a very large array of \"arbitrary\" values. np.array([]) makes a float dtype array with shape (0,). That has 0 elements, but so does an array with shape (10,0,1000). Lists are inherently 1d; arrays have both shape and dtype.\n", "Numpy arrays have a fixed size. Therefor appending a value to an array is not possible.\nThere is a number of ways to achieve your goal. The simplest of them is probably just using the np.array method:\nnp.array([[1., 2.], [3., 4.]])\n\nYou can also create a new array of the desired size containing all zeros using np.zeros() and copy the values into this new array afterwards like this:\n1\narr = np.zeros((2, 2))\narr[0, :] = [1, 2]\narr[1, :] = [3, 4]\n\nOr you can stack the row arrays using np.stack:\nnp.stack([[1, 2], [3, 4]])\n\nEdit\nIf you do not know the required size of the array at creation time you can create a two dimensional array and use np.append to create a copy of the initial array with the new value appended to it:\narr = np.zeros((0, 2)) # Create an empty array with a row length of 2\narr = np.append(arr, [[1, 2]], axis=0)\narr = np.append(arr, [[3, 4]], axis=0)\n\nThe double square brackets are crucial because this makes the array two dimensional.\n" ]
[ 1, 1, 0 ]
[]
[]
[ "arrays", "numpy", "python" ]
stackoverflow_0074509525_arrays_numpy_python.txt
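To make the trade-off in the answers above concrete, here is a small side-by-side sketch of the list-first approach versus repeated np.append (both produce the same 2-d result):

import numpy as np

# list-first (preferred): one allocation at the end
rows = []
rows.append([1, 2])
rows.append([3, 4])
arr = np.array(rows)

# append-based: every call copies the whole array so far
arr2 = np.zeros((0, 2), dtype=int)
for row in ([1, 2], [3, 4]):
    arr2 = np.append(arr2, [row], axis=0)  # note the extra brackets: keep it 2-d

print(arr)
print(arr2)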
Q: Fill NaN based on max value from a group and another string column with the value at the NaN row
I have an input data as shown:
df = pd.DataFrame({"colony" : [22, 22, 22, 33, 33, 33],
                   "measure" : [np.nan, 7, 11, 13, np.nan, 9,],
                   "net/gross" : [np.nan, "gross", "net", "gross", "np.nan", "net"]})

df
  colony measure net/gross
0 22 NaN NaN
1 22 7 gross
2 22 11 net
3 33 13 gross
4 33 NaN NaN
5 33 9 net

I want to fill the NaN in the measure column with the maximum value from each group of the colony, then fill the NaN in the net/gross column with the net/gross value at the row where the measure was maximum (e.g. fill the NaN at index 0 with the value corresponding to where the measure was max, which is "net"), and create a remarks column to document all the NaN-filled rows as "max_filled" and the other rows as "unchanged", to arrive at an output as below:
  colony measure net/gross remarks
0 22 11 net max_filled
1 22 7 gross unchanged
2 22 11 net unchanged
3 33 13 gross unchanged
4 33 13 gross max_filled
5 33 9 net unchanged

A: My solution
What I would do is compute a column of max
mx=df.groupby('colony').measure.transform(max)

and a list of rows to be filled
f=df.measure.isna()

And then use them to fill what you want
df['remarks']='unchanged'
df.loc[f, 'measure']=mx
df.loc[f, 'net/gross']=df[f]['net/gross']
df.loc[f, 'remarks']='max_filled'

Remark on other answers to similar question
Note that answers to this simpler question, which was just filling NaN with the mean value of each group, which you could easily adapt replacing mean with max, but which would not help filling the 2 other columns, seem to be unanimously promoting lambda based solutions.
That is generally a bad idea. I mean, I love lambda, I come from lambda calculus. But in pandas, apply or similar methods are just the next worst thing after bad old for loops on the rows (and even sometimes, for loops are faster).
The unanimity of the answers made me doubt though. But, well, timeit closes the debate: even for that simpler problem, my solution is faster than the answers to this question.
That is, even just filling NaN is faster doing it this way
mx=df.groupby('colony').measure.transform(max)
f=df.measure.isna()
df.loc[f,'measure']=mx

Rather than the proposed way
df["measure"] = df.groupby("colony")["measure"].transform(lambda x: x.fillna(x.mean()))

So, I was at first trying to see how that previous answer could be adapted to your more complex case (where so-transformed rows of measure should also impact net/gross and remarks). But there is no reason to do so, since it is faster (2.5 times faster) to compute a whole column of max, and then do simple column assignment.
So, the moral is
Never ever use lambda (and for, and apply) when you can avoid it on dataframes.
Even at the cost of computing a whole column of max values, of which only a fraction will really be used, it is better to stick with whole column algebra.

A: Here is another way using .transform('max') and .transform('idxmax')
g = df.groupby('colony')['measure']

measure_max, ng_max = g.transform('max'),df.loc[g.transform('idxmax'),'net/gross'].reset_index(drop=True)

(df.fillna({'measure':measure_max,'net/gross':ng_max})
.assign(remarks = np.where(df['net/gross'].isna(),'max_filled','unchanged')))

Output:
  colony measure net/gross remarks
0 22 11.0 net max_filled
1 22 7.0 gross unchanged
2 22 11.0 net unchanged
3 33 13.0 gross unchanged
4 33 13.0 gross max_filled
5 33 9.0 net unchanged
Fill NaN based on max value from a group and another string column with the value at the NaN row
I have an input data as shown:
df = pd.DataFrame({"colony" : [22, 22, 22, 33, 33, 33],
                   "measure" : [np.nan, 7, 11, 13, np.nan, 9,],
                   "net/gross" : [np.nan, "gross", "net", "gross", "np.nan", "net"]})

df
  colony measure net/gross
0 22 NaN NaN
1 22 7 gross
2 22 11 net
3 33 13 gross
4 33 NaN NaN
5 33 9 net

I want to fill the NaN in the measure column with the maximum value from each group of the colony, then fill the NaN in the net/gross column with the net/gross value at the row where the measure was maximum (e.g. fill the NaN at index 0 with the value corresponding to where the measure was max, which is "net"), and create a remarks column to document all the NaN-filled rows as "max_filled" and the other rows as "unchanged", to arrive at an output as below:
  colony measure net/gross remarks
0 22 11 net max_filled
1 22 7 gross unchanged
2 22 11 net unchanged
3 33 13 gross unchanged
4 33 13 gross max_filled
5 33 9 net unchanged
[ "My solution\nWhat I would do is compute a column of max\nmx=df.groupby('colony').measure.transform(max)\n\nand a list of rows to be filled\nf=df.measure.isna()\n\nAnd then use them to fill what you want\ndf['remarks']='unchanged'\ndf.loc[f, 'measure']=mx\ndf.loc[f, 'net/gross']=df[f]['net/gross']\ndf.loc[f, 'remarks']='max_filled'\n\nRemark on other answers to similar question\nNote that answers to this simpler question, which was just filling NaN with mean value of each group, which you could easily adapt replacing mean with max, but which would not help filling the 2 other columns, seem to be unanimously promoting lambda based solutions.\nThat is generally a bad idea. I mean, I love lambda, I come from lambda calculus. But in pandas, apply or similar method are just the next worst thing after bad old for loops on the rows (and even sometimes, for loops are faster).\nThe unanimity of the answers made me doubt tho. But, well, timeit close the debate: even for that simpler problem, my solution is faster than the answers to this questions.\nThat is, even just filling NaN is faster doing it this way\nmx=df.groupby('colony').measure.transform(max)\nf=df.measure.isna()\ndf.loc[f,'measure']=mx\n\nRather than the proposed way\ndf[\"measure\"] = df.groupby(\"colony\")[\"measure\"].transform(lambda x: x.fillna(x.mean()))\n\nSo, I was at first trying to see how that previous answer could be adapted to your more complex case (where so transformed rows of measure should also impact net/gross and remarks). But there is no reason to do so, since it is faster (2.5 times faster) to compute a whose column of max, and then do simple column affectation.\nSo, morale is\nNever ever use lambda (and for, and apply) when you can avoid it on dataframes.\nEven at the cost of computing a whole column of max values, whose only a fraction will really be used, it is better to stick with whole column algebra.\n", "Here is another way using .transform('max') and .transform('idxmax')\ng = df.groupby('colony')['measure']\n\nmeasure_max, ng_max = g.transform('max'),df.loc[g.transform('idxmax'),'net/gross'].reset_index(drop=True)\n\n(df.fillna({'measure':measure_max,'net/gross':ng_max})\n.assign(remarks = np.where(df['net/gross'].isna(),'max_filled','unchanged')))\n\nOutput:\n colony measure net/gross remarks\n0 22 11.0 net max_filled\n1 22 7.0 gross unchanged\n2 22 11.0 net unchanged\n3 33 13.0 gross unchanged\n4 33 13.0 gross max_filled\n5 33 9.0 net unchanged\n\n" ]
[ 0, 0 ]
[]
[]
[ "dataframe", "numpy", "pandas", "python" ]
stackoverflow_0074509414_dataframe_numpy_pandas_python.txt
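Combining the two answers above into one end-to-end sketch (transform('idxmax') is taken from the second answer; the remarks logic mirrors the question's expected output):

import numpy as np
import pandas as pd

df = pd.DataFrame({"colony": [22, 22, 22, 33, 33, 33],
                   "measure": [np.nan, 7, 11, 13, np.nan, 9],
                   "net/gross": [np.nan, "gross", "net", "gross", np.nan, "net"]})

grp = df.groupby("colony")["measure"]
max_vals = grp.transform("max")      # group max broadcast to every row
src_rows = grp.transform("idxmax")   # index of the max row, per group
missing = df["measure"].isna()

df.loc[missing, "measure"] = max_vals[missing]
df.loc[missing, "net/gross"] = df.loc[src_rows[missing], "net/gross"].to_numpy()
df["remarks"] = np.where(missing, "max_filled", "unchanged")
print(df)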
Q: Setting colours to multiple lines in matplotlib (python)
I have a graph computed from matplotlib, containing six plotted lines, and I want to know what I'm doing wrong in assigning each of my lines a unique colour. I've got a list for the colours using hex codes, and each listx in "lists" contains the y axis data for each line:
colours = ["#ffa500", "#008000", "#ff0000", "#800080", "#7f6000", "#ffa7b6"]
lists = [list1, list2, list3, list4, list5, list6]

and I'm trying to assign each colour to each line:
for c in range(len(colours)):
    for l in lists:
        plt.plot(x, l, colours[c])

What I'm getting currently is every line being assigned pink (the last entry in the colours list), rather than each colour corresponding to each list (e.g. list2 needs color #008000). I'm relatively new to programming, so if someone can explain what's wrong and how I can fix this that would be much appreciated, thanks!

A: I think what is probably happening in your code is that you are looping through your colours list indexes and, for each index in your colours, you are looping through lists.
So what will happen is you will get to the end of your colours list indexes (pink) and then loop through lists, plotting each using that colour.
To fix this (if your colours and lists lists will always be the same length) do something like:
for i in range(len(colours)):
    plt.plot(x, lists[i], colours[i])

I hope this works for you.
Setting colours to multiple lines in matplotlib (python)
I have a graph computed from matplotlib, containing six plotted lines, and I want to know what I'm doing wrong in assigning each of my lines a unique colour. I've got a list for the colours using hex codes, and each listx in "lists" contains the y axis data for each line:
colours = ["#ffa500", "#008000", "#ff0000", "#800080", "#7f6000", "#ffa7b6"]
lists = [list1, list2, list3, list4, list5, list6]

and I'm trying to assign each colour to each line:
for c in range(len(colours)):
    for l in lists:
        plt.plot(x, l, colours[c])

What I'm getting currently is every line being assigned pink (the last entry in the colours list), rather than each colour corresponding to each list (e.g. list2 needs color #008000). I'm relatively new to programming, so if someone can explain what's wrong and how I can fix this that would be much appreciated, thanks!
[ "I think what is probably happening in your code is because you are looping through your colours list index and then for each index in your colours you are looping through lists.\nSo what will happen is you will get to the end of your colours list indexes (pink) and then loop through lists plotting each using that colour.\nTo fix this (if your colours and lists lists will always be the same length) do something like:\nfor x in range(len(colours)):\n plt.plot(x, lists[x], colours[x])\n\nI hope this works for you.\n" ]
[ 0 ]
[]
[]
[ "matplotlib", "python" ]
stackoverflow_0074509790_matplotlib_python.txt
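The fix in the answer above can be written even more directly with zip, which pairs each y-series with exactly one colour (a sketch with stand-in data, since list1..list6 from the question are not shown):

import matplotlib.pyplot as plt

x = range(5)
colours = ["#ffa500", "#008000", "#ff0000"]
lists = [[v for v in x], [v * 2 for v in x], [v * 3 for v in x]]  # stand-in data

# zip pairs the i-th data list with the i-th colour and stops at the shorter one.
for ydata, colour in zip(lists, colours):
    plt.plot(x, ydata, color=colour)
plt.show()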
Q: Time formatting with strptime in a dataframe
I'm trying to read a CSV file, where some columns have date or time values. I started with this:
import pandas as pd
from datetime import datetime

timeparse = lambda x: datetime.strptime(x, '%H:%M:%S.%f')

lap_times = pd.read_csv(
    'data/lap_times.csv',
    parse_dates={'time_datetime': ['time']},
    date_parser=timeparse
)

But sometimes the row of the column has a format %M:%S.%f and sometimes has %H:%M:%S.%f. So I got an error.
I thought about creating a function like this, but I can't see how I would pass an argument to the function to do the transformation for each row of the column passed as an argument.
def timeparse_1():
    try:
        return datetime.strptime(x, '%H:%M:%S.%f')
    finally:
        return datetime.strptime(x, '%M:%S.%f')

But I'm getting:
NameError: name 'x' is not defined

A: It would be easier if you post a sample of your CSV file, but something like this may work:
import pandas as pd
from datetime import datetime as dt

df = pd.DataFrame({'Time': ['12:34:56', '12:34:56.789']})

df.Time = df.Time.apply(lambda x: dt.strptime(x, '%H:%M:%S.%f') if len(x) > 8 else dt.strptime(x, '%H:%M:%S'))

Which will result in:
>>> df
0   1900-01-01 12:34:56.000
1   1900-01-01 12:34:56.789
Name: Time, dtype: datetime64[ns]
>>>

But there is a better way:
import pandas as pd

df = pd.DataFrame({'Time': ['12:34:56', '12:34:56.789']})

df.Time = df.Time.apply(pd.to_datetime)

Which results in the following:
>>> df
0   2022-11-20 12:34:56.000
1   2022-11-20 12:34:56.789
Name: Time, dtype: datetime64[ns]
>>>

Using the day of today to complete the datetime object.
Time formatting with strptime in a dataframe
I'm trying to read a CSV file, where some columns have date or time values. I started with this:
import pandas as pd
from datetime import datetime

timeparse = lambda x: datetime.strptime(x, '%H:%M:%S.%f')

lap_times = pd.read_csv(
    'data/lap_times.csv',
    parse_dates={'time_datetime': ['time']},
    date_parser=timeparse
)

But sometimes the row of the column has a format %M:%S.%f and sometimes has %H:%M:%S.%f. So I got an error.
I thought about creating a function like this, but I can't see how I would pass an argument to the function to do the transformation for each row of the column passed as an argument.
def timeparse_1():
    try:
        return datetime.strptime(x, '%H:%M:%S.%f')
    finally:
        return datetime.strptime(x, '%M:%S.%f')

But I'm getting:
NameError: name 'x' is not defined
[ "It would be easier if you post a sample of your CSV file, but something like this may work:\nimport pandas as pd\nfrom datetime import datetime as dt\n\ndf = pd.DataFrame({'Time': ['12:34:56', '12:34:56.789']})\n\ndf.Time = df.Time.apply(lambda x: dt.strptime(x, '%H:%M:%S.%f') if len(x) > 8 else dt.strptime(x, '%H:%M:%S'))\n\nWhich will result in:\n>>> df\n0 1900-01-01 12:34:56.000\n1 1900-01-01 12:34:56.789\nName: Time, dtype: datetime64[ns]\n>>> \n\nBut there is a better way:\nimport pandas as pd\n\ndf = pd.DataFrame({'Time': ['12:34:56', '12:34:56.789']})\n\ndf.Time = df.Time.apply(pd.to_datetime)\n\nWhich results in the following:\n>>> df\n0 2022-11-20 12:34:56.000\n1 2022-11-20 12:34:56.789\nName: Time, dtype: datetime64[ns]\n>>>\n\nUsing the day of today to complete the datetime object.\n" ]
[ 0 ]
[]
[]
[ "dataframe", "python" ]
stackoverflow_0074508325_dataframe_python.txt
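A sketch of the two-format fallback the question's timeparse_1 was reaching for: the value must come in as a parameter, and the second format belongs in an except branch rather than finally (which always runs and would mask the first result):

import pandas as pd
from datetime import datetime

def timeparse(x):
    # Try hours:minutes:seconds.fraction first, fall back to minutes:seconds.fraction.
    try:
        return datetime.strptime(x, '%H:%M:%S.%f')
    except ValueError:
        return datetime.strptime(x, '%M:%S.%f')

s = pd.Series(['1:23:45.678', '23:45.678'])
print(s.apply(timeparse))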
Q: Why is __init__() always called after __new__()?
I'm just trying to streamline one of my classes and have introduced some functionality in the same style as the flyweight design pattern.
However, I'm a bit confused as to why __init__ is always called after __new__. I wasn't expecting this. Can anyone tell me why this is happening and how I can implement this functionality otherwise? (Apart from putting the implementation into __new__, which feels quite hacky.)
Here's an example:
class A(object):
    _dict = dict()

    def __new__(cls):
        if 'key' in A._dict:
            print "EXISTS"
            return A._dict['key']
        else:
            print "NEW"
            return super(A, cls).__new__(cls)

    def __init__(self):
        print "INIT"
        A._dict['key'] = self
        print ""

a1 = A()
a2 = A()
a3 = A()

Outputs:
NEW
INIT

EXISTS
INIT

EXISTS
INIT

Why?

A: Use __new__ when you need to control the creation of a new instance.
Use __init__ when you need to control initialization of a new instance.
__new__ is the first step of instance creation. It's called first, and is responsible for returning a new instance of your class.
In contrast, __init__ doesn't return anything; it's only responsible for initializing the instance after it's been created.
In general, you shouldn't need to override __new__ unless you're subclassing an immutable type like str, int, unicode or tuple.
From the April 2008 post: When to use __new__ vs. __init__? on mail.python.org.
You should consider that what you are trying to do is usually done with a Factory and that's the best way to do it. Using __new__ is not a good clean solution so please consider the usage of a factory. Here's a good example: ActiveState Fᴀᴄᴛᴏʀʏ ᴘᴀᴛᴛᴇʀɴ Recipe.

A: __new__ is a static class method, while __init__ is an instance method. __new__ has to create the instance first, so __init__ can initialize it. Note that __init__ takes self as a parameter. Until you create the instance there is no self.
Now, I gather that you're trying to implement the singleton pattern in Python. There are a few ways to do that.
Also, as of Python 2.6, you can use class decorators.
def singleton(cls):
    instances = {}
    def getinstance():
        if cls not in instances:
            instances[cls] = cls()
        return instances[cls]
    return getinstance

@singleton
class MyClass:
    ...

A: In most well-known OO languages, an expression like SomeClass(arg1, arg2) will allocate a new instance, initialise the instance's attributes, and then return it.
In most well-known OO languages, the "initialise the instance's attributes" part can be customised for each class by defining a constructor, which is basically just a block of code that operates on the new instance (using the arguments provided to the constructor expression) to set up whatever initial conditions are desired. In Python, this corresponds to the class' __init__ method.
Python's __new__ is nothing more and nothing less than similar per-class customisation of the "allocate a new instance" part. This of course allows you to do unusual things such as returning an existing instance rather than allocating a new one. So in Python, we shouldn't really think of this part as necessarily involving allocation; all that we require is that __new__ comes up with a suitable instance from somewhere.
But it's still only half of the job, and there's no way for the Python system to know that sometimes you want to run the other half of the job (__init__) afterwards and sometimes you don't. If you want that behavior, you have to say so explicitly.
Often, you can refactor so you only need __new__, or so you don't need __new__, or so that __init__ behaves differently on an already-initialised object. But if you really want to, Python does actually allow you to redefine "the job", so that SomeClass(arg1, arg2) doesn't necessarily call __new__ followed by __init__.
To do this, you need to create a metaclass, and define its __call__ method.
A metaclass is just the class of a class. And a class' __call__ method controls what happens when you call instances of the class. So a metaclass' __call__ method controls what happens when you call a class; i.e. it allows you to redefine the instance-creation mechanism from start to finish.
This is the level at which you can most elegantly implement a completely non-standard instance creation process such as the singleton pattern. In fact, with less than 10 lines of code you can implement a Singleton metaclass that then doesn't even require you to futz with __new__ at all, and can turn any otherwise-normal class into a singleton by simply adding __metaclass__ = Singleton!
class Singleton(type):
    def __init__(self, *args, **kwargs):
        super(Singleton, self).__init__(*args, **kwargs)
        self.__instance = None

    def __call__(self, *args, **kwargs):
        if self.__instance is None:
            self.__instance = super(Singleton, self).__call__(*args, **kwargs)
        return self.__instance

However this is probably deeper magic than is really warranted for this situation!

A: To quote the documentation:
Typical implementations create a new instance of the class by invoking the superclass's __new__() method using "super(currentclass, cls).__new__(cls[, ...])" with appropriate arguments and then modifying the newly-created instance as necessary before returning it.
...
If __new__() does not return an instance of cls, then the new instance's __init__() method will not be invoked.
__new__() is intended mainly to allow subclasses of immutable types (like int, str, or tuple) to customize instance creation.

A: I realize that this question is quite old but I had a similar issue. The following did what I wanted:
class Agent(object):
    _agents = dict()

    def __new__(cls, *p):
        number = p[0]
        if not number in cls._agents:
            cls._agents[number] = object.__new__(cls)
        return cls._agents[number]

    def __init__(self, number):
        self.number = number

    def __eq__(self, rhs):
        return self.number == rhs.number

Agent("a") is Agent("a") == True

I used this page as a resource http://infohost.nmt.edu/tcc/help/pubs/python/web/new-new-method.html

A: When __new__ returns an instance of the same class, __init__ is run afterwards on the returned object. I.e. you can NOT use __new__ to prevent __init__ from being run. Even if you return a previously created object from __new__, it will be double (triple, etc...) initialized by __init__ again and again.
Here is the generic approach to the Singleton pattern which extends vartec's answer above and fixes it:
def SingletonClass(cls):
    class Single(cls):
        __doc__ = cls.__doc__
        _initialized = False
        _instance = None

        def __new__(cls, *args, **kwargs):
            if not cls._instance:
                cls._instance = super(Single, cls).__new__(cls, *args, **kwargs)
            return cls._instance

        def __init__(self, *args, **kwargs):
            if self._initialized:
                return
            super(Single, self).__init__(*args, **kwargs)
            self.__class__._initialized = True  # It's crucial to set this variable on the class!
    return Single

Full story is here.
Another approach, which in fact involves __new__, is to use classmethods:
class Singleton(object):
    __initialized = False

    def __new__(cls, *args, **kwargs):
        if not cls.__initialized:
            cls.__init__(*args, **kwargs)
            cls.__initialized = True
        return cls

class MyClass(Singleton):
    @classmethod
    def __init__(cls, x, y):
        print "init is here"

    @classmethod
    def do(cls):
        print "doing stuff"

Please pay attention that with this approach you need to decorate ALL of your methods with @classmethod, because you'll never use any real instance of MyClass.

A: I think the simple answer to this question is that, if __new__ returns a value that is the same type as the class, the __init__ function executes, otherwise it won't. In this case your code returns A._dict['key'] which is the same class as cls, so __init__ will be executed.

A: class M(type):
    _dict = {}

    def __call__(cls, key):
        if key in cls._dict:
            print 'EXISTS'
            return cls._dict[key]
        else:
            print 'NEW'
            instance = super(M, cls).__call__(key)
            cls._dict[key] = instance
            return instance

class A(object):
    __metaclass__ = M

    def __init__(self, key):
        print 'INIT'
        self.key = key
        print

a1 = A('aaa')
a2 = A('bbb')
a3 = A('aaa')

outputs:
NEW
INIT

NEW
INIT

EXISTS

NB As a side effect the M._dict property automatically becomes accessible from A as A._dict, so take care not to overwrite it incidentally.

A: An update to @AntonyHatchkins' answer: you probably want a separate dictionary of instances for each class of the metatype, meaning that you should have an __init__ method in the metaclass to initialize your class object with that dictionary instead of making it global across all the classes.
class MetaQuasiSingleton(type):
    def __init__(cls, name, bases, attributes):
        cls._dict = {}

    def __call__(cls, key):
        if key in cls._dict:
            print('EXISTS')
            instance = cls._dict[key]
        else:
            print('NEW')
            instance = super().__call__(key)
            cls._dict[key] = instance
        return instance

class A(metaclass=MetaQuasiSingleton):
    def __init__(self, key):
        print('INIT')
        self.key = key
        print()

I have gone ahead and updated the original code with an __init__ method and changed the syntax to Python 3 notation (no-arg call to super and metaclass in the class arguments instead of as an attribute).
Either way, the important point here is that your class initializer (__call__ method) will not execute either __new__ or __init__ if the key is found. This is much cleaner than using __new__, which requires you to mark the object if you want to skip the default __init__ step.

A: __new__ should return a new, blank instance of a class. __init__ is then called to initialise that instance. You're not calling __init__ in the "NEW" case of __new__, so it's being called for you. The code that is calling __new__ doesn't keep track of whether __init__ has been called on a particular instance or not, nor should it, because you're doing something very unusual here.
You could add an attribute to the object in the __init__ function to indicate that it's been initialised. Check for the existence of that attribute as the first thing in __init__ and don't proceed any further if it has been.

A: Referring to this doc:
When subclassing immutable built-in types like numbers and strings, and occasionally in other situations, the static method __new__ comes in handy. __new__ is the first step in instance construction, invoked before __init__.
The __new__ method is called with the class as its first argument; its responsibility is to return a new instance of that class.
Compare this to __init__: __init__ is called with an instance as its first argument, and it doesn't return anything; its responsibility is to initialize the instance. There are situations where a new instance is created without calling __init__ (for example when the instance is loaded from a pickle). There is no way to create a new instance without calling __new__ (although in some cases you can get away with calling a base class's __new__). Regarding what you wish to achieve, there also in same doc info about Singleton pattern class Singleton(object): def __new__(cls, *args, **kwds): it = cls.__dict__.get("__it__") if it is not None: return it cls.__it__ = it = object.__new__(cls) it.init(*args, **kwds) return it def init(self, *args, **kwds): pass you may also use this implementation from PEP 318, using a decorator def singleton(cls): instances = {} def getinstance(): if cls not in instances: instances[cls] = cls() return instances[cls] return getinstance @singleton class MyClass: ... A: One should look at __init__ as a simple constructor in traditional OO languages. For example, if you are familiar with Java or C++, the constructor is passed a pointer to its own instance implicitly. In the case of Java, it is the this variable. If one were to inspect the byte code generated for Java, one would notice two calls. The first call is to an "new" method, and then next call is to the init method (which is the actual call to the user defined constructor). This two step process enables creation of the actual instance before calling the constructor method of the class which is just another method of that instance. Now, in the case of Python, __new__ is a added facility that is accessible to the user. Java does not provide that flexibility, due to its typed nature. If a language provided that facility, then the implementor of __new__ could do many things in that method before returning the instance, including creating a totally new instance of a unrelated object in some cases. And, this approach also works out well for especially for immutable types in the case of Python. A: Digging little deeper into that! The type of a generic class in CPython is type and its base class is Object (Unless you explicitly define another base class like a metaclass). The sequence of low level calls can be found here. The first method called is the type_call which then calls tp_new and then tp_init. The interesting part here is that tp_new will call the Object's (base class) new method object_new which does a tp_alloc (PyType_GenericAlloc) which allocates the memory for the object :) At that point the object is created in memory and then the __init__ method gets called. If __init__ is not implemented in your class then the object_init gets called and it does nothing :) Then type_call just returns the object which binds to your variable. A: However, I'm a bit confused as to why __init__ is always called after __new__. I think the C++ analogy would be useful here: __new__ simply allocates memory for the object. The instance variables of an object needs memory to hold it, and this is what the step __new__ would do. __init__ initialize the internal variables of the object to specific values (could be default). A: Now I've got the same problem, and for some reasons I decided to avoid decorators, factories and metaclasses. 
I did it like this: Main file def _alt(func): import functools @functools.wraps(func) def init(self, *p, **k): if hasattr(self, "parent_initialized"): return else: self.parent_initialized = True func(self, *p, **k) return init class Parent: # Empty dictionary, shouldn't ever be filled with anything else parent_cache = {} def __new__(cls, n, *args, **kwargs): # Checks if object with this ID (n) has been created if n in cls.parent_cache: # It was, return it return cls.parent_cache[n] else: # Check if it was modified by this function if not hasattr(cls, "parent_modified"): # Add the attribute cls.parent_modified = True cls.parent_cache = {} # Apply it cls.__init__ = _alt(cls.__init__) # Get the instance obj = super().__new__(cls) # Push it to cache cls.parent_cache[n] = obj # Return it return obj Example classes class A(Parent): def __init__(self, n): print("A.__init__", n) class B(Parent): def __init__(self, n): print("B.__init__", n) In use >>> A(1) A.__init__ 1 # First A(1) initialized <__main__.A object at 0x000001A73A4A2E48> >>> A(1) # Returned previous A(1) <__main__.A object at 0x000001A73A4A2E48> >>> A(2) A.__init__ 2 # First A(2) initialized <__main__.A object at 0x000001A7395D9C88> >>> B(2) B.__init__ 2 # B class doesn't collide with A, thanks to separate cache <__main__.B object at 0x000001A73951B080> Warning: You shouldn't initialize Parent, it will collide with other classes - unless you defined separate cache in each of the children, that's not what we want. Warning: It seems a class with Parent as grandparent behaves weird. [Unverified] Try it online! A: The __init__ is called after __new__ so that when you override it in a subclass, your added code will still get called. If you are trying to subclass a class that already has a __new__, someone unaware of this might start by adapting the __init__ and forwarding the call down to the subclass __init__. This convention of calling __init__ after __new__ helps that work as expected. The __init__ still needs to allow for any parameters the superclass __new__ needed, but failing to do so will usually create a clear runtime error. And the __new__ should probably explicitly allow for *args and '**kw', to make it clear that extension is OK. It is generally bad form to have both __new__ and __init__ in the same class at the same level of inheritance, because of the behavior the original poster described. A: However, I'm a bit confused as to why __init__ is always called after __new__. Not much of a reason other than that it just is done that way. __new__ doesn't have the responsibility of initializing the class, some other method does (__call__, possibly-- I don't know for sure). I wasn't expecting this. Can anyone tell me why this is happening and how I implement this functionality otherwise? (apart from putting the implementation into the __new__ which feels quite hacky). You could have __init__ do nothing if it's already been initialized, or you could write a new metaclass with a new __call__ that only calls __init__ on new instances, and otherwise just returns __new__(...). A: The simple reason is that the new is used for creating an instance, while init is used for initializing the instance. Before initializing, the instance should be created first. That's why new should be called before init. A: When instantiating a class, first, __new__() is called to create the instance of a class, then __init__() is called to initialize the instance. __new__(): Called to create a new instance of class cls. ... 
If __new__() is invoked during object construction and it returns an instance of cls, then the new instance’s __init__() method will be invoked like __init__(self[, ...]), ... __init__(): Called after the instance has been created (by __new__()), ... Because __new__() and __init__() work together in constructing objects (__new__() to create it, and __init__() to customize it), ... For example, when instantiating Teacher class, first, __new__() is called to create the instance of Teacher class, then __init__() is called to initialize the instance as shown below: class Teacher: def __init__(self, name): self.name = name class Student: def __init__(self, name): self.name = name obj = Teacher("John") # Instantiation print(obj.name) This is the output: <class '__main__.Teacher'> John And, using __new__() of the instance of Teacher class, we can create the instance of Student class as shown below: # ... obj = Teacher("John") print(type(obj)) print(obj.name) obj = obj.__new__(Student) # Creates the instance of "Student" class print(type(obj)) Now, the instance of Student class is created as shown below: <class '__main__.Teacher'> <__main__.Teacher object at 0x7f4e3950bf10> <class '__main__.Student'> # Here Next, if we try to get the value of name variable from **the instance of Student class as shown below: obj = Teacher("John") print(type(obj)) print(obj.name) obj = obj.__new__(Student) print(type(obj)) print(obj.name) # Tries to get the value of "name" variable The error below occurs because the instance of Student class has not been initialized by __init__() yet: AttributeError: 'Student' object has no attribute 'name' So, we initialize the instance of Student class as shown below: obj = Teacher("John") print(type(obj)) print(obj.name) obj = obj.__new__(Student) print(type(obj)) obj.__init__("Tom") # Initializes the instance of "Student" class print(obj.name) Then, we can get the value of name variable from the instance of Student class as shown below: <class '__main__.Teacher'> John <class '__main__.Student'> Tom # Here
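Editor's note: to make the two rules just quoted concrete, here is a minimal, self-contained sketch (Python 3 syntax; the class names Cached and Other are illustrative, not from the thread). Cached.__new__ returns an instance of cls, so __init__ still runs on every call — exactly the repeated INIT the question observes; Other.__new__ returns a plain str, so its __init__ is skipped entirely:

class Cached:
    _cache = {}

    def __new__(cls, key):
        if key in cls._cache:
            # Returning a cached instance of cls does NOT stop Python
            # from running __init__ on it again afterwards.
            return cls._cache[key]
        obj = super().__new__(cls)
        cls._cache[key] = obj
        return obj

    def __init__(self, key):
        print("INIT", key)


class Other:
    def __new__(cls, key):
        # Not an instance of cls, so __init__ is never invoked.
        return key.upper()

    def __init__(self, key):
        print("never reached")


Cached("k")        # INIT k
Cached("k")        # INIT k  (same object, initialised a second time)
print(Other("k"))  # K      (__init__ skipped)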
Why is __init__() always called after __new__()?
I'm just trying to streamline one of my classes and have introduced some functionality in the same style as the flyweight design pattern. However, I'm a bit confused as to why __init__ is always called after __new__. I wasn't expecting this. Can anyone tell me why this is happening and how I can implement this functionality otherwise? (Apart from putting the implementation into the __new__ which feels quite hacky.) Here's an example:
class A(object):
    _dict = dict()

    def __new__(cls):
        if 'key' in A._dict:
            print "EXISTS"
            return A._dict['key']
        else:
            print "NEW"
            return super(A, cls).__new__(cls)

    def __init__(self):
        print "INIT"
        A._dict['key'] = self
        print ""

a1 = A()
a2 = A()
a3 = A()

Outputs:
NEW
INIT

EXISTS
INIT

EXISTS
INIT

Why?
[ "\nUse __new__ when you need to control\nthe creation of a new instance.\n\n\nUse\n__init__ when you need to control initialization of a new instance.\n__new__ is the first step of instance creation. It's called first, and is\nresponsible for returning a new\ninstance of your class.\n\n\nIn contrast,\n__init__ doesn't return anything; it's only responsible for initializing the\ninstance after it's been created.\nIn general, you shouldn't need to\noverride __new__ unless you're\nsubclassing an immutable type like\nstr, int, unicode or tuple.\n\nFrom April 2008 post: When to use __new__ vs. __init__? on mail.python.org.\nYou should consider that what you are trying to do is usually done with a Factory and that's the best way to do it. Using __new__ is not a good clean solution so please consider the usage of a factory. Here's a good example: ActiveState Fᴀᴄᴛᴏʀʏ ᴘᴀᴛᴛᴇʀɴ Recipe.\n", "__new__ is static class method, while __init__ is instance method. \n__new__ has to create the instance first, so __init__ can initialize it. Note that __init__ takes self as parameter. Until you create instance there is no self.\nNow, I gather, that you're trying to implement singleton pattern in Python. There are a few ways to do that.\nAlso, as of Python 2.6, you can use class decorators. \ndef singleton(cls):\n instances = {}\n def getinstance():\n if cls not in instances:\n instances[cls] = cls()\n return instances[cls]\n return getinstance\n\n@singleton\nclass MyClass:\n ...\n\n", "In most well-known OO languages, an expression like SomeClass(arg1, arg2) will allocate a new instance, initialise the instance's attributes, and then return it.\nIn most well-known OO languages, the \"initialise the instance's attributes\" part can be customised for each class by defining a constructor, which is basically just a block of code that operates on the new instance (using the arguments provided to the constructor expression) to set up whatever initial conditions are desired. In Python, this corresponds to the class' __init__ method.\nPython's __new__ is nothing more and nothing less than similar per-class customisation of the \"allocate a new instance\" part. This of course allows you to do unusual things such as returning an existing instance rather than allocating a new one. So in Python, we shouldn't really think of this part as necessarily involving allocation; all that we require is that __new__ comes up with a suitable instance from somewhere.\nBut it's still only half of the job, and there's no way for the Python system to know that sometimes you want to run the other half of the job (__init__) afterwards and sometimes you don't. If you want that behavior, you have to say so explicitly.\nOften, you can refactor so you only need __new__, or so you don't need __new__, or so that __init__ behaves differently on an already-initialised object. But if you really want to, Python does actually allow you to redefine \"the job\", so that SomeClass(arg1, arg2) doesn't necessarily call __new__ followed by __init__. To do this, you need to create a metaclass, and define its __call__ method.\nA metaclass is just the class of a class. And a class' __call__ method controls what happens when you call instances of the class. So a metaclass' __call__ method controls what happens when you call a class; i.e. it allows you to redefine the instance-creation mechanism from start to finish. This is the level at which you can most elegantly implement a completely non-standard instance creation process such as the singleton pattern. 
In fact, with less than 10 lines of code you can implement a Singleton metaclass that then doesn't even require you to futz with __new__ at all, and can turn any otherwise-normal class into a singleton by simply adding __metaclass__ = Singleton!\nclass Singleton(type):\n def __init__(self, *args, **kwargs):\n super(Singleton, self).__init__(*args, **kwargs)\n self.__instance = None\n def __call__(self, *args, **kwargs):\n if self.__instance is None:\n self.__instance = super(Singleton, self).__call__(*args, **kwargs)\n return self.__instance\n\nHowever this is probably deeper magic than is really warranted for this situation!\n", "To quote the documentation:\n\nTypical implementations create a new instance of the class by invoking\n the superclass's __new__() method using \"super(currentclass,\n cls).__new__(cls[, ...])\"with appropriate arguments and then\n modifying the newly-created instance as necessary before returning it.\n...\nIf __new__() does not return an instance of cls, then the new\n instance's __init__() method will not be invoked.\n__new__() is intended mainly to allow subclasses of immutable\n types (like int, str, or tuple) to customize instance creation.\n\n", "I realize that this question is quite old but I had a similar issue.\nThe following did what I wanted:\nclass Agent(object):\n _agents = dict()\n\n def __new__(cls, *p):\n number = p[0]\n if not number in cls._agents:\n cls._agents[number] = object.__new__(cls)\n return cls._agents[number]\n\n def __init__(self, number):\n self.number = number\n\n def __eq__(self, rhs):\n return self.number == rhs.number\n\nAgent(\"a\") is Agent(\"a\") == True\n\nI used this page as a resource http://infohost.nmt.edu/tcc/help/pubs/python/web/new-new-method.html\n", "When __new__ returns instance of the same class, __init__ is run afterwards on returned object. I.e. you can NOT use __new__ to prevent __init__ from being run. Even if you return previously created object from __new__, it will be double (triple, etc...) initialized by __init__ again and again.\nHere is the generic approach to Singleton pattern which extends vartec answer above and fixes it:\ndef SingletonClass(cls):\n class Single(cls):\n __doc__ = cls.__doc__\n _initialized = False\n _instance = None\n\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(Single, cls).__new__(cls, *args, **kwargs)\n return cls._instance\n\n def __init__(self, *args, **kwargs):\n if self._initialized:\n return\n super(Single, self).__init__(*args, **kwargs)\n self.__class__._initialized = True # Its crucial to set this variable on the class!\n return Single\n\nFull story is here.\nAnother approach, which in fact involves __new__ is to use classmethods:\nclass Singleton(object):\n __initialized = False\n\n def __new__(cls, *args, **kwargs):\n if not cls.__initialized:\n cls.__init__(*args, **kwargs)\n cls.__initialized = True\n return cls\n\n\nclass MyClass(Singleton):\n @classmethod\n def __init__(cls, x, y):\n print \"init is here\"\n\n @classmethod\n def do(cls):\n print \"doing stuff\"\n\nPlease pay attention, that with this approach you need to decorate ALL of your methods with @classmethod, because you'll never use any real instance of MyClass.\n", "I think the simple answer to this question is that, if __new__ returns a value that is the same type as the class, the __init__ function executes, otherwise it won't. 
In this case your code returns A._dict('key') which is the same class as cls, so __init__ will be executed.\n", "class M(type):\n _dict = {}\n\n def __call__(cls, key):\n if key in cls._dict:\n print 'EXISTS'\n return cls._dict[key]\n else:\n print 'NEW'\n instance = super(M, cls).__call__(key)\n cls._dict[key] = instance\n return instance\n\nclass A(object):\n __metaclass__ = M\n\n def __init__(self, key):\n print 'INIT'\n self.key = key\n print\n\na1 = A('aaa')\na2 = A('bbb')\na3 = A('aaa')\n\noutputs:\nNEW\nINIT\n\nNEW\nINIT\n\nEXISTS\n\nNB As a side effect M._dict property automatically becomes accessible from A as A._dict so take care not to overwrite it incidentally.\n", "An update to @AntonyHatchkins answer, you probably want a separate dictionary of instances for each class of the metatype, meaning that you should have an __init__ method in the metaclass to initialize your class object with that dictionary instead of making it global across all the classes.\nclass MetaQuasiSingleton(type):\n def __init__(cls, name, bases, attibutes):\n cls._dict = {}\n\n def __call__(cls, key):\n if key in cls._dict:\n print('EXISTS')\n instance = cls._dict[key]\n else:\n print('NEW')\n instance = super().__call__(key)\n cls._dict[key] = instance\n return instance\n\nclass A(metaclass=MetaQuasiSingleton):\n def __init__(self, key):\n print 'INIT'\n self.key = key\n print()\n\nI have gone ahead and updated the original code with an __init__ method and changed the syntax to Python 3 notation (no-arg call to super and metaclass in the class arguments instead of as an attribute).\nEither way, the important point here is that your class initializer (__call__ method) will not execute either __new__ or __init__ if the key is found. This is much cleaner than using __new__, which requires you to mark the object if you want to skip the default __init__ step.\n", "__new__ should return a new, blank instance of a class. __init__ is then called to initialise that instance. You're not calling __init__ in the \"NEW\" case of __new__, so it's being called for you. The code that is calling __new__ doesn't keep track of whether __init__ has been called on a particular instance or not nor should it, because you're doing something very unusual here.\nYou could add an attribute to the object in the __init__ function to indicate that it's been initialised. Check for the existence of that attribute as the first thing in __init__ and don't proceed any further if it has been.\n", "Referring to this doc:\n\nWhen subclassing immutable built-in types like numbers and strings,\nand occasionally in other situations, the static method __new__ comes\nin handy. __new__ is the first step in instance construction, invoked\nbefore __init__.\n\n\nThe __new__ method is called with the class as its\nfirst argument; its responsibility is to return a new instance of that\nclass.\n\n\nCompare this to __init__: __init__ is called with an instance\nas its first argument, and it doesn't return anything; its\nresponsibility is to initialize the instance.\n\n\nThere are situations\nwhere a new instance is created without calling __init__ (for example\nwhen the instance is loaded from a pickle). 
There is no way to create\na new instance without calling __new__ (although in some cases you can\nget away with calling a base class's __new__).\n\nRegarding what you wish to achieve, there also in same doc info about Singleton pattern\nclass Singleton(object):\n def __new__(cls, *args, **kwds):\n it = cls.__dict__.get(\"__it__\")\n if it is not None:\n return it\n cls.__it__ = it = object.__new__(cls)\n it.init(*args, **kwds)\n return it\n def init(self, *args, **kwds):\n pass\n\nyou may also use this implementation from PEP 318, using a decorator\ndef singleton(cls):\n instances = {}\n def getinstance():\n if cls not in instances:\n instances[cls] = cls()\n return instances[cls]\n return getinstance\n\n@singleton\nclass MyClass:\n...\n\n", "One should look at __init__ as a simple constructor in traditional OO languages. For example, if you are familiar with Java or C++, the constructor is passed a pointer to its own instance implicitly. In the case of Java, it is the this variable. If one were to inspect the byte code generated for Java, one would notice two calls. The first call is to an \"new\" method, and then next call is to the init method (which is the actual call to the user defined constructor). This two step process enables creation of the actual instance before calling the constructor method of the class which is just another method of that instance.\nNow, in the case of Python, __new__ is a added facility that is accessible to the user. Java does not provide that flexibility, due to its typed nature. If a language provided that facility, then the implementor of __new__ could do many things in that method before returning the instance, including creating a totally new instance of a unrelated object in some cases. And, this approach also works out well for especially for immutable types in the case of Python. \n", "Digging little deeper into that!\nThe type of a generic class in CPython is type and its base class is Object (Unless you explicitly define another base class like a metaclass). The sequence of low level calls can be found here. The first method called is the type_call which then calls tp_new and then tp_init. \nThe interesting part here is that tp_new will call the Object's (base class) new method object_new which does a tp_alloc (PyType_GenericAlloc) which allocates the memory for the object :)\nAt that point the object is created in memory and then the __init__ method gets called. If __init__ is not implemented in your class then the object_init gets called and it does nothing :)\nThen type_call just returns the object which binds to your variable.\n", "\nHowever, I'm a bit confused as to why __init__ is always called after __new__.\n\nI think the C++ analogy would be useful here:\n\n__new__ simply allocates memory for the object. The instance variables of an object needs memory to hold it, and this is what the step __new__ would do.\n__init__ initialize the internal variables of the object to specific values (could be default).\n\n", "Now I've got the same problem, and for some reasons I decided to avoid decorators, factories and metaclasses. 
I did it like this:\nMain file\ndef _alt(func):\n import functools\n @functools.wraps(func)\n def init(self, *p, **k):\n if hasattr(self, \"parent_initialized\"):\n return\n else:\n self.parent_initialized = True\n func(self, *p, **k)\n\n return init\n\n\nclass Parent:\n # Empty dictionary, shouldn't ever be filled with anything else\n parent_cache = {}\n\n def __new__(cls, n, *args, **kwargs):\n\n # Checks if object with this ID (n) has been created\n if n in cls.parent_cache:\n\n # It was, return it\n return cls.parent_cache[n]\n\n else:\n\n # Check if it was modified by this function\n if not hasattr(cls, \"parent_modified\"):\n # Add the attribute\n cls.parent_modified = True\n cls.parent_cache = {}\n\n # Apply it\n cls.__init__ = _alt(cls.__init__)\n\n # Get the instance\n obj = super().__new__(cls)\n\n # Push it to cache\n cls.parent_cache[n] = obj\n\n # Return it\n return obj\n\nExample classes\nclass A(Parent):\n\n def __init__(self, n):\n print(\"A.__init__\", n)\n\n\nclass B(Parent):\n\n def __init__(self, n):\n print(\"B.__init__\", n)\n\nIn use\n>>> A(1)\nA.__init__ 1 # First A(1) initialized \n<__main__.A object at 0x000001A73A4A2E48>\n>>> A(1) # Returned previous A(1)\n<__main__.A object at 0x000001A73A4A2E48>\n>>> A(2)\nA.__init__ 2 # First A(2) initialized\n<__main__.A object at 0x000001A7395D9C88>\n>>> B(2)\nB.__init__ 2 # B class doesn't collide with A, thanks to separate cache\n<__main__.B object at 0x000001A73951B080>\n\n\nWarning: You shouldn't initialize Parent, it will collide with other classes - unless you defined separate cache in each of the children, that's not what we want.\nWarning: It seems a class with Parent as grandparent behaves weird. [Unverified]\n\nTry it online!\n", "The __init__ is called after __new__ so that when you override it in a subclass, your added code will still get called.\nIf you are trying to subclass a class that already has a __new__, someone unaware of this might start by adapting the __init__ and forwarding the call down to the subclass __init__. This convention of calling __init__ after __new__ helps that work as expected.\nThe __init__ still needs to allow for any parameters the superclass __new__ needed, but failing to do so will usually create a clear runtime error. And the __new__ should probably explicitly allow for *args and '**kw', to make it clear that extension is OK.\nIt is generally bad form to have both __new__ and __init__ in the same class at the same level of inheritance, because of the behavior the original poster described.\n", "\nHowever, I'm a bit confused as to why __init__ is always called after __new__.\n\nNot much of a reason other than that it just is done that way. __new__ doesn't have the responsibility of initializing the class, some other method does (__call__, possibly-- I don't know for sure).\n\nI wasn't expecting this. Can anyone tell me why this is happening and how I implement this functionality otherwise? (apart from putting the implementation into the __new__ which feels quite hacky).\n\nYou could have __init__ do nothing if it's already been initialized, or you could write a new metaclass with a new __call__ that only calls __init__ on new instances, and otherwise just returns __new__(...).\n", "The simple reason is that the new is used for creating an instance, while init is used for initializing the instance. Before initializing, the instance should be created first. 
That's why new should be called before init.\n", "When instantiating a class, first, __new__() is called to create the instance of a class, then __init__() is called to initialize the instance.\n__new__():\n\nCalled to create a new instance of class cls. ...\nIf __new__() is invoked during object construction and it returns an\ninstance of cls, then the new instance’s __init__() method will be\ninvoked like __init__(self[, ...]), ...\n\n__init__():\n\nCalled after the instance has been created (by __new__()), ...\nBecause __new__() and __init__() work together in constructing objects\n(__new__() to create it, and __init__() to customize it), ...\n\nFor example, when instantiating Teacher class, first, __new__() is called to create the instance of Teacher class, then __init__() is called to initialize the instance as shown below:\nclass Teacher:\n def __init__(self, name):\n self.name = name\n \nclass Student:\n def __init__(self, name):\n self.name = name\n\nobj = Teacher(\"John\") # Instantiation\n\nprint(obj.name)\n\nThis is the output:\n<class '__main__.Teacher'>\nJohn\n\nAnd, using __new__() of the instance of Teacher class, we can create the instance of Student class as shown below:\n# ...\n\nobj = Teacher(\"John\")\nprint(type(obj))\nprint(obj.name)\n\nobj = obj.__new__(Student) # Creates the instance of \"Student\" class\nprint(type(obj))\n\nNow, the instance of Student class is created as shown below:\n<class '__main__.Teacher'>\n<__main__.Teacher object at 0x7f4e3950bf10>\n<class '__main__.Student'> # Here\n\nNext, if we try to get the value of name variable from **the instance of Student class as shown below:\nobj = Teacher(\"John\")\nprint(type(obj))\nprint(obj.name)\n\nobj = obj.__new__(Student)\nprint(type(obj))\nprint(obj.name) # Tries to get the value of \"name\" variable\n\nThe error below occurs because the instance of Student class has not been initialized by __init__() yet:\n\nAttributeError: 'Student' object has no attribute 'name'\n\nSo, we initialize the instance of Student class as shown below:\nobj = Teacher(\"John\") \nprint(type(obj))\nprint(obj.name)\n\nobj = obj.__new__(Student)\nprint(type(obj))\nobj.__init__(\"Tom\") # Initializes the instance of \"Student\" class\nprint(obj.name)\n\nThen, we can get the value of name variable from the instance of Student class as shown below:\n<class '__main__.Teacher'>\nJohn\n<class '__main__.Student'>\nTom # Here\n\n" ]
[ 696, 198, 174, 28, 13, 12, 10, 7, 7, 5, 5, 5, 5, 5, 3, 2, 1, 1, 0 ]
[]
[]
[ "class_design", "design_patterns", "python" ]
stackoverflow_0000674304_class_design_design_patterns_python.txt
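Editor's note: several answers in the record above spell the singleton metaclass in Python 2 (__metaclass__ attribute, print statements). A possible Python 3 translation, offered only as an illustrative sketch (Config is a made-up class name, not from the thread):

class Singleton(type):
    def __init__(cls, *args, **kwargs):
        super().__init__(*args, **kwargs)
        cls._instance = None

    def __call__(cls, *args, **kwargs):
        # Runs instead of the usual __new__/__init__ sequence on instantiation.
        if cls._instance is None:
            cls._instance = super().__call__(*args, **kwargs)
        return cls._instance


class Config(metaclass=Singleton):
    def __init__(self):
        print("INIT runs exactly once")


assert Config() is Config()  # prints INIT once, then reuses the instance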
Q: How to get mouse inputs from raw data?
Hello, I am trying to develop a Linux game in Panda3D, which uses Python for coding, so anything in Python would work. The game requires two mouse inputs (movement and mouse clicks). I want to get the info from the files in /dev/input, but a more convenient way would help. I've already got code to get the input file I want (/dev/input/event13 and /dev/input/event14) that works great. I've tried a lot of things but have only been able to get left click, right click, and a broken middle click which also triggers right click. Thanks! The code that gets the input file is this:
import evdev

def dev1():
    devices = [evdev.InputDevice(path) for path in evdev.list_devices()]
    for device in devices:
        print(device.path, device.name)
    try:
        device1 = float(input('Type number of Mouse1. Not a number to update. '))
    except ValueError:
        dev1()

def dev2():
    devices = [evdev.InputDevice(path) for path in evdev.list_devices()]
    for device in devices:
        print(device.path, device.name)
    try:
        device2 = float(input('Type number of Mouse2. Not a number to update. '))
    except ValueError:
        dev2()

dev1()
dev2()
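Editor's note: since the question asks how to read movement and clicks from the raw device (not just pick it), here is a rough sketch of an evdev read loop for one device. It is my own illustration, assuming /dev/input/event13 from the question is one of the mice; reading two devices at once would need select/selectors or evdev's asyncio support instead of this blocking loop:

from evdev import InputDevice, ecodes

mouse = InputDevice('/dev/input/event13')  # path taken from the question

for event in mouse.read_loop():  # blocking generator of raw input events
    if event.type == ecodes.EV_REL:  # relative movement
        if event.code == ecodes.REL_X:
            print('moved x by', event.value)
        elif event.code == ecodes.REL_Y:
            print('moved y by', event.value)
    elif event.type == ecodes.EV_KEY and event.value == 1:  # 1=press, 0=release
        if event.code == ecodes.BTN_LEFT:
            print('left click')
        elif event.code == ecodes.BTN_RIGHT:
            print('right click')
        elif event.code == ecodes.BTN_MIDDLE:
            print('middle click')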
How to get mouse inputs from raw data?
Hello, I am trying to develop a Linux game in Panda3D, which uses Python for coding, so anything in Python would work. The game requires two mouse inputs (movement and mouse clicks). I want to get the info from the files in /dev/input, but a more convenient way would help. I've already got code to get the input file I want (/dev/input/event13 and /dev/input/event14) that works great. I've tried a lot of things but have only been able to get left click, right click, and a broken middle click which also triggers right click. Thanks! The code that gets the input file is this:
import evdev

def dev1():
    devices = [evdev.InputDevice(path) for path in evdev.list_devices()]
    for device in devices:
        print(device.path, device.name)
    try:
        device1 = float(input('Type number of Mouse1. Not a number to update. '))
    except ValueError:
        dev1()

def dev2():
    devices = [evdev.InputDevice(path) for path in evdev.list_devices()]
    for device in devices:
        print(device.path, device.name)
    try:
        device2 = float(input('Type number of Mouse2. Not a number to update. '))
    except ValueError:
        dev2()

dev1()
dev2()
[]
[]
[ "The pynput module has a callback-based interface that let's you monitor mouse events such as movement and and clicks.\nCheck it out\n" ]
[ -1 ]
[ "input", "python" ]
stackoverflow_0074509920_input_python.txt
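Editor's note: for completeness, the callback interface the pynput answer above refers to looks roughly like this (a hedged sketch; note that pynput reports the merged system pointer, so it probably cannot distinguish the two physical mice the question needs — that is what the raw evdev approach is for):

from pynput import mouse

def on_move(x, y):
    print('moved to', (x, y))

def on_click(x, y, button, pressed):
    print('pressed' if pressed else 'released', button, 'at', (x, y))

with mouse.Listener(on_move=on_move, on_click=on_click) as listener:
    listener.join()  # block and dispatch events to the callbacks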
Q: How to create an ordered list of keys based on their value in a dict?
I'm quite stuck with this problem in Python and I'm pretty sure it should be pretty easy to solve. Please find this dict example:
d = {
    "a": "abc1",
    "b": "abc1",
    "c": "abc2",
    "d": "abc3",
    "e": "abc3",
    "f": "abc3",
    "g": "abc4"
}

Now I want to create a list where 'a' till 'g' will be put in an order that mixes up the abc values as much as possible, but I require all keys in the list, so:
['a','c','d','g','b','e']

Then 'f' will be left over (because 'e' also has value abc3) and can be added to a leftover list. I tried the following:
s = []
for x in d:
    if len(s) < 1:
        s.append(x)
    if d[s[-1]] is not d[x]:
        s.append(x)

But this will produce just:
['a', 'c', 'd', 'g']

I need to go back and try again until no solutions are possible. Thanks a lot for your time and suggestions!
A: This is the first code I got working. And now it also removes f. Any other ideas?
d = {
    "a": "abc1",
    "b": "abc1",
    "c": "abc2",
    "d": "abc3",
    "e": "abc3",
    "f": "abc3",
    "g": "abc4"
}

list_number = {}
key_lists = []

for key, value in d.items():
    if value in list_number:
        index = list_number[value] = list_number[value] + 1
    else:
        index = list_number[value] = 0
    if index < len(key_lists):
        key_lists[index].append(key)
    else:
        key_lists.append([key])
result = []
remaining = []
for key_list in key_lists:
    if not result or len(key_list) > 1:
        result.extend(key_list)
    else:
        remaining.extend(key_list)
print("# Result", result)
print("# Remaining:", remaining)
# Result ['a', 'c', 'd', 'g', 'b', 'e']
# Remaining: ['f']
A: Here is my solution:
d = {
    "a": "abc1",
    "b": "abc1",
    "c": "abc2",
    "d": "abc3",
    "e": "abc3",
    "f": "abc3",
    "g": "abc4"
}

counted = {}
done = False
while not done:
    # Assume done
    done = True
    last_value = None
    for key, value in d.items():
        if key not in counted and value != last_value:
            done = False
            last_value = value
            # Add the key to the result dict, the value 1 is arbitrary
            counted[key] = 1

result = list(counted)
print(f"Result: {result}")

Output:
Result: ['a', 'c', 'd', 'g', 'b', 'e', 'f']

Notes

The counted dictionary will have the same keys as in d, but ordered the way we want. The values are arbitrary. I could have used a list, but the look-up key not in counted will take longer if d is a large dictionary.
The while loop will keep going until we have added all keys to the counted dictionary.
The inner for loop will go through the keys/values and only add those keys that are qualified by the if conditions.
A: The questions I was asking up top were to turn your informal question into something that can be easily implemented in code. In this case, you seem to be asking:

every key from d should appear in the output, if possible
it must not follow a key that has the same value

To solve this it helps to work out what constraints we can impose to make the algorithm easier. In this case the difficulty seems to be those keys that share the same value. This implies we should try and deal with those first, hence my question whether d a e b f c g is a valid solution. I saw that abc3 appeared the most, and so I started with that.
When turning this into code, a key concept is the priority queue. An implementation of this is available in the standard heapq Python module.
It's then a relatively simple matter of writing the code and handling the edge cases. I've put quite a few comments in that try to explain what it's doing:
from heapq import heapify, heappop, heappush
from collections import defaultdict
from random import shuffle, random

def fn(d: dict, *, randomize=True):
    # get items from dictionary
    if randomize:
        # add random value to keep heap random
        mk = lambda l: (-(len(l) + random()), l)
        items = list(d.items())
        # ensure keys under each value are randomized
        shuffle(items)
    else:
        mk = lambda l: (-len(l), l)
        items = d.items()
    # collect all the keys together under their values
    acc = defaultdict(list)
    for k, v in items:
        acc[v].append(k)
    # build a priority queue, longest list of keys first
    queue = [mk(ks) for _, ks in acc.items()]
    heapify(queue)
    # pull out distinct values
    result = []
    while queue:
        # add an item from the longest list
        _, ks = heappop(queue)
        result.append(ks.pop())
        # if there are other keys available
        if queue:
            # add an item from second longest list
            _, ks2 = heappop(queue)
            result.append(ks2.pop())
            if ks2:
                # push remaining items back onto the queue
                heappush(queue, mk(ks2))
        elif ks:
            # no other keys available and this one has entries, so we're stuck
            break
        # push remaining items back onto the queue
        if ks:
            heappush(queue, mk(ks))
    # done
    return result

# test
fn({"a": 1, "b": 1, "c": 2, "d": 3, "e": 3, "f": 3, "g": 4}, randomize=False)

which prints:
['f', 'b', 'e', 'a', 'c', 'd', 'g']

Because you originally tagged random, I made it randomize the output when randomize=False isn't passed. This is what it does by default, so for example, fn(d) might give:
['d', 'a', 'f', 'b', 'c', 'e', 'g']
A: Here's another way:
from collections import defaultdict
from itertools import zip_longest, chain

d = {
    "a": "abc1",
    "b": "abc1",
    "c": "abc2",
    "d": "abc3",
    "e": "abc3",
    "f": "abc3",
    "g": "abc4"
}

# Create and populate default dict of "value": [key1, key2, ...]
vk = defaultdict(list)
[vk[v].append(k) for k,v in d.items()]

# Zip the values together column by column and remove any columns
# with only one key because this means it's repeating.
ll = [c for c in [[b for b in a if b is not None]
                  for a in zip_longest(*vk.values())]
      if len(c) > 1]

# Flatten the list of lists into list of keys.
keys = list(chain(*ll))

print(keys)

This returns:
['a', 'c', 'd', 'g', 'b', 'e']

The way this works is by first inverting the values and keys of d into the vk defaultdict:
> vk = defaultdict(list)
> [vk[v].append(k) for k,v in d.items()]
> vk
defaultdict(list,
            {'abc1': ['a', 'b'],
             'abc2': ['c'],
             'abc3': ['d', 'e', 'f'],
             'abc4': ['g']})

This next step does a couple of things. The zip_longest() function returns this:
> list(zip_longest(*vk.values()))
[('a', 'c', 'd', 'g'),
 ('b', None, 'e', None),
 (None, None, 'f', None)]

The [b for b in a if b is not None] bit removes all the None entries so now you are left with:
> [[b for b in a if b is not None] for a in zip_longest(*vk.values())]
[['a', 'c', 'd', 'g'], ['b', 'e'], ['f']]

From this we only keep any sub-lists that are longer than a single element with len(c) > 1. This is because a single element must be a repeat of the row above it (see the list(zip_longest(*vk.values())) structure). This leaves us the list of lists ll.
> ll
[['a', 'c', 'd', 'g'], ['b', 'e']]

We flatten that with chain(*ll), which returns an iterator that we turn into a list of our original keys that we can do what we want with:
> keys = list(chain(*ll))
> print(keys)
['a', 'c', 'd', 'g', 'b', 'e']
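Editor's note: the column-wise idea from the last answer condenses into one reusable sketch. This is my own illustration (the function name interleave is made up, not from the thread); it returns the mixed list plus the leftover keys, matching the question's expected output, though it is not guaranteed to be optimal for every input:

from collections import defaultdict
from itertools import zip_longest

def interleave(d):
    # Group keys by their value, then take one key per group in rounds;
    # rounds with a single key would repeat the previous value, so they
    # go to the leftover list instead (except the very first round).
    groups = defaultdict(list)
    for k, v in d.items():
        groups[v].append(k)
    result, leftover = [], []
    for column in zip_longest(*groups.values()):
        keys = [k for k in column if k is not None]
        (result if len(keys) > 1 or not result else leftover).extend(keys)
    return result, leftover

d = {"a": "abc1", "b": "abc1", "c": "abc2",
     "d": "abc3", "e": "abc3", "f": "abc3", "g": "abc4"}
print(interleave(d))  # (['a', 'c', 'd', 'g', 'b', 'e'], ['f'])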
How to create an ordered list of keys based on their value in a dict?
I'm quite stuck with this problem in Python and I'm pretty sure it should be pretty easy to solve. Please find this dict example:
d = {
    "a": "abc1",
    "b": "abc1",
    "c": "abc2",
    "d": "abc3",
    "e": "abc3",
    "f": "abc3",
    "g": "abc4"
}

Now I want to create a list where 'a' till 'g' will be put in an order that mixes up the abc values as much as possible, but I require all keys in the list, so:
['a','c','d','g','b','e']

Then 'f' will be left over (because 'e' also has value abc3) and can be added to a leftover list. I tried the following:
s = []
for x in d:
    if len(s) < 1:
        s.append(x)
    if d[s[-1]] is not d[x]:
        s.append(x)

But this will produce just:
['a', 'c', 'd', 'g']

I need to go back and try again until no solutions are possible. Thanks a lot for your time and suggestions!
[ "This is the first code I got working. And now it also removes f. Any other ideas?\nd = {\n \"a\": \"abc1\",\n \"b\": \"abc1\",\n \"c\": \"abc2\",\n \"d\": \"abc3\",\n \"e\": \"abc3\",\n \"f\": \"abc3\",\n \"g\": \"abc4\"\n}\n\nlist_number = {}\nkey_lists = []\n\nfor key, value in d.items():\n if value in list_number:\n index = list_number[value] = list_number[value] + 1\n else:\n index = list_number[value] = 0\n if index < len(key_lists):\n key_lists[index].append(key)\n else:\n key_lists.append([key])\nresult = []\nremaining = []\nfor key_list in key_lists:\n if not result or len(key_list) > 1:\n result.extend(key_list)\n else:\n remaining.extend(key_list)\nprint(\"# Result\", result)\nprint(\"# Remaining:\", remaining)\n# Result ['a', 'c', 'd', 'g', 'b', 'e']\n# Remaining: ['f']\n\n", "Here is my solution:\nd = {\n \"a\": \"abc1\",\n \"b\": \"abc1\",\n \"c\": \"abc2\",\n \"d\": \"abc3\",\n \"e\": \"abc3\",\n \"f\": \"abc3\",\n \"g\": \"abc4\"\n}\n\ncounted = {}\ndone = False\nwhile not done:\n # Assume done\n done = True\n last_value = None\n for key, value in d.items():\n if key not in counted and value != last_value:\n done = False\n last_value = value\n # Add the key to the result dict, the value 1 is arbitrary\n counted[key] = 1\n\nresult = list(counted)\nprint(f\"Result: {result}\")\n\nOutput:\nResult: ['a', 'c', 'd', 'g', 'b', 'e', 'f']\n\nNotes\n\nThe counted dictionary will have the same keys as in d, but ordered the way we want. The values are arbitrary. I could have used a list, but the look up key not in counted will take longer if d is a large dictionary.\nThe while loop will keep going until we added all keys to the counted dictionary.\nThe inner for loop will go through the keys/values and only add those keys that are qualified by the if conditions.\n\n", "The questions I was asking up top were to turn your informal question into something that can be easily implemented in code. In this case, you seem be asking:\n\nevery key from d should appear in the output, if possible\nit must not follow a key that has the same value\n\nTo solve this it helps to work out what constraints we can impose to make the algorithm easier. In this case the difficulty seems to be those keys that share the same value. This implies we should try and deal with those first, hence my question whether d a e b f c g is a valid solution. I saw that abc3 appeared the most, and so I started with that.\nWhen turning this into code, a key concept is the priority queue. An implementation of this is available in the standard heapq Python module.\nIt's then a relatively simple matter of writing the code and handling the edge cases. 
I've put quite a few comments in that try to explain what it's doing:\nfrom heapq import heapify, heappop, heappush\nfrom collections import defaultdict\nfrom random import shuffle, random\n\ndef fn(d: dict, *, randomize=True):\n # get items from dictionary\n if randomize:\n # add random value to keep heap random\n mk = lambda l: (-(len(l) + random()), l)\n items = list(d.items())\n # ensure keys under each value are randomized\n shuffle(items)\n else:\n mk = lambda l: (-len(l), l)\n items = d.items()\n # collect all the keys together under their values\n acc = defaultdict(list)\n for k, v in items:\n acc[v].append(k)\n # build a priority queue, longest list of keys first\n queue = [mk(ks) for _, ks in acc.items()]\n heapify(queue)\n # pull out distinct values\n result = []\n while queue:\n # add an item from the longest list\n _, ks = heappop(queue)\n result.append(ks.pop())\n # if there are other keys available\n if queue:\n # add an item from second longest list\n _, ks2 = heappop(queue)\n result.append(ks2.pop())\n if ks2:\n # push remaining items back onto the queue\n heappush(queue, mk(ks2))\n elif ks:\n # no other keys available and this one has entries, so we're stuck\n break\n # push remaining items back onto the queue\n if ks:\n heappush(queue, mk(ks))\n # done\n return result\n\n# test\nfn({\"a\": 1, \"b\": 1, \"c\": 2, \"d\": 3, \"e\": 3, \"f\": 3, \"g\": 4}, randomize=False)\n\nwhich prints:\n['f', 'b', 'e', 'a', 'c', 'd', 'g']\n\nBecause you originally tagged random, I made it randomize the output when randomize=False isn't passed. This is what it does by default, so for example, fn(d) might give:\n['d', 'a', 'f', 'b', 'c', 'e', 'g']\n\n", "Here's another way:\nfrom collections import defaultdict\nfrom itertools import zip_longest, chain\n\nd = {\n \"a\": \"abc1\",\n \"b\": \"abc1\",\n \"c\": \"abc2\",\n \"d\": \"abc3\",\n \"e\": \"abc3\",\n \"f\": \"abc3\",\n \"g\": \"abc4\"\n}\n\n# Create and populate default dict of \"value\": [key1, key2, ...]\nvk = defaultdict(list)\n[vk[v].append(k) for k,v in d.items()]\n\n# Zip the values together column by column and remove any columns\n# with only one key because this means it's repeating.\nll = [c for c in [[b for b in a if b is not None]\n for a in zip_longest(*vk.values())]\n if len(c) > 1]\n\n# Flatten the list of lists into list of keys.\nkeys = list(chain(*ll))\n\nprint(keys)\n\nThis returns:\n['a', 'c', 'd', 'g', 'b', 'e']\n\nThe way this works is by first inverting the values and keys of d into the vk defaultdict:\n> vk = defaultdict(list)\n> [vk[v].append(k) for k,v in d.items()]\n> vk\ndefaultdict(list,\n {'abc1': ['a', 'b'],\n 'abc2': ['c'],\n 'abc3': ['d', 'e', 'f'],\n 'abc4': ['g']})\n\nThis next step does a couple of things. The zip_longest() function returns this:\n> list(zip_longest(*dd.values()))\n[('a', 'c', 'd', 'g'),\n ('b', None, 'e', None),\n (None, None, 'f', None)]\n\nThe [b for b in a if b is not None] bit removes all the None entries so now you are left with:\n> [[b for b in a if b is not None] for a in zip_longest(*dd.values())]\n[['a', 'c', 'd', 'g'], ['b', 'e'], ['f']]\n\nFrom this we only keep any sub-lists that are longer than a single element with len(c) > 1. This is because a single element must be a repeat of the row above it (see the list(zip_longest(*dd.values())) structure). 
This leaves us the list of lists ll.\n> ll\n[['a', 'c', 'd', 'g'], ['b', 'e']]\n\nWe flatten that with chain(*ll) which returns an iterator that we turn into a list of our original keys that we can do what we want:\n> keys = list(chain(*ll))\n> print(keys)\n['a', 'c', 'd', 'g', 'b', 'e']\n\n" ]
[ 1, 0, 0, 0 ]
[]
[]
[ "dictionary", "python", "unique" ]
stackoverflow_0074476573_dictionary_python_unique.txt