Dataset fields (name: type, length range):
content: string, length 85 to 101k
title: string, length 0 to 150
question: string, length 15 to 48k
answers: list
answers_scores: list
non_answers: list
non_answers_scores: list
tags: list
name: string, length 35 to 137
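Each row of the dataset pairs one Stack Overflow thread (content, title, question) with its answers, scores, and tags. As a rough illustration of the schema only, here is a minimal Python sketch of iterating over rows with these fields; the file name qa_records.jsonl and the JSON Lines layout are assumptions made for the example, not something the schema specifies.

import json

# Hypothetical sketch: the file name and JSON Lines format are assumptions.
with open("qa_records.jsonl", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        title = row["title"]               # string, 0 to 150 characters
        question = row["question"]         # string, the question body
        answers = row["answers"]           # list of answer strings
        scores = row["answers_scores"]     # list of scores (presumably aligned with answers)
        tags = row["tags"]                 # list of tag strings
        # Rank the answers by score, highest first.
        ranked = sorted(zip(answers, scores), key=lambda pair: pair[1], reverse=True)
        print(title, tags, len(ranked))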
Q: ValueError: Input 0 of layer "model_10" is incompatible with the layer: expected shape=(None, 244, 244, 3), found shape=(None, 224, 224, 3) I am training a model on top of the prebuilt imagenetV2 model to classify dog breeds. Here is my code. import os import tensorflow as tf \_URL = 'http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar' path_to_zip = tf.keras.utils.get_file('images.tar', origin=\_URL, extract=True) BATCH_SIZE = 32 IMG_SIZE = (224, 224) dir = os.path.join(os.path.dirname(path_to_zip), 'Images') train_dataset = tf.keras.utils.image_dataset_from_directory(dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE, validation_split=.2, subset='training', seed=2021) validation_dataset = tf.keras.utils.image_dataset_from_directory(dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE, validation_split=.2, subset='validation', seed=2021) len(train_dataset.class_names) import matplotlib.pyplot as plt class_names = train_dataset.class_names plt.figure(figsize=(10, 10)) for images, labels in train_dataset.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images\[i\].numpy().astype("uint8")) plt.title(class_names\[labels\[i\]\]) plt.axis("off") AUTOTUNE = tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) val_batches = tf.data.experimental.cardinality(validation_dataset) data_augmentation = tf.keras.Sequential(\[ tf.keras.layers.RandomFlip('horizontal'), tf.keras.layers.RandomRotation(0.2), \]) for image, \_ in train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image = image\[0\] for i in range(9): ax = plt.subplot(3, 3, i + 1) augmented_image = data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image\[0\] / 255) plt.axis('off') rescale = tf.keras.layers.Rescaling(1./127.5, offset=-1) IMG_SHAPE = IMG_SIZE + (3,) base_model = tf.keras.applications.MobileNetV2(IMG_SHAPE, include_top=False, weights='imagenet') base_model.trainable = False global_average_layer = tf.keras.layers.GlobalAveragePooling2D() model = tf.keras.Sequential() model.add(tf.keras.Input(shape=(244, 244, 3, ))) model.add(tf.keras.layers.RandomFlip('horizontal')) model.add(tf.keras.layers.RandomRotation(0.2)) model.add(rescale) model.add(base_model) model.add(global_average_layer) model.add(tf.keras.layers.Dropout(0.2)) model.add(tf.keras.layers.Dense(120)) model.summary() At this point I get the following warning WARNING:tensorflow:Model was constructed with shape (None, 224, 224, 3) for input KerasTensor(type_spec=TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name='input_1'), name='input_1', description="created by layer 'input_1'"), but it was called on an input with incompatible shape (None, 244, 244, 3). 
And after I compile the model like below model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=\['accuracy'\]) model.summary() initial_epochs = 20 I get no errors but when I evaluate like so loss0, accuracy0 = model.evaluate(train_dataset) I get the following stack trace ValueError: in user code: File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1557, in test_function * return step_function(self, iterator) File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1546, in step_function ** outputs = model.distribute_strategy.run(run_step, args=(data,)) File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1535, in run_step ** outputs = model.test_step(data) File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1499, in test_step y_pred = self(x, training=False) File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler raise e.with_traceback(filtered_tb) from None File "/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py", line 264, in assert_input_compatibility raise ValueError(f'Input {input_index} of layer "{layer_name}" is ' ValueError: Input 0 of layer "model_10" is incompatible with the layer: expected shape=(None, 244, 244, 3), found shape=(None, 224, 224, 3) I have double checked the shape of everything and I am not sure what is causing this issue. I also have checked that the seed includes all classes for both training and validation. I suspect it has something to do with the transferred imagenet model but I have followed tensorflow tutorials closely and I have not seen a problem with the way I am doing things. Any and all help is appreciated thank you A: The error is due to the shape mismatch. Your input image is of shape (224, 224, 3) but the shape in the input layer is (244, 244, 3). Both the shapes should be same. model.add(tf.keras.Input(shape=(224, 224, 3))) Kindly change the input shape as above to avoid the error. Thank you!
ValueError: Input 0 of layer "model_10" is incompatible with the layer: expected shape=(None, 244, 244, 3), found shape=(None, 224, 224, 3)
I am training a model on top of the prebuilt imagenetV2 model to classify dog breeds. Here is my code. import os import tensorflow as tf \_URL = 'http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar' path_to_zip = tf.keras.utils.get_file('images.tar', origin=\_URL, extract=True) BATCH_SIZE = 32 IMG_SIZE = (224, 224) dir = os.path.join(os.path.dirname(path_to_zip), 'Images') train_dataset = tf.keras.utils.image_dataset_from_directory(dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE, validation_split=.2, subset='training', seed=2021) validation_dataset = tf.keras.utils.image_dataset_from_directory(dir, shuffle=True, batch_size=BATCH_SIZE, image_size=IMG_SIZE, validation_split=.2, subset='validation', seed=2021) len(train_dataset.class_names) import matplotlib.pyplot as plt class_names = train_dataset.class_names plt.figure(figsize=(10, 10)) for images, labels in train_dataset.take(1): for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(images\[i\].numpy().astype("uint8")) plt.title(class_names\[labels\[i\]\]) plt.axis("off") AUTOTUNE = tf.data.AUTOTUNE train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE) validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE) val_batches = tf.data.experimental.cardinality(validation_dataset) data_augmentation = tf.keras.Sequential(\[ tf.keras.layers.RandomFlip('horizontal'), tf.keras.layers.RandomRotation(0.2), \]) for image, \_ in train_dataset.take(1): plt.figure(figsize=(10, 10)) first_image = image\[0\] for i in range(9): ax = plt.subplot(3, 3, i + 1) augmented_image = data_augmentation(tf.expand_dims(first_image, 0)) plt.imshow(augmented_image\[0\] / 255) plt.axis('off') rescale = tf.keras.layers.Rescaling(1./127.5, offset=-1) IMG_SHAPE = IMG_SIZE + (3,) base_model = tf.keras.applications.MobileNetV2(IMG_SHAPE, include_top=False, weights='imagenet') base_model.trainable = False global_average_layer = tf.keras.layers.GlobalAveragePooling2D() model = tf.keras.Sequential() model.add(tf.keras.Input(shape=(244, 244, 3, ))) model.add(tf.keras.layers.RandomFlip('horizontal')) model.add(tf.keras.layers.RandomRotation(0.2)) model.add(rescale) model.add(base_model) model.add(global_average_layer) model.add(tf.keras.layers.Dropout(0.2)) model.add(tf.keras.layers.Dense(120)) model.summary() At this point I get the following warning WARNING:tensorflow:Model was constructed with shape (None, 224, 224, 3) for input KerasTensor(type_spec=TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name='input_1'), name='input_1', description="created by layer 'input_1'"), but it was called on an input with incompatible shape (None, 244, 244, 3). 
And after I compile the model like below model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=\['accuracy'\]) model.summary() initial_epochs = 20 I get no errors but when I evaluate like so loss0, accuracy0 = model.evaluate(train_dataset) I get the following stack trace ValueError: in user code: File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1557, in test_function * return step_function(self, iterator) File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1546, in step_function ** outputs = model.distribute_strategy.run(run_step, args=(data,)) File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1535, in run_step ** outputs = model.test_step(data) File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1499, in test_step y_pred = self(x, training=False) File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler raise e.with_traceback(filtered_tb) from None File "/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py", line 264, in assert_input_compatibility raise ValueError(f'Input {input_index} of layer "{layer_name}" is ' ValueError: Input 0 of layer "model_10" is incompatible with the layer: expected shape=(None, 244, 244, 3), found shape=(None, 224, 224, 3) I have double checked the shape of everything and I am not sure what is causing this issue. I also have checked that the seed includes all classes for both training and validation. I suspect it has something to do with the transferred imagenet model but I have followed tensorflow tutorials closely and I have not seen a problem with the way I am doing things. Any and all help is appreciated thank you
[ "The error is due to the shape mismatch. Your input image is of shape (224, 224, 3) but the shape in the input layer is (244, 244, 3). Both the shapes should be same.\nmodel.add(tf.keras.Input(shape=(224, 224, 3)))\n\nKindly change the input shape as above to avoid the error. Thank you!\n" ]
[ 0 ]
[]
[]
[ "classification", "imagenet", "python", "tensorflow", "transfer_learning" ]
stackoverflow_0074405887_classification_imagenet_python_tensorflow_transfer_learning.txt
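The accepted answer in the record above comes down to a single change: the shape passed to tf.keras.Input must match the IMG_SIZE used when loading the images (224, not the 244 typo). A minimal sketch of the corrected model construction, following the layer choices in the question rather than any official recipe:

import tensorflow as tf

IMG_SIZE = (224, 224)   # must match image_dataset_from_directory(image_size=...)
IMG_SHAPE = IMG_SIZE + (3,)

# Frozen MobileNetV2 base, as in the question.
base_model = tf.keras.applications.MobileNetV2(
    input_shape=IMG_SHAPE, include_top=False, weights="imagenet")
base_model.trainable = False

model = tf.keras.Sequential([
    tf.keras.Input(shape=IMG_SHAPE),            # 224x224x3, matching the data pipeline
    tf.keras.layers.RandomFlip("horizontal"),
    tf.keras.layers.RandomRotation(0.2),
    tf.keras.layers.Rescaling(1.0 / 127.5, offset=-1),
    base_model,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(120),                 # 120 dog-breed classes
])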
Q: Get key by value in dictionary I made a function which will look up ages in a Dictionary and show the matching name: dictionary = {'george' : 16, 'amber' : 19} search_age = raw_input("Provide age") for age in dictionary.values(): if age == search_age: name = dictionary[age] print name I know how to compare and find the age I just don't know how to show the name of the person. Additionally, I am getting a KeyError because of line 5. I know it's not correct but I can't figure out how to make it search backwards. A: mydict = {'george': 16, 'amber': 19} print mydict.keys()[mydict.values().index(16)] # Prints george Or in Python 3.x: mydict = {'george': 16, 'amber': 19} print(list(mydict.keys())[list(mydict.values()).index(16)]) # Prints george Basically, it separates the dictionary's values in a list, finds the position of the value you have, and gets the key at that position. More about keys() and .values() in Python 3: How can I get list of values from dict? A: There is none. dict is not intended to be used this way. dictionary = {'george': 16, 'amber': 19} search_age = input("Provide age") for name, age in dictionary.items(): # for name, age in dictionary.iteritems(): (for Python 2.x) if age == search_age: print(name) A: If you want both the name and the age, you should be using .items() which gives you key (key, value) tuples: for name, age in mydict.items(): if age == search_age: print name You can unpack the tuple into two separate variables right in the for loop, then match the age. You should also consider reversing the dictionary if you're generally going to be looking up by age, and no two people have the same age: {16: 'george', 19: 'amber'} so you can look up the name for an age by just doing mydict[search_age] I've been calling it mydict instead of list because list is the name of a built-in type, and you shouldn't use that name for anything else. You can even get a list of all people with a given age in one line: [name for name, age in mydict.items() if age == search_age] or if there is only one person with each age: next((name for name, age in mydict.items() if age == search_age), None) which will just give you None if there isn't anyone with that age. Finally, if the dict is long and you're on Python 2, you should consider using .iteritems() instead of .items() as Cat Plus Plus did in his answer, since it doesn't need to make a copy of the list. A: I thought it would be interesting to point out which methods are the quickest, and in what scenario: Here's some tests I ran (on a 2012 MacBook Pro) def method1(dict, search_age): for name, age in dict.iteritems(): if age == search_age: return name def method2(dict, search_age): return [name for name,age in dict.iteritems() if age == search_age] def method3(dict, search_age): return dict.keys()[dict.values().index(search_age)] Results from profile.run() on each method 100,000 times: Method 1: >>> profile.run("for i in range(0,100000): method1(dict, 16)") 200004 function calls in 1.173 seconds Method 2: >>> profile.run("for i in range(0,100000): method2(dict, 16)") 200004 function calls in 1.222 seconds Method 3: >>> profile.run("for i in range(0,100000): method3(dict, 16)") 400004 function calls in 2.125 seconds So this shows that for a small dict, method 1 is the quickest. This is most likely because it returns the first match, as opposed to all of the matches like method 2 (see note below). 
Interestingly, performing the same tests on a dict I have with 2700 entries, I get quite different results (this time run 10,000 times): Method 1: >>> profile.run("for i in range(0,10000): method1(UIC_CRS,'7088380')") 20004 function calls in 2.928 seconds Method 2: >>> profile.run("for i in range(0,10000): method2(UIC_CRS,'7088380')") 20004 function calls in 3.872 seconds Method 3: >>> profile.run("for i in range(0,10000): method3(UIC_CRS,'7088380')") 40004 function calls in 1.176 seconds So here, method 3 is much faster. Just goes to show the size of your dict will affect which method you choose. Notes: Method 2 returns a list of all names, whereas methods 1 and 3 return only the first match. I have not considered memory usage. I'm not sure if method 3 creates 2 extra lists (keys() and values()) and stores them in memory. A: one line version: (i is an old dictionary, p is a reversed dictionary) explanation : i.keys() and i.values() returns two lists with keys and values of the dictionary respectively. The zip function has the ability to tie together lists to produce a dictionary. p = dict(zip(i.values(),i.keys())) Warning : This will work only if the values are hashable and unique. A: I found this answer very effective but not very easy to read for me. To make it more clear you can invert the key and the value of a dictionary. This is make the keys values and the values keys, as seen here. mydict = {'george':16,'amber':19} res = dict((v,k) for k,v in mydict.iteritems()) print(res[16]) # Prints george or for Python 3, (thanks @kkgarg) mydict = {'george':16,'amber':19} res = dict((v,k) for k,v in mydict.items()) print(res[16]) # Prints george Also print(res.get(16)) # Prints george which is essentially the same that this other answer. A: a = {'a':1,'b':2,'c':3} {v:k for k, v in a.items()}[1] or better {k:v for k, v in a.items() if v == 1} A: key = next((k for k in my_dict if my_dict[k] == val), None) A: Try this one-liner to reverse a dictionary: reversed_dictionary = dict(map(reversed, dictionary.items())) A: If you want to find the key by the value, you can use a dictionary comprehension to create a lookup dictionary and then use that to find the key from the value. lookup = {value: key for key, value in self.data} lookup[value] A: we can get the Key of dict by : def getKey(dct,value): return [key for key in dct if (dct[key] == value)] A: You can get key by using dict.keys(), dict.values() and list.index() methods, see code samples below: names_dict = {'george':16,'amber':19} search_age = int(raw_input("Provide age")) key = names_dict.keys()[names_dict.values().index(search_age)] A: Here is my take on this problem. :) I have just started learning Python, so I call this: "The Understandable for beginners" solution. #Code without comments. list1 = {'george':16,'amber':19, 'Garry':19} search_age = raw_input("Provide age: ") print search_age = int(search_age) listByAge = {} for name, age in list1.items(): if age == search_age: age = str(age) results = name + " " +age print results age2 = int(age) listByAge[name] = listByAge.get(name,0)+age2 print print listByAge . #Code with comments. #I've added another name with the same age to the list. list1 = {'george':16,'amber':19, 'Garry':19} #Original code. search_age = raw_input("Provide age: ") print #Because raw_input gives a string, we need to convert it to int, #so we can search the dictionary list with it. search_age = int(search_age) #Here we define another empty dictionary, to store the results in a more #permanent way. 
listByAge = {} #We use double variable iteration, so we get both the name and age #on each run of the loop. for name, age in list1.items(): #Here we check if the User Defined age = the age parameter #for this run of the loop. if age == search_age: #Here we convert Age back to string, because we will concatenate it #with the person's name. age = str(age) #Here we concatenate. results = name + " " +age #If you want just the names and ages displayed you can delete #the code after "print results". If you want them stored, don't... print results #Here we create a second variable that uses the value of #the age for the current person in the list. #For example if "Anna" is "10", age2 = 10, #integer value which we can use in addition. age2 = int(age) #Here we use the method that checks or creates values in dictionaries. #We create a new entry for each name that matches the User Defined Age #with default value of 0, and then we add the value from age2. listByAge[name] = listByAge.get(name,0)+age2 #Here we print the new dictionary with the users with User Defined Age. print print listByAge . #Results Running: *\test.py (Thu Jun 06 05:10:02 2013) Provide age: 19 amber 19 Garry 19 {'amber': 19, 'Garry': 19} Execution Successful! A: get_key = lambda v, d: next(k for k in d if d[k] is v) A: Consider using Pandas. As stated in William McKinney's "Python for Data Analysis' Another way to think about a Series is as a fixed-length, ordered dict, as it is a mapping of index values to data values. It can be used in many contexts where you might use a dict. import pandas as pd list = {'george':16,'amber':19} lookup_list = pd.Series(list) To query your series do the following: lookup_list[lookup_list.values == 19] Which yields: Out[1]: amber 19 dtype: int64 If you need to do anything else with the output transforming the answer into a list might be useful: answer = lookup_list[lookup_list.values == 19].index answer = pd.Index.tolist(answer) A: d= {'george':16,'amber':19} dict((v,k) for k,v in d.items()).get(16) The output is as follows: -> prints george A: Here, recover_key takes dictionary and value to find in dictionary. We then loop over the keys in dictionary and make a comparison with that of value and return that particular key. def recover_key(dicty,value): for a_key in dicty.keys(): if (dicty[a_key] == value): return a_key A: for name in mydict: if mydict[name] == search_age: print(name) #or do something else with it. #if in a function append to a temporary list, #then after the loop return the list A: my_dict = {'A': 19, 'B': 28, 'carson': 28} search_age = 28 take only one name = next((name for name, age in my_dict.items() if age == search_age), None) print(name) # 'B' get multiple data name_list = [name for name, age in filter(lambda item: item[1] == search_age, my_dict.items())] print(name_list) # ['B', 'carson'] A: I glimpsed all answers and none mentioned simply using list comprehension? 
This Pythonic one-line solution can return all keys for any number of given values (tested in Python 3.9.1): >>> dictionary = {'george' : 16, 'amber' : 19, 'frank': 19} >>> >>> age = 19 >>> name = [k for k in dictionary.keys() if dictionary[k] == age]; name ['george', 'frank'] >>> >>> age = (16, 19) >>> name = [k for k in dictionary.keys() if dictionary[k] in age]; name ['george', 'amber', 'frank'] >>> >>> age = (22, 25) >>> name = [k for k in dictionary.keys() if dictionary[k] in age]; name [] A: it's answered, but it could be done with a fancy 'map/reduce' use, e.g.: def find_key(value, dictionary): return reduce(lambda x, y: x if x is not None else y, map(lambda x: x[0] if x[1] == value else None, dictionary.iteritems())) A: I tried to read as many solutions as I can to prevent giving duplicate answer. However, if you are working on a dictionary which values are contained in lists and if you want to get keys that have a particular element you could do this: d = {'Adams': [18, 29, 30], 'Allen': [9, 27], 'Anderson': [24, 26], 'Bailey': [7, 30], 'Baker': [31, 7, 10, 19], 'Barnes': [22, 31, 10, 21], 'Bell': [2, 24, 17, 26]} Now lets find names that have 24 in their values. for key in d.keys(): if 24 in d[key]: print(key) This would work with multiple values as well. A: Just my answer in lambda and filter. filter( lambda x, dictionary=dictionary, search_age=int(search_age): dictionary[x] == search_age , dictionary ) A: One line solution using list comprehension, which returns multiple keys if the value is possibly present multiple times. [key for key,value in mydict.items() if value == 16] A: already been answered, but since several people mentioned reversing the dictionary, here's how you do it in one line (assuming 1:1 mapping) and some various perf data: python 2.6: reversedict = dict([(value, key) for key, value in mydict.iteritems()]) 2.7+: reversedict = {value:key for key, value in mydict.iteritems()} if you think it's not 1:1, you can still create a reasonable reverse mapping with a couple lines: reversedict = defaultdict(list) [reversedict[value].append(key) for key, value in mydict.iteritems()] how slow is this: slower than a simple search, but not nearly as slow as you'd think - on a 'straight' 100000 entry dictionary, a 'fast' search (i.e. looking for a value that should be early in the keys) was about 10x faster than reversing the entire dictionary, and a 'slow' search (towards the end) about 4-5x faster. So after at most about 10 lookups, it's paid for itself. the second version (with lists per item) takes about 2.5x as long as the simple version. largedict = dict((x,x) for x in range(100000)) # Should be slow, has to search 90000 entries before it finds it In [26]: %timeit largedict.keys()[largedict.values().index(90000)] 100 loops, best of 3: 4.81 ms per loop # Should be fast, has to only search 9 entries to find it. In [27]: %timeit largedict.keys()[largedict.values().index(9)] 100 loops, best of 3: 2.94 ms per loop # How about using iterkeys() instead of keys()? # These are faster, because you don't have to create the entire keys array. # You DO have to create the entire values array - more on that later. 
In [31]: %timeit islice(largedict.iterkeys(), largedict.values().index(90000)) 100 loops, best of 3: 3.38 ms per loop In [32]: %timeit islice(largedict.iterkeys(), largedict.values().index(9)) 1000 loops, best of 3: 1.48 ms per loop In [24]: %timeit reversedict = dict([(value, key) for key, value in largedict.iteritems()]) 10 loops, best of 3: 22.9 ms per loop In [23]: %%timeit ....: reversedict = defaultdict(list) ....: [reversedict[value].append(key) for key, value in largedict.iteritems()] ....: 10 loops, best of 3: 53.6 ms per loop Also had some interesting results with ifilter. Theoretically, ifilter should be faster, in that we can use itervalues() and possibly not have to create/go through the entire values list. In practice, the results were... odd... In [72]: %%timeit ....: myf = ifilter(lambda x: x[1] == 90000, largedict.iteritems()) ....: myf.next()[0] ....: 100 loops, best of 3: 15.1 ms per loop In [73]: %%timeit ....: myf = ifilter(lambda x: x[1] == 9, largedict.iteritems()) ....: myf.next()[0] ....: 100000 loops, best of 3: 2.36 us per loop So, for small offsets, it was dramatically faster than any previous version (2.36 *u*S vs. a minimum of 1.48 *m*S for previous cases). However, for large offsets near the end of the list, it was dramatically slower (15.1ms vs. the same 1.48mS). The small savings at the low end is not worth the cost at the high end, imho. A: Cat Plus Plus mentioned that this isn't how a dictionary is intended to be used. Here's why: The definition of a dictionary is analogous to that of a mapping in mathematics. In this case, a dict is a mapping of K (the set of keys) to V (the values) - but not vice versa. If you dereference a dict, you expect to get exactly one value returned. But, it is perfectly legal for different keys to map onto the same value, e.g.: d = { k1 : v1, k2 : v2, k3 : v1} When you look up a key by it's corresponding value, you're essentially inverting the dictionary. But a mapping isn't necessarily invertible! In this example, asking for the key corresponding to v1 could yield k1 or k3. Should you return both? Just the first one found? That's why indexof() is undefined for dictionaries. If you know your data, you could do this. But an API can't assume that an arbitrary dictionary is invertible, hence the lack of such an operation. A: here is my take on it. This is good for displaying multiple results just in case you need one. So I added the list as well myList = {'george':16,'amber':19, 'rachel':19, 'david':15 } #Setting the dictionary result=[] #Making ready of the result list search_age = int(input('Enter age ')) for keywords in myList.keys(): if myList[keywords] ==search_age: result.append(keywords) #This part, we are making list of results for res in result: #We are now printing the results print(res) And that's it... A: There is no easy way to find a key in a list by 'looking up' the value. However, if you know the value, iterating through the keys, you can look up values in the dictionary by the element. If D[element] where D is a dictionary object, is equal to the key you're trying to look up, you can execute some code. D = {'Ali': 20, 'Marina': 12, 'George':16} age = int(input('enter age:\t')) for element in D.keys(): if D[element] == age: print(element) A: You need to use a dictionary and reverse of that dictionary. It means you need another data structure. If you are in python 3, use enum module but if you are using python 2.7 use enum34 which is back ported for python 2. 
Example: from enum import Enum class Color(Enum): red = 1 green = 2 blue = 3 >>> print(Color.red) Color.red >>> print(repr(Color.red)) <color.red: 1=""> >>> type(Color.red) <enum 'color'=""> >>> isinstance(Color.green, Color) True >>> member = Color.red >>> member.name 'red' >>> member.value 1 A: def get_Value(dic,value): for name in dic: if dic[name] == value: del dic[name] return name A: Sometimes int() may be needed: titleDic = {'Фильмы':1, 'Музыка':2} def categoryTitleForNumber(self, num): search_title = '' for title, titleNum in self.titleDic.items(): if int(titleNum) == int(num): search_title = title return search_title A: This is how you access the dictionary to do what you want: list = {'george': 16, 'amber': 19} search_age = raw_input("Provide age") for age in list: if list[age] == search_age: print age of course, your names are so off it looks like it would be printing an age, but it DOES print the name. Since you are accessing by name, it becomes more understandable if you write: list = {'george': 16, 'amber': 19} search_age = raw_input("Provide age") for name in list: if list[name] == search_age: print name Better yet: people = {'george': {'age': 16}, 'amber': {'age': 19}} search_age = raw_input("Provide age") for name in people: if people[name]['age'] == search_age: print name A: This is kind of a strange question because the very first comment provides a perfect answer. Based on the sample data example provided dictionary = {'george': 16, 'amber': 19} print(dictionary["george"]) It returns 16 So you want the opposite to enter "16" and get "george" So simply swap keys,values and presto dictionary = {'george': 16, 'amber': 19} inv_dict = {value:key for key, value in dictionary.items()} print(inv_dict[16]) I was in the completely opposite position as i had a dictionary like {16:'george', 19:'amber'} and i was trying to feed "george" and get 16...i tried several kind of loops and iterators that OK..they work but it wasn't the easy one line solution that i would use for quick result...so i simply swapped and solution found. If i missed something please let me know to delete my answer. A: Here is a solution which works both in Python 2 and Python 3: dict((v, k) for k, v in list.items())[search_age] The part until [search_age] constructs the reverse dictionary (where values are keys and vice-versa). You could create a helper method which will cache this reversed dictionary like so: def find_name(age, _rev_lookup=dict((v, k) for k, v in ages_by_name.items())): return _rev_lookup[age] or even more generally a factory which would create a by-age name lookup method for one or more of you lists def create_name_finder(ages_by_name): names_by_age = dict((v, k) for k, v in ages_by_name.items()) def find_name(age): return names_by_age[age] so you would be able to do: find_teen_by_age = create_name_finder({'george':16,'amber':19}) ... find_teen_by_age(search_age) Note that I renamed list to ages_by_name since the former is a predefined type. A: dictionary = {'george' : 16, 'amber' : 19} search_age = raw_input("Provide age") key = [filter( lambda x: dictionary[x] == k , dictionary ),[None]][0] # key = None from [None] which is a safeguard for not found. For multiple occurrences use: keys = [filter( lambda x: dictionary[x] == k , dictionary )] A: I realize it's been a long time and the original asker likely no longer has any need of an answer, but none of these are good answers if you actually have control over this code. You're just using the wrong data structure. 
This is a perfect illustration of the use case for a two-way dict: >>> from collections import defaultdict, UserDict >>> class TwoWayDict(UserDict): ... def __init__(self, *args, **kwargs): ... super().__init__(*args, **kwargs) ... self.val_to_keys = defaultdict(list) ... def __setitem__(self, key, value): ... super().__setitem__(key, value) ... self.val_to_keys[value].append(key) ... def get_keys_for_val(self, value): ... return self.val_to_keys[value] ... >>> d = TwoWayDict() >>> d['a'] = 1 >>> d['b'] = 1 >>> d.get_keys_for_val(1) ['a', 'b'] Adds miniscule overhead to insertions but you keep constant-time lookup, except now in both directions. No need to construct the reverse mapping from scratch every time you need it. Just store it as you go and access it as needed. Further, many of these answers are not even correct because clearly many people can have the same age but they're only returning the first matching key, not all of them. A: Heres a truly "Reversible Dictionary", Based upon Adam Acosta's solution, but enforcing val-to-key calls to be unique and easily return key from value: from collections import UserDict class ReversibleDict(UserDict): def __init__(self, enforce_unique=True, *args, **kwargs): super().__init__(*args, **kwargs) self.val_to_keys = {} self.check_val = self.check_unique if enforce_unique else lambda x: x def __setitem__(self, key, value): self.check_val(value) super().__setitem__(key, value) self.val_to_keys[value] = key def __call__(self, value): return self.val_to_keys[value] def check_unique(self, value): assert value not in self.val_to_keys, f"Non unique value '{value}'" return value If you want to enforce uniqueness on dictionary values ensure to set enforce_unique=True. to get keys from values just do rev_dict(value), to call values from keys just do as usual dict['key'], here's an example of usage: rev_dict = ReversibleDict(enforce_unique=True) rev_dict["a"] = 1 rev_dict["b"] = 2 rev_dict["c"] = 3 print("full dictinoary is: ", rev_dict) print("value for key 'b' is: ", rev_dict["b"]) print("key for value '2' is: ", rev_dict(2)) print("tring to set another key with the same value results in error: ") rev_dict["d"] = 1 A: As someone mentioned there might be more than one key that have the same value, like my_dict below. Moreover, there might be no matching key. my_dict ={'k1':1,'k2':2, 'k3':1, 'k4':12, 'k5':1, 'k6':1, 'k7':12} Here are three ways of finding a key, one for the last hit, and two for the first. def find_last(search_value:int, d:dict): return [x for x,y in d.items() if y==search_value].pop() def find_first1(search_value:int, d:dict): return next(filter(lambda x: d[x]==search_value, d.keys()), None) def find_first2(search_value:int, d:dict): return next(x for x,y in d.items() if y==search_value) Of these find_first1 is a bit faster than the others, and will return None in case there is no matching key. A: A simple way to do this could be: list = {'george':16,'amber':19} search_age = raw_input("Provide age") for age in list.values(): name = list[list==search_age].key().tolist() print name This will return a list of the keys with value that match search_age. You can also replace "list==search_age" with any other conditions statement if needed. 
A: In my case the easiest way is to instantiate disctionary in your code then you can call keys from it like below here is my class having dictionary class Config: def local(self): return { "temp_dir": "/tmp/dirtest/", "devops": "Mansur", } To instantiate your dictionary config = vars.Config() local_config = config.local() Finally calling your dictionary keys patched = local_config.get("devops") A: I ended up doing it with a function. This way you might avoid doing the full loop, and the intuition says that it should be faster than other solutions presented. def get_key_from_value(my_dict, to_find): for k,v in my_dict.items(): if v==to_find: return k return None A: I was looking for this same question and I ended up with my variant: found_key = [a[0] for a in dict.items() if a[1] == 'value'][0] Only for those situations when a key has a unique value (which was my case). A: dict_a = {'length': 5, 'width': 9, 'height': 4} # get the key of specific value 5 key_of_value = list(dict_a)[list(dict_a.values()).index(5)] print(key_of_value) # length # get the key of minimum value key_min_value = list(dict_a)[list(dict_a.values()).index(sorted(dict_a.values())[0])] print(key_min_value) # height # get the key of maximum value key_max_value = list(dict_a)[list(dict_a.values()).index(sorted(dict_a.values(), reverse=True)[0])] print(key_max_value) # width
Get key by value in dictionary
I made a function which will look up ages in a Dictionary and show the matching name: dictionary = {'george' : 16, 'amber' : 19} search_age = raw_input("Provide age") for age in dictionary.values(): if age == search_age: name = dictionary[age] print name I know how to compare and find the age I just don't know how to show the name of the person. Additionally, I am getting a KeyError because of line 5. I know it's not correct but I can't figure out how to make it search backwards.
[ "mydict = {'george': 16, 'amber': 19}\nprint mydict.keys()[mydict.values().index(16)] # Prints george\n\nOr in Python 3.x:\nmydict = {'george': 16, 'amber': 19}\nprint(list(mydict.keys())[list(mydict.values()).index(16)]) # Prints george\n\nBasically, it separates the dictionary's values in a list, finds the position of the value you have, and gets the key at that position.\nMore about keys() and .values() in Python 3: How can I get list of values from dict?\n", "There is none. dict is not intended to be used this way.\ndictionary = {'george': 16, 'amber': 19}\nsearch_age = input(\"Provide age\")\nfor name, age in dictionary.items(): # for name, age in dictionary.iteritems(): (for Python 2.x)\n if age == search_age:\n print(name)\n\n", "If you want both the name and the age, you should be using .items() which gives you key (key, value) tuples:\nfor name, age in mydict.items():\n if age == search_age:\n print name\n\nYou can unpack the tuple into two separate variables right in the for loop, then match the age.\nYou should also consider reversing the dictionary if you're generally going to be looking up by age, and no two people have the same age:\n{16: 'george', 19: 'amber'}\n\nso you can look up the name for an age by just doing\nmydict[search_age]\n\nI've been calling it mydict instead of list because list is the name of a built-in type, and you shouldn't use that name for anything else.\nYou can even get a list of all people with a given age in one line:\n[name for name, age in mydict.items() if age == search_age]\n\nor if there is only one person with each age:\nnext((name for name, age in mydict.items() if age == search_age), None)\n\nwhich will just give you None if there isn't anyone with that age.\nFinally, if the dict is long and you're on Python 2, you should consider using .iteritems() instead of .items() as Cat Plus Plus did in his answer, since it doesn't need to make a copy of the list.\n", "I thought it would be interesting to point out which methods are the quickest, and in what scenario:\nHere's some tests I ran (on a 2012 MacBook Pro)\ndef method1(dict, search_age):\n for name, age in dict.iteritems():\n if age == search_age:\n return name\n\ndef method2(dict, search_age):\n return [name for name,age in dict.iteritems() if age == search_age]\n\ndef method3(dict, search_age):\n return dict.keys()[dict.values().index(search_age)]\n\nResults from profile.run() on each method 100,000 times:\nMethod 1:\n>>> profile.run(\"for i in range(0,100000): method1(dict, 16)\")\n 200004 function calls in 1.173 seconds\n\nMethod 2:\n>>> profile.run(\"for i in range(0,100000): method2(dict, 16)\")\n 200004 function calls in 1.222 seconds\n\nMethod 3:\n>>> profile.run(\"for i in range(0,100000): method3(dict, 16)\")\n 400004 function calls in 2.125 seconds\n\nSo this shows that for a small dict, method 1 is the quickest. This is most likely because it returns the first match, as opposed to all of the matches like method 2 (see note below).\n\nInterestingly, performing the same tests on a dict I have with 2700 entries, I get quite different results (this time run 10,000 times):\nMethod 1:\n>>> profile.run(\"for i in range(0,10000): method1(UIC_CRS,'7088380')\")\n 20004 function calls in 2.928 seconds\n\nMethod 2:\n>>> profile.run(\"for i in range(0,10000): method2(UIC_CRS,'7088380')\")\n 20004 function calls in 3.872 seconds\n\nMethod 3:\n>>> profile.run(\"for i in range(0,10000): method3(UIC_CRS,'7088380')\")\n 40004 function calls in 1.176 seconds\n\nSo here, method 3 is much faster. 
Just goes to show the size of your dict will affect which method you choose.\nNotes:\n\nMethod 2 returns a list of all names, whereas methods 1 and 3 return only the first match.\nI have not considered memory usage. I'm not sure if method 3 creates 2 extra lists (keys() and values()) and stores them in memory.\n\n", "one line version: (i is an old dictionary, p is a reversed dictionary)\nexplanation : i.keys() and i.values() returns two lists with keys and values of the dictionary respectively. The zip function has the ability to tie together lists to produce a dictionary.\np = dict(zip(i.values(),i.keys()))\n\nWarning : This will work only if the values are hashable and unique.\n", "I found this answer very effective but not very easy to read for me.\nTo make it more clear you can invert the key and the value of a dictionary. This is make the keys values and the values keys, as seen here.\nmydict = {'george':16,'amber':19}\nres = dict((v,k) for k,v in mydict.iteritems())\nprint(res[16]) # Prints george\n\nor for Python 3, (thanks @kkgarg)\nmydict = {'george':16,'amber':19}\nres = dict((v,k) for k,v in mydict.items())\nprint(res[16]) # Prints george\n\nAlso\nprint(res.get(16)) # Prints george\n\nwhich is essentially the same that this other answer.\n", "a = {'a':1,'b':2,'c':3}\n{v:k for k, v in a.items()}[1]\n\nor better\n{k:v for k, v in a.items() if v == 1}\n\n", "key = next((k for k in my_dict if my_dict[k] == val), None)\n\n", "Try this one-liner to reverse a dictionary:\nreversed_dictionary = dict(map(reversed, dictionary.items()))\n\n", "If you want to find the key by the value, you can use a dictionary comprehension to create a lookup dictionary and then use that to find the key from the value.\nlookup = {value: key for key, value in self.data}\nlookup[value]\n\n", "we can get the Key of dict by :\ndef getKey(dct,value):\n return [key for key in dct if (dct[key] == value)]\n\n", "You can get key by using dict.keys(), dict.values() and list.index() methods, see code samples below:\nnames_dict = {'george':16,'amber':19}\nsearch_age = int(raw_input(\"Provide age\"))\nkey = names_dict.keys()[names_dict.values().index(search_age)]\n\n", "Here is my take on this problem. :)\nI have just started learning Python, so I call this:\n\"The Understandable for beginners\" solution.\n#Code without comments.\n\nlist1 = {'george':16,'amber':19, 'Garry':19}\nsearch_age = raw_input(\"Provide age: \")\nprint\nsearch_age = int(search_age)\n\nlistByAge = {}\n\nfor name, age in list1.items():\n if age == search_age:\n age = str(age)\n results = name + \" \" +age\n print results\n\n age2 = int(age)\n listByAge[name] = listByAge.get(name,0)+age2\n\nprint\nprint listByAge\n\n.\n#Code with comments.\n#I've added another name with the same age to the list.\nlist1 = {'george':16,'amber':19, 'Garry':19}\n#Original code.\nsearch_age = raw_input(\"Provide age: \")\nprint\n#Because raw_input gives a string, we need to convert it to int,\n#so we can search the dictionary list with it.\nsearch_age = int(search_age)\n\n#Here we define another empty dictionary, to store the results in a more \n#permanent way.\nlistByAge = {}\n\n#We use double variable iteration, so we get both the name and age \n#on each run of the loop.\nfor name, age in list1.items():\n #Here we check if the User Defined age = the age parameter \n #for this run of the loop.\n if age == search_age:\n #Here we convert Age back to string, because we will concatenate it \n #with the person's name. 
\n age = str(age)\n #Here we concatenate.\n results = name + \" \" +age\n #If you want just the names and ages displayed you can delete\n #the code after \"print results\". If you want them stored, don't...\n print results\n\n #Here we create a second variable that uses the value of\n #the age for the current person in the list.\n #For example if \"Anna\" is \"10\", age2 = 10,\n #integer value which we can use in addition.\n age2 = int(age)\n #Here we use the method that checks or creates values in dictionaries.\n #We create a new entry for each name that matches the User Defined Age\n #with default value of 0, and then we add the value from age2.\n listByAge[name] = listByAge.get(name,0)+age2\n\n#Here we print the new dictionary with the users with User Defined Age.\nprint\nprint listByAge\n\n.\n#Results\nRunning: *\\test.py (Thu Jun 06 05:10:02 2013)\n\nProvide age: 19\n\namber 19\nGarry 19\n\n{'amber': 19, 'Garry': 19}\n\nExecution Successful!\n\n", "get_key = lambda v, d: next(k for k in d if d[k] is v)\n\n", "Consider using Pandas. As stated in William McKinney's \"Python for Data Analysis'\n\nAnother way to think about a Series is as a fixed-length, ordered\n dict, as it is a mapping of index values to data values. It can be\n used in many contexts where you might use a dict.\n\nimport pandas as pd\nlist = {'george':16,'amber':19}\nlookup_list = pd.Series(list)\n\nTo query your series do the following:\nlookup_list[lookup_list.values == 19]\n\nWhich yields:\nOut[1]: \namber 19\ndtype: int64\n\nIf you need to do anything else with the output transforming the \nanswer into a list might be useful:\nanswer = lookup_list[lookup_list.values == 19].index\nanswer = pd.Index.tolist(answer)\n\n", "d= {'george':16,'amber':19}\n\ndict((v,k) for k,v in d.items()).get(16)\n\nThe output is as follows:\n-> prints george\n\n", "Here, recover_key takes dictionary and value to find in dictionary. We then loop over the keys in dictionary and make a comparison with that of value and return that particular key.\ndef recover_key(dicty,value):\n for a_key in dicty.keys():\n if (dicty[a_key] == value):\n return a_key\n\n", "for name in mydict:\n if mydict[name] == search_age:\n print(name) \n #or do something else with it. 
\n #if in a function append to a temporary list, \n #then after the loop return the list\n\n", "my_dict = {'A': 19, 'B': 28, 'carson': 28}\nsearch_age = 28\n\ntake only one\nname = next((name for name, age in my_dict.items() if age == search_age), None)\nprint(name) # 'B'\n\nget multiple data\nname_list = [name for name, age in filter(lambda item: item[1] == search_age, my_dict.items())]\nprint(name_list) # ['B', 'carson']\n\n", "I glimpsed all answers and none mentioned simply using list comprehension?\nThis Pythonic one-line solution can return all keys for any number of given values (tested in Python 3.9.1):\n>>> dictionary = {'george' : 16, 'amber' : 19, 'frank': 19}\n>>>\n>>> age = 19\n>>> name = [k for k in dictionary.keys() if dictionary[k] == age]; name\n['george', 'frank']\n>>>\n>>> age = (16, 19)\n>>> name = [k for k in dictionary.keys() if dictionary[k] in age]; name\n['george', 'amber', 'frank']\n>>>\n>>> age = (22, 25)\n>>> name = [k for k in dictionary.keys() if dictionary[k] in age]; name\n[]\n\n", "it's answered, but it could be done with a fancy 'map/reduce' use, e.g.:\ndef find_key(value, dictionary):\n return reduce(lambda x, y: x if x is not None else y,\n map(lambda x: x[0] if x[1] == value else None, \n dictionary.iteritems()))\n\n", "I tried to read as many solutions as I can to prevent giving duplicate answer. However, if you are working on a dictionary which values are contained in lists and if you want to get keys that have a particular element you could do this:\nd = {'Adams': [18, 29, 30],\n 'Allen': [9, 27],\n 'Anderson': [24, 26],\n 'Bailey': [7, 30],\n 'Baker': [31, 7, 10, 19],\n 'Barnes': [22, 31, 10, 21],\n 'Bell': [2, 24, 17, 26]}\n\nNow lets find names that have 24 in their values.\nfor key in d.keys(): \n if 24 in d[key]:\n print(key)\n\nThis would work with multiple values as well. \n", "Just my answer in lambda and filter.\nfilter( lambda x, dictionary=dictionary, search_age=int(search_age): dictionary[x] == search_age , dictionary )\n\n", "One line solution using list comprehension, which returns multiple keys if the value is possibly present multiple times.\n[key for key,value in mydict.items() if value == 16]\n\n", "already been answered, but since several people mentioned reversing the dictionary, here's how you do it in one line (assuming 1:1 mapping) and some various perf data:\npython 2.6:\nreversedict = dict([(value, key) for key, value in mydict.iteritems()])\n\n2.7+:\nreversedict = {value:key for key, value in mydict.iteritems()}\n\nif you think it's not 1:1, you can still create a reasonable reverse mapping with a couple lines:\nreversedict = defaultdict(list)\n[reversedict[value].append(key) for key, value in mydict.iteritems()]\n\nhow slow is this: slower than a simple search, but not nearly as slow as you'd think - on a 'straight' 100000 entry dictionary, a 'fast' search (i.e. looking for a value that should be early in the keys) was about 10x faster than reversing the entire dictionary, and a 'slow' search (towards the end) about 4-5x faster. So after at most about 10 lookups, it's paid for itself. \nthe second version (with lists per item) takes about 2.5x as long as the simple version.\nlargedict = dict((x,x) for x in range(100000))\n\n# Should be slow, has to search 90000 entries before it finds it\nIn [26]: %timeit largedict.keys()[largedict.values().index(90000)]\n100 loops, best of 3: 4.81 ms per loop\n\n# Should be fast, has to only search 9 entries to find it. 
\nIn [27]: %timeit largedict.keys()[largedict.values().index(9)]\n100 loops, best of 3: 2.94 ms per loop\n\n# How about using iterkeys() instead of keys()?\n# These are faster, because you don't have to create the entire keys array.\n# You DO have to create the entire values array - more on that later.\n\nIn [31]: %timeit islice(largedict.iterkeys(), largedict.values().index(90000))\n100 loops, best of 3: 3.38 ms per loop\n\nIn [32]: %timeit islice(largedict.iterkeys(), largedict.values().index(9))\n1000 loops, best of 3: 1.48 ms per loop\n\nIn [24]: %timeit reversedict = dict([(value, key) for key, value in largedict.iteritems()])\n10 loops, best of 3: 22.9 ms per loop\n\nIn [23]: %%timeit\n....: reversedict = defaultdict(list)\n....: [reversedict[value].append(key) for key, value in largedict.iteritems()]\n....:\n10 loops, best of 3: 53.6 ms per loop\n\nAlso had some interesting results with ifilter. Theoretically, ifilter should be faster, in that we can use itervalues() and possibly not have to create/go through the entire values list. In practice, the results were... odd...\nIn [72]: %%timeit\n....: myf = ifilter(lambda x: x[1] == 90000, largedict.iteritems())\n....: myf.next()[0]\n....:\n100 loops, best of 3: 15.1 ms per loop\n\nIn [73]: %%timeit\n....: myf = ifilter(lambda x: x[1] == 9, largedict.iteritems())\n....: myf.next()[0]\n....:\n100000 loops, best of 3: 2.36 us per loop\n\nSo, for small offsets, it was dramatically faster than any previous version (2.36 *u*S vs. a minimum of 1.48 *m*S for previous cases). However, for large offsets near the end of the list, it was dramatically slower (15.1ms vs. the same 1.48mS). The small savings at the low end is not worth the cost at the high end, imho. \n", "Cat Plus Plus mentioned that this isn't how a dictionary is intended to be used. Here's why:\nThe definition of a dictionary is analogous to that of a mapping in mathematics. In this case, a dict is a mapping of K (the set of keys) to V (the values) - but not vice versa. If you dereference a dict, you expect to get exactly one value returned. But, it is perfectly legal for different keys to map onto the same value, e.g.:\nd = { k1 : v1, k2 : v2, k3 : v1}\n\nWhen you look up a key by it's corresponding value, you're essentially inverting the dictionary. But a mapping isn't necessarily invertible! In this example, asking for the key corresponding to v1 could yield k1 or k3. Should you return both? Just the first one found? That's why indexof() is undefined for dictionaries.\nIf you know your data, you could do this. But an API can't assume that an arbitrary dictionary is invertible, hence the lack of such an operation.\n", "here is my take on it. This is good for displaying multiple results just in case you need one. So I added the list as well \nmyList = {'george':16,'amber':19, 'rachel':19, \n 'david':15 } #Setting the dictionary\nresult=[] #Making ready of the result list\nsearch_age = int(input('Enter age '))\n\nfor keywords in myList.keys():\n if myList[keywords] ==search_age:\n result.append(keywords) #This part, we are making list of results\n\nfor res in result: #We are now printing the results\n print(res)\n\nAnd that's it... \n", "There is no easy way to find a key in a list by 'looking up' the value. However, if you know the value, iterating through the keys, you can look up values in the dictionary by the element. 
If D[element] where D is a dictionary object, is equal to the key you're trying to look up, you can execute some code.\nD = {'Ali': 20, 'Marina': 12, 'George':16}\nage = int(input('enter age:\\t')) \nfor element in D.keys():\n if D[element] == age:\n print(element)\n\n", "You need to use a dictionary and reverse of that dictionary. It means you need another data structure. If you are in python 3, use enum module but if you are using python 2.7 use enum34 which is back ported for python 2.\nExample:\nfrom enum import Enum\n\nclass Color(Enum): \n red = 1 \n green = 2 \n blue = 3\n\n>>> print(Color.red) \nColor.red\n\n>>> print(repr(Color.red)) \n<color.red: 1=\"\"> \n\n>>> type(Color.red) \n<enum 'color'=\"\"> \n>>> isinstance(Color.green, Color) \nTrue \n\n>>> member = Color.red \n>>> member.name \n'red' \n>>> member.value \n1 \n\n", "def get_Value(dic,value):\n for name in dic:\n if dic[name] == value:\n del dic[name]\n return name\n\n", "Sometimes int() may be needed:\ntitleDic = {'Фильмы':1, 'Музыка':2}\n\ndef categoryTitleForNumber(self, num):\n search_title = ''\n for title, titleNum in self.titleDic.items():\n if int(titleNum) == int(num):\n search_title = title\n return search_title\n\n", "This is how you access the dictionary to do what you want:\nlist = {'george': 16, 'amber': 19}\nsearch_age = raw_input(\"Provide age\")\nfor age in list:\n if list[age] == search_age:\n print age\n\nof course, your names are so off it looks like it would be printing an age, but it DOES print the name. Since you are accessing by name, it becomes more understandable if you write:\nlist = {'george': 16, 'amber': 19}\nsearch_age = raw_input(\"Provide age\")\nfor name in list:\n if list[name] == search_age:\n print name\n\nBetter yet: \npeople = {'george': {'age': 16}, 'amber': {'age': 19}}\nsearch_age = raw_input(\"Provide age\")\nfor name in people:\n if people[name]['age'] == search_age:\n print name\n\n", "This is kind of a strange question because the very first comment provides a perfect answer.\nBased on the sample data example provided\ndictionary = {'george': 16, 'amber': 19}\nprint(dictionary[\"george\"])\n\nIt returns\n16\n\nSo you want the opposite\nto enter \"16\" and get \"george\"\nSo simply swap keys,values and presto\ndictionary = {'george': 16, 'amber': 19}\ninv_dict = {value:key for key, value in dictionary.items()}\nprint(inv_dict[16])\n\nI was in the completely opposite position as i had a dictionary like\n{16:'george', 19:'amber'}\n\nand i was trying to feed \"george\" and get 16...i tried several kind of loops and iterators that OK..they work but it wasn't the easy one line solution that i would use for quick result...so i simply swapped and solution found.\nIf i missed something please let me know to delete my answer.\n", "Here is a solution which works both in Python 2 and Python 3:\ndict((v, k) for k, v in list.items())[search_age]\n\nThe part until [search_age] constructs the reverse dictionary (where values are keys and vice-versa).\nYou could create a helper method which will cache this reversed dictionary like so:\ndef find_name(age, _rev_lookup=dict((v, k) for k, v in ages_by_name.items())):\n return _rev_lookup[age]\n\nor even more generally a factory which would create a by-age name lookup method for one or more of you lists\ndef create_name_finder(ages_by_name):\n names_by_age = dict((v, k) for k, v in ages_by_name.items())\n def find_name(age):\n return names_by_age[age]\n\nso you would be able to do:\nfind_teen_by_age = 
create_name_finder({'george':16,'amber':19})\n...\nfind_teen_by_age(search_age)\n\nNote that I renamed list to ages_by_name since the former is a predefined type.\n", "dictionary = {'george' : 16, 'amber' : 19}\nsearch_age = raw_input(\"Provide age\")\nkey = [filter( lambda x: dictionary[x] == k , dictionary ),[None]][0] \n# key = None from [None] which is a safeguard for not found.\n\nFor multiple occurrences use:\nkeys = [filter( lambda x: dictionary[x] == k , dictionary )]\n\n", "I realize it's been a long time and the original asker likely no longer has any need of an answer, but none of these are good answers if you actually have control over this code. You're just using the wrong data structure. This is a perfect illustration of the use case for a two-way dict:\n>>> from collections import defaultdict, UserDict\n>>> class TwoWayDict(UserDict):\n... def __init__(self, *args, **kwargs):\n... super().__init__(*args, **kwargs)\n... self.val_to_keys = defaultdict(list)\n... def __setitem__(self, key, value):\n... super().__setitem__(key, value)\n... self.val_to_keys[value].append(key)\n... def get_keys_for_val(self, value):\n... return self.val_to_keys[value]\n... \n>>> d = TwoWayDict()\n>>> d['a'] = 1\n>>> d['b'] = 1\n>>> d.get_keys_for_val(1)\n['a', 'b']\n\nAdds miniscule overhead to insertions but you keep constant-time lookup, except now in both directions. No need to construct the reverse mapping from scratch every time you need it. Just store it as you go and access it as needed.\nFurther, many of these answers are not even correct because clearly many people can have the same age but they're only returning the first matching key, not all of them.\n", "Heres a truly \"Reversible Dictionary\", Based upon Adam Acosta's solution, but enforcing val-to-key calls to be unique and easily return key from value:\nfrom collections import UserDict\n\n\nclass ReversibleDict(UserDict):\n def __init__(self, enforce_unique=True, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.val_to_keys = {}\n self.check_val = self.check_unique if enforce_unique else lambda x: x\n\n def __setitem__(self, key, value):\n self.check_val(value)\n super().__setitem__(key, value)\n self.val_to_keys[value] = key\n\n def __call__(self, value):\n return self.val_to_keys[value]\n\n def check_unique(self, value):\n assert value not in self.val_to_keys, f\"Non unique value '{value}'\"\n return value\n\nIf you want to enforce uniqueness on dictionary values ensure to set enforce_unique=True. to get keys from values just do rev_dict(value), to call values from keys just do as usual dict['key'], here's an example of usage:\nrev_dict = ReversibleDict(enforce_unique=True)\nrev_dict[\"a\"] = 1\nrev_dict[\"b\"] = 2\nrev_dict[\"c\"] = 3\nprint(\"full dictinoary is: \", rev_dict)\nprint(\"value for key 'b' is: \", rev_dict[\"b\"])\nprint(\"key for value '2' is: \", rev_dict(2))\nprint(\"tring to set another key with the same value results in error: \")\nrev_dict[\"d\"] = 1\n\n", "As someone mentioned there might be more than one key that have the same value, like my_dict below. 
Moreover, there might be no matching key.\nmy_dict ={'k1':1,'k2':2, 'k3':1, 'k4':12, 'k5':1, 'k6':1, 'k7':12}\n\nHere are three ways of finding a key, one for the last hit, and two for the first.\ndef find_last(search_value:int, d:dict):\n \n return [x for x,y in d.items() if y==search_value].pop()\n\ndef find_first1(search_value:int, d:dict):\n return next(filter(lambda x: d[x]==search_value, d.keys()), None)\n\ndef find_first2(search_value:int, d:dict):\n return next(x for x,y in d.items() if y==search_value)\n\nOf these find_first1 is a bit faster than the others, and will return None in case there is no matching key.\n", "A simple way to do this could be:\nlist = {'george':16,'amber':19}\nsearch_age = raw_input(\"Provide age\")\nfor age in list.values():\n name = list[list==search_age].key().tolist()\n print name\n\nThis will return a list of the keys with value that match search_age. You can also replace \"list==search_age\" with any other conditions statement if needed.\n", "In my case the easiest way is to instantiate disctionary in your code then you can call keys from it like below\nhere is my class having dictionary \nclass Config:\n\ndef local(self):\n return {\n \"temp_dir\": \"/tmp/dirtest/\",\n \"devops\": \"Mansur\",\n }\n\nTo instantiate your dictionary\nconfig = vars.Config()\nlocal_config = config.local()\n\nFinally calling your dictionary keys\npatched = local_config.get(\"devops\")\n\n", "I ended up doing it with a function. This way you might avoid doing the full loop, and the intuition says that it should be faster than other solutions presented.\ndef get_key_from_value(my_dict, to_find):\n\n for k,v in my_dict.items():\n if v==to_find: return k\n\n return None\n\n", "I was looking for this same question and I ended up with my variant:\nfound_key = [a[0] for a in dict.items() if a[1] == 'value'][0]\nOnly for those situations when a key has a unique value (which was my case).\n", "dict_a = {'length': 5, 'width': 9, 'height': 4}\n\n# get the key of specific value 5\nkey_of_value = list(dict_a)[list(dict_a.values()).index(5)]\nprint(key_of_value) # length\n\n# get the key of minimum value\nkey_min_value = list(dict_a)[list(dict_a.values()).index(sorted(dict_a.values())[0])]\nprint(key_min_value) # height\n\n# get the key of maximum value\nkey_max_value = list(dict_a)[list(dict_a.values()).index(sorted(dict_a.values(), reverse=True)[0])]\nprint(key_max_value) # width\n\n\n\n" ]
[ 892, 680, 334, 108, 85, 45, 40, 35, 21, 15, 12, 11, 10, 10, 8, 6, 6, 5, 5, 5, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 ]
[]
[]
[ "dictionary", "python" ]
stackoverflow_0008023306_dictionary_python.txt
Q: How can I write comments on some cells of an Excel sheet using pandas I didn't find anything that enables me to write comments on a specific cell while writing an Excel sheet using pandas.to_excel. Any help is appreciated. A: After searching for some time, I think the best way to handle comments or other such properties like color and size of text at cell or sheet level is to use XlsxWriter with pandas. Here is the link to some nice examples of using XlsxWriter with pandas: http://xlsxwriter.readthedocs.org/working_with_pandas.html A: My reputation is too low to write a comment... The given link by Randhawa does not provide any information about how to add comments. You can refer to this link https://xlsxwriter.readthedocs.io/working_with_cell_comments.html, which specifies how you can add comments with XlsxWriter. worksheet.write('A1', 'Hello') worksheet.write_comment('A1', 'This is a comment') A: This is a working example based on the useful web pages linked to by Randhawa and Carsten: import pandas as pd # Create a Pandas dataframe df = pd.DataFrame({"Data": [10, 20, 30, 20, 15, 30, 45]}) # Create a Pandas Excel writer using XlsxWriter as the engine writer = pd.ExcelWriter("pandas_simple.xlsx", engine="xlsxwriter") # Convert the dataframe to an XlsxWriter Excel object (sheet) df.to_excel(writer, sheet_name="Sheet1") # Get the xlsxwriter object for the sheet where you will write a comment workbook = writer.book worksheet = writer.sheets["Sheet1"] # Add comment to cell A1 in worksheet ("Sheet1"), set to visible worksheet.write_comment("A1", "This is a comment", {"visible": True}) # Write the data (sheets) to the workbook writer.close()
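If the workbook is written with the openpyxl engine instead of XlsxWriter, cell comments can be attached through openpyxl's Comment object. The sketch below is an illustration of that alternative rather than part of the answers above; the file name, sheet name and author string are placeholders, and it assumes openpyxl is installed.

import pandas as pd
from openpyxl.comments import Comment

df = pd.DataFrame({"Data": [10, 20, 30]})

# Use the openpyxl engine so writer.sheets holds openpyxl worksheet objects
with pd.ExcelWriter("pandas_openpyxl.xlsx", engine="openpyxl") as writer:
    df.to_excel(writer, sheet_name="Sheet1")
    ws = writer.sheets["Sheet1"]
    # openpyxl attaches the comment directly to the cell
    ws["A1"].comment = Comment("This is a comment", "Author")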
How can I write comments on some cells of an Excel sheet using pandas
I didn't find anything that enables me to write comments on a specific cell while writing an Excel sheet using pandas.to_excel. Any help is appreciated.
[ "After searching for some time, I think the best way to handle comments or other such properties like color and size of text at cell or sheet level is to use XlsxWriter with pandas.\nHere is the link to the some nice examples of using XlsxWriter with pandas:\nhttp://xlsxwriter.readthedocs.org/working_with_pandas.html\n\n", "My reputation is too low to write a comment...\nThe given link by Randhawa does not provide any information about how to add comments. You can refer to this link https://xlsxwriter.readthedocs.io/working_with_cell_comments.html, which specifies how you can add comments with XlsxWriter.\nworksheet.write('A1', 'Hello')\nworksheet.write_comment('A1', 'This is a comment')\n\n", "This is a working example based on the useful web pages linked to by Randhawa and Carsten:\nimport pandas as pd\n\n# Create a Pandas dataframe\ndf = pd.DataFrame({\"Data\": [10, 20, 30, 20, 15, 30, 45]})\n\n# Create a Pandas Excel writer using XlsxWriter as the engine\nwriter = pd.ExcelWriter(\"pandas_simple.xlsx\", engine=\"xlsxwriter\")\n\n# Convert the dataframe to an XlsxWriter Excel object (sheet)\ndf.to_excel(writer, sheet_name=\"Sheet1\")\n\n# Get the xlsxwriter object for the sheet where you will write a comment \nworkbook = writer.book\nworksheet = writer.sheets[\"Sheet1\"]\n\n# Add comment to cell A1 in worksheet (\"Sheet1\"), set to visible\nworksheet.write_comment(\"A1\", \"This is a comment\", {\"visible\": True})\n\n# Write the data (sheets) to the workbook\nwriter.close()\n\n" ]
[ 4, 2, 1 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0036397805_pandas_python.txt
Q: Determine season given timestamp in Python using datetime I'd like to extract only the month and day from a timestamp using the datetime module (not time) and then determine if it falls within a given season (fall, summer, winter, spring) based on the fixed dates of the solstices and equinoxes. For instance, if the date falls between March 21 and June 20, it is spring. Regardless of the year. I want it to just look at the month and day and ignore the year in this calculation. I've been running into trouble using this because the month is not being extracted properly from my data, for this reason. A: if the date falls between March 21 and June 20, it is spring. Regardless of the year. I want it to just look at the month and day and ignore the year in this calculation. #!/usr/bin/env python from datetime import date, datetime Y = 2000 # dummy leap year to allow input X-02-29 (leap day) seasons = [('winter', (date(Y, 1, 1), date(Y, 3, 20))), ('spring', (date(Y, 3, 21), date(Y, 6, 20))), ('summer', (date(Y, 6, 21), date(Y, 9, 22))), ('autumn', (date(Y, 9, 23), date(Y, 12, 20))), ('winter', (date(Y, 12, 21), date(Y, 12, 31)))] def get_season(now): if isinstance(now, datetime): now = now.date() now = now.replace(year=Y) return next(season for season, (start, end) in seasons if start <= now <= end) print(get_season(date.today())) It is an extended version of @Manuel G answer to support any year. A: It might be easier just to use the day of year parameter. It's not much different than your approach, but possibly easier to understand than the magic numbers. # get the current day of the year doy = datetime.today().timetuple().tm_yday # "day of year" ranges for the northern hemisphere spring = range(80, 172) summer = range(172, 264) fall = range(264, 355) # winter = everything else if doy in spring: season = 'spring' elif doy in summer: season = 'summer' elif doy in fall: season = 'fall' else: season = 'winter' A: I came here looking how to map dates to seasons, and based on this answer I finally solved it in the following way: def season_of_date(date): year = str(date.year) seasons = {'spring': pd.date_range(start='21/03/'+year, end='20/06/'+year), 'summer': pd.date_range(start='21/06/'+year, end='22/09/'+year), 'autumn': pd.date_range(start='23/09/'+year, end='20/12/'+year)} if date in seasons['spring']: return 'spring' if date in seasons['summer']: return 'summer' if date in seasons['autumn']: return 'autumn' else: return 'winter' # Assuming df has a date column of type `datetime` df['season'] = df.date.map(season_of_date) So in principle it works for any year, given a datetime. A: The hemisphere that you are in must be taken into account. You must determine the hemisphere using geolocation yourself. 
def season(self, HEMISPHERE): date = self.now() md = date.month * 100 + date.day if ((md > 320) and (md < 621)): s = 0 #spring elif ((md > 620) and (md < 923)): s = 1 #summer elif ((md > 922) and (md < 1223)): s = 2 #fall else: s = 3 #winter if not HEMISPHERE == 'north': s = (s + 2) % 3 return s A: There is no need to deal with years, it is enough to compare the month and day as tuples like this: import datetime def get_season(date: datetime.datetime, north_hemisphere: bool = True) -> str: now = (date.month, date.day) if (3, 21) <= now < (6, 21): season = 'spring' if north_hemisphere else 'fall' elif (6, 21) <= now < (9, 21): season = 'summer' if north_hemisphere else 'winter' elif (9, 21) <= now < (12, 21): season = 'fall' if north_hemisphere else 'spring' else: season = 'winter' if north_hemisphere else 'summer' return season A: I'm too new to comment, and my edit was rejected, so here is corrected code for @adsf reponse. def season(date, hemisphere): ''' date is a datetime object hemisphere is either 'north' or 'south', dependent on long/lat. ''' md = date.month * 100 + date.day if ((md > 320) and (md < 621)): s = 0 #spring elif ((md > 620) and (md < 923)): s = 1 #summer elif ((md > 922) and (md < 1223)): s = 2 #fall else: s = 3 #winter if hemisphere != 'north': if s < 2: s += 2 else: s -= 2 return s A: I think you can use pandas.Series.dt.quarter? For example, datetime = pd.Series(pd.to_datetime(['2010-09-30', '2010-04-25', '2010-01-25', '2010-10-29', '2010-12-25'])) seasons = datetime.dt.quarter seasons: 0 3 1 2 2 1 3 4 4 4 seasons would be what you want? A: This is how I finally solved it. I doubt this is the best solution, but it works. Feel free to offer better solutions. import datetime def get_season(date): """ convert date to month and day as integer (md), e.g. 4/21 = 421, 11/17 = 1117, etc. """ m = date.month * 100 d = date.day md = m + d if ((md >= 301) and (md <= 531)): s = 0 # spring elif ((md > 531) and (md < 901)): s = 1 # summer elif ((md >= 901) and (md <= 1130)): s = 2 # fall elif ((md > 1130) and (md <= 0229)): s = 3 # winter else: raise IndexError("Invalid date") return s season = get_season(dt.date()) A: This is what i normally use: seasons = {'Summer':(datetime(2014,6,21), datetime(2014,9,22)), 'Autumn':(datetime(2014,9,23), datetime(2014,12,20)), 'Spring':(datetime(2014,3,21), datetime(2014,6,20))} def get_season(date): for season,(season_start, season_end) in seasons.items(): if date>=season_start and date<= season_end: return season else: return 'Winter' A: This is a follow up to the answer by @iipr The function posted does work, but will have to be adapted to the DateTime format you have in your dataset so keep that in mind! Mine is in the format YYYY-MM-DD, while the original function assumes DD-MM-YYYY. The function will also not work if the DateTime column includes timestamps (which mine did), which necessitates some more adjustments (see below). def season_of_date(date): year = str(date.year) # Extract a string of only the date of the DateTime column, use that in the if statements. date_only = str(date.date()) # Changed the format of the date_range from DD-MM-YYYY to YYYY-MM-DD. 
seasons = {'spring': pandas.date_range(start=year+'/03/21', end=year+'/06/20'), 'summer': pandas.date_range(start=year+'/06/21', end=year+'/09/22'), 'autumn': pandas.date_range(start=year+'/09/23', end=year+'/12/20')} if date_only in seasons['spring']: return 'spring' if date_only in seasons['summer']: return 'summer' if date_only in seasons['autumn']: return 'autumn' else: return 'winter' # Assuming df has a date column of type `datetime` df['season'] = df.date.map(season_of_date) This should hopefully be a bit more robust. A: using python datetime and simple dictionary import datetime date = datetime.datetime.strptime('01/05/2015 01:30:00 PM', "%m/%d/%Y %H:%M:%S %p") //using above created date for the code below seasons = {1:[12,1,2],2:[3,4,5],3:[6,7,8],4:[9,10,11]} ss={} for k,v in seasons.items(): for e in v: ss[e] = k print(ss[date.month])
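If the dates are already in a pandas Series, the month-day comparison used in several of the answers above can be vectorized with pd.cut instead of applying a Python function row by row. This is a minimal sketch, assuming pandas >= 1.1 (needed for ordered=False with repeated labels) and the northern-hemisphere boundaries of roughly March 21 / June 21 / September 23 / December 21 used above:

import pandas as pd

dates = pd.Series(pd.to_datetime(["2021-01-15", "2022-04-02", "2022-07-09", "2022-10-31"]))

# encode each date as month*100 + day, e.g. 4/21 -> 421, 11/17 -> 1117
md = dates.dt.month * 100 + dates.dt.day

# bin the encoded values; the two outer bins are both winter
season = pd.cut(md,
                bins=[0, 320, 620, 922, 1220, 1231],
                labels=["winter", "spring", "summer", "autumn", "winter"],
                ordered=False)
print(season.tolist())  # ['winter', 'spring', 'summer', 'autumn']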
Determine season given timestamp in Python using datetime
I'd like to extract only the month and day from a timestamp using the datetime module (not time) and then determine if it falls within a given season (fall, summer, winter, spring) based on the fixed dates of the solstices and equinoxes. For instance, if the date falls between March 21 and June 20, it is spring. Regardless of the year. I want it to just look at the month and day and ignore the year in this calculation. I've been running into trouble using this because the month is not being extracted properly from my data, for this reason.
[ "\nif the date falls between March 21 and June 20, it is spring.\n Regardless of the year. I want it to just look at the month and day\n and ignore the year in this calculation.\n\n#!/usr/bin/env python\nfrom datetime import date, datetime\n\nY = 2000 # dummy leap year to allow input X-02-29 (leap day)\nseasons = [('winter', (date(Y, 1, 1), date(Y, 3, 20))),\n ('spring', (date(Y, 3, 21), date(Y, 6, 20))),\n ('summer', (date(Y, 6, 21), date(Y, 9, 22))),\n ('autumn', (date(Y, 9, 23), date(Y, 12, 20))),\n ('winter', (date(Y, 12, 21), date(Y, 12, 31)))]\n\ndef get_season(now):\n if isinstance(now, datetime):\n now = now.date()\n now = now.replace(year=Y)\n return next(season for season, (start, end) in seasons\n if start <= now <= end)\n\nprint(get_season(date.today()))\n\nIt is an extended version of @Manuel G answer to support any year.\n", "It might be easier just to use the day of year parameter. It's not much different than your approach, but possibly easier to understand than the magic numbers.\n# get the current day of the year\ndoy = datetime.today().timetuple().tm_yday\n\n# \"day of year\" ranges for the northern hemisphere\nspring = range(80, 172)\nsummer = range(172, 264)\nfall = range(264, 355)\n# winter = everything else\n\nif doy in spring:\n season = 'spring'\nelif doy in summer:\n season = 'summer'\nelif doy in fall:\n season = 'fall'\nelse:\n season = 'winter'\n\n", "I came here looking how to map dates to seasons, and based on this answer I finally solved it in the following way:\ndef season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/'+year, end='20/06/'+year),\n 'summer': pd.date_range(start='21/06/'+year, end='22/09/'+year),\n 'autumn': pd.date_range(start='23/09/'+year, end='20/12/'+year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n# Assuming df has a date column of type `datetime`\ndf['season'] = df.date.map(season_of_date)\n\nSo in principle it works for any year, given a datetime.\n", "The hemisphere that you are in must be taken into account. 
You must determine the hemisphere using geolocation yourself.\ndef season(self, HEMISPHERE):\n date = self.now()\n md = date.month * 100 + date.day\n\n if ((md > 320) and (md < 621)):\n s = 0 #spring\n elif ((md > 620) and (md < 923)):\n s = 1 #summer\n elif ((md > 922) and (md < 1223)):\n s = 2 #fall\n else:\n s = 3 #winter\n\n if not HEMISPHERE == 'north':\n s = (s + 2) % 3\n return s\n\n", "There is no need to deal with years, it is enough to compare the month and day as tuples like this:\nimport datetime\n\n\ndef get_season(date: datetime.datetime, north_hemisphere: bool = True) -> str:\n now = (date.month, date.day)\n if (3, 21) <= now < (6, 21):\n season = 'spring' if north_hemisphere else 'fall'\n elif (6, 21) <= now < (9, 21):\n season = 'summer' if north_hemisphere else 'winter'\n elif (9, 21) <= now < (12, 21):\n season = 'fall' if north_hemisphere else 'spring'\n else:\n season = 'winter' if north_hemisphere else 'summer'\n\n return season\n\n", "I'm too new to comment, and my edit was rejected, so here is corrected code for @adsf reponse.\ndef season(date, hemisphere):\n ''' date is a datetime object\n hemisphere is either 'north' or 'south', dependent on long/lat.\n '''\n md = date.month * 100 + date.day\n\n if ((md > 320) and (md < 621)):\n s = 0 #spring\n elif ((md > 620) and (md < 923)):\n s = 1 #summer\n elif ((md > 922) and (md < 1223)):\n s = 2 #fall\n else:\n s = 3 #winter\n\n if hemisphere != 'north':\n if s < 2:\n s += 2 \n else:\n s -= 2\n\n return s\n\n", "I think you can use pandas.Series.dt.quarter? For example,\ndatetime = pd.Series(pd.to_datetime(['2010-09-30', '2010-04-25', '2010-01-25', '2010-10-29', '2010-12-25']))\nseasons = datetime.dt.quarter\n\nseasons:\n0 3\n1 2\n2 1\n3 4\n4 4\n\nseasons would be what you want?\n", "This is how I finally solved it. I doubt this is the best solution, but it works. Feel free to offer better solutions.\nimport datetime\n\ndef get_season(date):\n \"\"\"\n convert date to month and day as integer (md), e.g. 
4/21 = 421, 11/17 = 1117, etc.\n \"\"\"\n m = date.month * 100\n d = date.day\n md = m + d\n\n if ((md >= 301) and (md <= 531)):\n s = 0 # spring\n elif ((md > 531) and (md < 901)):\n s = 1 # summer\n elif ((md >= 901) and (md <= 1130)):\n s = 2 # fall\n elif ((md > 1130) and (md <= 0229)):\n s = 3 # winter\n else:\n raise IndexError(\"Invalid date\")\n\n return s\n\nseason = get_season(dt.date())\n\n", "This is what i normally use:\nseasons = {'Summer':(datetime(2014,6,21), datetime(2014,9,22)),\n 'Autumn':(datetime(2014,9,23), datetime(2014,12,20)),\n 'Spring':(datetime(2014,3,21), datetime(2014,6,20))}\n\ndef get_season(date):\n for season,(season_start, season_end) in seasons.items():\n if date>=season_start and date<= season_end:\n return season\n else:\n return 'Winter'\n\n", "This is a follow up to the answer by @iipr\nThe function posted does work, but will have to be adapted to the DateTime format you have in your dataset so keep that in mind!\nMine is in the format YYYY-MM-DD, while the original function assumes DD-MM-YYYY.\nThe function will also not work if the DateTime column includes timestamps (which mine did), which necessitates some more adjustments (see below).\ndef season_of_date(date):\nyear = str(date.year)\n\n# Extract a string of only the date of the DateTime column, use that in the if statements.\ndate_only = str(date.date())\n\n# Changed the format of the date_range from DD-MM-YYYY to YYYY-MM-DD.\nseasons = {'spring': pandas.date_range(start=year+'/03/21', end=year+'/06/20'),\n 'summer': pandas.date_range(start=year+'/06/21', end=year+'/09/22'),\n 'autumn': pandas.date_range(start=year+'/09/23', end=year+'/12/20')}\nif date_only in seasons['spring']:\n return 'spring'\nif date_only in seasons['summer']:\n return 'summer'\nif date_only in seasons['autumn']:\n return 'autumn'\nelse:\n return 'winter'\n\n# Assuming df has a date column of type `datetime`\ndf['season'] = df.date.map(season_of_date)\n\nThis should hopefully be a bit more robust.\n", "using python datetime and simple dictionary\nimport datetime\ndate = datetime.datetime.strptime('01/05/2015 01:30:00 PM', \"%m/%d/%Y %H:%M:%S %p\")\n\n//using above created date for the code below\nseasons = {1:[12,1,2],2:[3,4,5],3:[6,7,8],4:[9,10,11]}\nss={}\nfor k,v in seasons.items():\n for e in v:\n ss[e] = k\nprint(ss[date.month])\n\n" ]
[ 23, 18, 9, 3, 3, 2, 2, 1, 1, 1, 0 ]
[]
[]
[ "date", "python", "python_2.6" ]
stackoverflow_0016139306_date_python_python_2.6.txt
Q: How to set a layout once a matplotlib Figure has been created and what are the possible choices? Say that I created a Figure with from matplotlib import pyplot as plt fig = plt.figure() and then AFTER I created it I want to change its layout (I am aware that I can instantiate the Figure object by passing the argument layout to plt.figure, e.g. fig = plt.figure(layout="constrained"), which is not what I want). So far, I know that one could do fig.tight_layout() to set a tight layout, but what if one wants to set a "constrained" layout? Furthermore, other than "tight" and "constrained", what are the other available layouts? A: You can use the fig.set_layout_engine() with matplotlib 3.6.2: As follows: fig = plt.figure() fig.set_layout_engine('constrained') The possible layouts as per documentation are: 'constrained', 'compressed', 'tight'.
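A minimal sketch of switching and inspecting the engine after the figure exists, assuming matplotlib >= 3.6 (where set_layout_engine and get_layout_engine were introduced):

from matplotlib import pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])

# change the layout after the figure has been created
fig.set_layout_engine("constrained")

# inspect which engine is currently active
print(fig.get_layout_engine())  # a ConstrainedLayoutEngine instance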
How to set a layout once a matplotlib Figure has been created and what are the possible choices?
Say that I created a Figure with from matplotlib import pyplot as plt fig = plt.figure() and then AFTER I created it I want to change its layout (I am aware that I can instantiate the Figure object by passing the argument layout to plt.figure, e.g. fig = plt.figure(layout="constrained"), which is not what I want). So far, I know that one could do fig.tight_layout() to set a tight layout, but what if one wants to set a "constrained" layout? Furthermore, other than "tight" and "constrained", what are the other available layouts?
[ "You can use the fig.set_layout_engine() with matplolib-3.6.2 :\nAs follows:\nfig = plt.figure()\nfig.set_layout_engine('constrained')\n\nThe possible layouts as per documentation are: 'constrained', 'compressed', 'tight'.\n" ]
[ 1 ]
[]
[]
[ "matplotlib", "python" ]
stackoverflow_0074528897_matplotlib_python.txt
Q: Error tensorflow not getting imported import tensorflow as tf ModuleNotFoundError: No module named 'tensorflow' in python 3.11.0 There is an error coming in importing tensorflow in Python 3.11 in windows 10 in a machine learning project even though I have imported tensorflow via pip. The code is: from mlforkids import MLforKidsImageProject # treat this key like a password and keep it secret! key = "the key will not be revealed" # this will train your model and might take a little while myproject = MLforKidsImageProject(key) myproject.train_model() # CHANGE THIS to the image file you want to recognize demo = myproject.prediction("mytest.jpg") label = demo["class_name"] confidence = demo["confidence"] # CHANGE THIS to do something different with the result print ("result: '%s' with %d%% confidence" % (label, confidence)) the code for mlforkids.py is : import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf tf.get_logger().setLevel('ERROR') import tensorflow_hub as hub from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras import Sequential from tensorflow.keras.layers import Dropout, Dense from tensorflow.keras.layers.experimental.preprocessing import Rescaling import numpy as np import urllib.request, urllib.error, json from time import sleep # # Helper class for training an image classifier using training data # from the Machine Learning for Kids website. # class MLforKidsImageProject: IMAGESIZE=(224,224) INPUTLAYERSIZE=IMAGESIZE + (3,) # scratchkey is the secret API key that allows access to training # data from a single project on the MLforKids website def __init__(self, scratchkey: str): # register custom HTTP handler opener = urllib.request.build_opener(MLforKidsHTTP()) urllib.request.install_opener(opener) print("MLFORKIDS: Downloading information about your machine learning project") self.scratchkey = scratchkey try: apiurl = self.__switchToTemporarySite("https://machinelearningforkids.co.uk/api/scratch/" + scratchkey + "/train") with urllib.request.urlopen(apiurl) as url: self.__downloaded_training_images_list = json.loads(url.read().decode()) except urllib.error.HTTPError: raise RuntimeError("Unable to retrieve machine learning project - please check that the key is correct") # Generates a name for the local cache file where the downloaded training # image is saved. An image file extension is required, otherwise it will # be ignored by ImageDataGenerator. 
def __get_fname(self, trainingitem): extension = ".png" if trainingitem["imageurl"].lower().endswith(".png") else ".jpg" return trainingitem["id"] + extension # Downloads all of the training images for this project, and sets up an # ImageDataGenerator against the folder where they have been downloaded def __get_training_images_generator(self): print("MLFORKIDS: Getting your training images to use to train your machine learning model") cachedir = "~/.keras/" cachelocation = os.path.join("datasets", "mlforkids", self.scratchkey) projectcachedir = str(os.path.expanduser(os.path.join(cachedir, cachelocation))) for trainingitem in self.__downloaded_training_images_list: try: tf.keras.utils.get_file(origin=self.__switchToTemporarySite(trainingitem["imageurl"]), cache_dir=cachedir, cache_subdir=os.path.join(cachelocation, trainingitem["label"]), fname=self.__get_fname(trainingitem)) # avoid common rate-limiting errors by pausing # for a quarter-second between each download sleep(0.25) except Exception as downloaderr: print("ERROR: Unable to download training image from", trainingitem["imageurl"]) print(downloaderr) print("ERROR: Skipping training image and continuing without it", trainingitem["imageurl"]) return ImageDataGenerator().flow_from_directory(str(projectcachedir), target_size=MLforKidsImageProject.IMAGESIZE) # Creates a lookup table for the classes that this project is being trained # to recognize. # TODO : dumb implementation - should rewrite def __get_class_lookup(self, training_image_data): class_labels = [None]*training_image_data.num_classes class_names = training_image_data.class_indices.keys() for classname in class_names: class_labels[training_image_data.class_indices[classname]] = classname return class_labels # Defines a simple image classifier based on a mobilenet model from TensorFlow hub def __define_model(self): print("MLFORKIDS: Defining the layers to include in your neural network") model = Sequential([ # input layer is resizing all images to save having to do that in a manual pre-processing step Rescaling(1/127, input_shape=MLforKidsImageProject.INPUTLAYERSIZE), # using an existing pre-trained model as an untrainable main layer hub.KerasLayer("https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/classification/5"), # Dropout(rate=0.2), # Dense(self.num_classes) ]) model.build((None,) + MLforKidsImageProject.INPUTLAYERSIZE) # model compile parameters copied from tutorial at https://www.tensorflow.org/hub/tutorials/tf2_image_retraining model.compile( optimizer=tf.keras.optimizers.SGD(learning_rate=0.005, momentum=0.9), loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=0.1), metrics=['accuracy']) return model # Runs the model fit function to train the tl model def __train_model(self, trainingimagesdata): print("MLFORKIDS: Starting the training of your machine learning model") if trainingimagesdata.batch_size > trainingimagesdata.samples: trainingimagesdata.batch_size = trainingimagesdata.samples steps_per_epoch = trainingimagesdata.samples // trainingimagesdata.batch_size epochs = 8 if trainingimagesdata.samples > 55: epochs = 15 self.ml_model.fit(trainingimagesdata, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=0) print("MLFORKIDS: Model training complete") # Cloudflare is currently blocking access to the Machine Learning for Kids API # from non-browser user agents # While I raise this with them to get this unblocked, switching to this # temporary URL should avoid the problem # # TODO: remove this function as soon as Cloudflare 
have # stopped breaking Python apps # def __switchToTemporarySite(self, url): return url.replace("https://machinelearningforkids.co.uk/api/scratch/", "https://mlforkids-api.j8clybxvjr0.us-south.codeengine.appdomain.cloud/api/scratch/") # # public methods # # Fetches the training data for this project, and uses it to train a machine learning model def train_model(self): training_images = self.__get_training_images_generator() self.num_classes = training_images.num_classes self.ml_class_names = self.__get_class_lookup(training_images) self.ml_model = self.__define_model() self.__train_model(training_images) # Returns a prediction for the image at the specified location def prediction(self, image_location: str): if hasattr(self, "ml_model") == False: raise RuntimeError("Machine learning model has not been trained for this project") testimg = image.load_img(image_location, target_size=MLforKidsImageProject.IMAGESIZE) testimg = image.img_to_array(testimg) testimg = np.expand_dims(testimg, axis=0) predictions = self.ml_model.predict(testimg) topprediction = predictions[0] topanswer = np.argmax(topprediction) return { "class_name": self.ml_class_names[topanswer], "confidence": 100 * np.max(tf.nn.softmax(topprediction)) } # # Helper class for making HTTP requests to fetch training images # for machine learning projects # # It adds a user-agent header so that when scraping images from # third-party websites, the Python code correctly identifies # itself, so that appropriate rate-limiting can be applied. # class MLforKidsHTTP(urllib.request.HTTPHandler): def http_request(self, req): req.headers["User-Agent"] = "MachineLearningForKidsPythonBot/1.0" return super().http_request(req) I have also installed the requirements which are : numpy==1.23.4 Pillow==9.2.0 scipy==1.9.3 tensorflow==2.10.0 tensorflow-hub==0.12.0 Please help me in this problem as soon as possible. Thank you A: As per the tested build configurations from the official Tensorflow documentation, Tensorflow 2.10 is compatible with Python versions 3.7 - 3.10. Kindly try again by downgrading the version of Python. You can find the build configurations here. Thank you!
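Since the traceback comes from tensorflow==2.10.0 simply having no wheels for Python 3.11, one way to make the mismatch obvious early is a version guard at the top of the script. This is only an illustrative sketch, not part of the original project:

import sys

# TensorFlow 2.10 is only built for Python 3.7-3.10 (per the tested build configurations)
if not ((3, 7) <= sys.version_info[:2] <= (3, 10)):
    raise RuntimeError(
        f"tensorflow==2.10.0 has no wheels for Python "
        f"{sys.version_info.major}.{sys.version_info.minor}; "
        "recreate the virtual environment with Python 3.10 or earlier."
    )

import tensorflow as tf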
Error tensorflow not getting imported import tensorflow as tf ModuleNotFoundError: No module named 'tensorflow' in python 3.11.0
There is an error coming in importing tensorflow in Python 3.11 in windows 10 in a machine learning project even though I have imported tensorflow via pip. The code is: from mlforkids import MLforKidsImageProject # treat this key like a password and keep it secret! key = "the key will not be revealed" # this will train your model and might take a little while myproject = MLforKidsImageProject(key) myproject.train_model() # CHANGE THIS to the image file you want to recognize demo = myproject.prediction("mytest.jpg") label = demo["class_name"] confidence = demo["confidence"] # CHANGE THIS to do something different with the result print ("result: '%s' with %d%% confidence" % (label, confidence)) the code for mlforkids.py is : import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf tf.get_logger().setLevel('ERROR') import tensorflow_hub as hub from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras import Sequential from tensorflow.keras.layers import Dropout, Dense from tensorflow.keras.layers.experimental.preprocessing import Rescaling import numpy as np import urllib.request, urllib.error, json from time import sleep # # Helper class for training an image classifier using training data # from the Machine Learning for Kids website. # class MLforKidsImageProject: IMAGESIZE=(224,224) INPUTLAYERSIZE=IMAGESIZE + (3,) # scratchkey is the secret API key that allows access to training # data from a single project on the MLforKids website def __init__(self, scratchkey: str): # register custom HTTP handler opener = urllib.request.build_opener(MLforKidsHTTP()) urllib.request.install_opener(opener) print("MLFORKIDS: Downloading information about your machine learning project") self.scratchkey = scratchkey try: apiurl = self.__switchToTemporarySite("https://machinelearningforkids.co.uk/api/scratch/" + scratchkey + "/train") with urllib.request.urlopen(apiurl) as url: self.__downloaded_training_images_list = json.loads(url.read().decode()) except urllib.error.HTTPError: raise RuntimeError("Unable to retrieve machine learning project - please check that the key is correct") # Generates a name for the local cache file where the downloaded training # image is saved. An image file extension is required, otherwise it will # be ignored by ImageDataGenerator. 
def __get_fname(self, trainingitem): extension = ".png" if trainingitem["imageurl"].lower().endswith(".png") else ".jpg" return trainingitem["id"] + extension # Downloads all of the training images for this project, and sets up an # ImageDataGenerator against the folder where they have been downloaded def __get_training_images_generator(self): print("MLFORKIDS: Getting your training images to use to train your machine learning model") cachedir = "~/.keras/" cachelocation = os.path.join("datasets", "mlforkids", self.scratchkey) projectcachedir = str(os.path.expanduser(os.path.join(cachedir, cachelocation))) for trainingitem in self.__downloaded_training_images_list: try: tf.keras.utils.get_file(origin=self.__switchToTemporarySite(trainingitem["imageurl"]), cache_dir=cachedir, cache_subdir=os.path.join(cachelocation, trainingitem["label"]), fname=self.__get_fname(trainingitem)) # avoid common rate-limiting errors by pausing # for a quarter-second between each download sleep(0.25) except Exception as downloaderr: print("ERROR: Unable to download training image from", trainingitem["imageurl"]) print(downloaderr) print("ERROR: Skipping training image and continuing without it", trainingitem["imageurl"]) return ImageDataGenerator().flow_from_directory(str(projectcachedir), target_size=MLforKidsImageProject.IMAGESIZE) # Creates a lookup table for the classes that this project is being trained # to recognize. # TODO : dumb implementation - should rewrite def __get_class_lookup(self, training_image_data): class_labels = [None]*training_image_data.num_classes class_names = training_image_data.class_indices.keys() for classname in class_names: class_labels[training_image_data.class_indices[classname]] = classname return class_labels # Defines a simple image classifier based on a mobilenet model from TensorFlow hub def __define_model(self): print("MLFORKIDS: Defining the layers to include in your neural network") model = Sequential([ # input layer is resizing all images to save having to do that in a manual pre-processing step Rescaling(1/127, input_shape=MLforKidsImageProject.INPUTLAYERSIZE), # using an existing pre-trained model as an untrainable main layer hub.KerasLayer("https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/classification/5"), # Dropout(rate=0.2), # Dense(self.num_classes) ]) model.build((None,) + MLforKidsImageProject.INPUTLAYERSIZE) # model compile parameters copied from tutorial at https://www.tensorflow.org/hub/tutorials/tf2_image_retraining model.compile( optimizer=tf.keras.optimizers.SGD(learning_rate=0.005, momentum=0.9), loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=0.1), metrics=['accuracy']) return model # Runs the model fit function to train the tl model def __train_model(self, trainingimagesdata): print("MLFORKIDS: Starting the training of your machine learning model") if trainingimagesdata.batch_size > trainingimagesdata.samples: trainingimagesdata.batch_size = trainingimagesdata.samples steps_per_epoch = trainingimagesdata.samples // trainingimagesdata.batch_size epochs = 8 if trainingimagesdata.samples > 55: epochs = 15 self.ml_model.fit(trainingimagesdata, epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=0) print("MLFORKIDS: Model training complete") # Cloudflare is currently blocking access to the Machine Learning for Kids API # from non-browser user agents # While I raise this with them to get this unblocked, switching to this # temporary URL should avoid the problem # # TODO: remove this function as soon as Cloudflare 
have # stopped breaking Python apps # def __switchToTemporarySite(self, url): return url.replace("https://machinelearningforkids.co.uk/api/scratch/", "https://mlforkids-api.j8clybxvjr0.us-south.codeengine.appdomain.cloud/api/scratch/") # # public methods # # Fetches the training data for this project, and uses it to train a machine learning model def train_model(self): training_images = self.__get_training_images_generator() self.num_classes = training_images.num_classes self.ml_class_names = self.__get_class_lookup(training_images) self.ml_model = self.__define_model() self.__train_model(training_images) # Returns a prediction for the image at the specified location def prediction(self, image_location: str): if hasattr(self, "ml_model") == False: raise RuntimeError("Machine learning model has not been trained for this project") testimg = image.load_img(image_location, target_size=MLforKidsImageProject.IMAGESIZE) testimg = image.img_to_array(testimg) testimg = np.expand_dims(testimg, axis=0) predictions = self.ml_model.predict(testimg) topprediction = predictions[0] topanswer = np.argmax(topprediction) return { "class_name": self.ml_class_names[topanswer], "confidence": 100 * np.max(tf.nn.softmax(topprediction)) } # # Helper class for making HTTP requests to fetch training images # for machine learning projects # # It adds a user-agent header so that when scraping images from # third-party websites, the Python code correctly identifies # itself, so that appropriate rate-limiting can be applied. # class MLforKidsHTTP(urllib.request.HTTPHandler): def http_request(self, req): req.headers["User-Agent"] = "MachineLearningForKidsPythonBot/1.0" return super().http_request(req) I have also installed the requirements which are : numpy==1.23.4 Pillow==9.2.0 scipy==1.9.3 tensorflow==2.10.0 tensorflow-hub==0.12.0 Please help me in this problem as soon as possible. Thank you
[ "As per the tested build configurations from the official Tensorflow documentation, Tensorflow 2.10 is compatible with Python versions 3.7 - 3.10. Kindly try again by downgrading the version of Python. You can find the build configurations here. Thank you!\n" ]
[ 0 ]
[]
[]
[ "machine_learning", "python", "tensorflow" ]
stackoverflow_0074401578_machine_learning_python_tensorflow.txt
Q: How can I classify a column of strings with true and false values by comparing with another column of strings So I have a column of strings that is listed as "compounds" Composition (column title) ZrMo3 Gd(CuS)3 Ba2DyInTe5 I have another column that has strings metal elements from the periodic table and i'll call that column "metals" Elements (column title) Li Be Na The objective is to check each string from "compounds" with every single string listed in "metals" and if any string from metals is there then it would be classified as true. Any ideas how I can code this? Example: (if "metals" has Zr, Ag, and Te) ZrMo3 True Gd(CuS)3 False Ba2DyInTe5 True I recently tried using this code below, but I ended up getting all false asd = subset['composition'].isin(metals['Elements']) print(asd) also tried this code and got all false as well subset['Boolean'] = subset.apply(lambda x: True if any(word in x.composition for word in metals) else False, axis=1) A: assuming you are using pandas, you can use a list comprehension inside your lambda since you essentially need to iterate over all elements in the elements list import pandas as pd elements = ['Li', 'Be', 'Na', 'Te'] compounds = ['ZrMo3', 'Gd(CuS)3', 'Ba2DyInTe5'] df = pd.DataFrame(compounds, columns=['compounds']) print(df) output compounds 0 ZrMo3 1 Gd(CuS)3 2 Ba2DyInTe5 df['boolean'] = df.compounds.apply(lambda x: any([True if el in x else False for el in elements])) print(df) output compounds boolean 0 ZrMo3 False 1 Gd(CuS)3 False 2 Ba2DyInTe5 True if you are not using pandas, you can apply the lambda function to the lists with the map function out = list( map( lambda x: any([True if el in x else False for el in elements]), compounds) ) print(out) output [False, False, True] here would be a more complex version which also tackles the potential errors @Ezon mentioned based on the regular expression matching module re. since this approach is essentially looping not only over the elements to compare with a single compound string but also over each constituent of the compounds I made two helper functions for it to be more readable. import re import pandas as pd def split_compounds(c): # remove all non-alphabet elements c_split = re.sub(r"[^a-zA-Z]", "", c) # split string at capital letters c_split = '-'.join(re.findall('[A-Z][^A-Z]*', c_split)) return c_split def compare_compound(compound, element): # split compound into list compound_list = compound.split('-') return any([element == c for c in compound_list]) # build sample data compounds = ['SiO2', 'Ba2DyInTe5', 'ZrMo3', 'Gd(CuS)3'] elements = ['Li', 'Be', 'Na', 'Te', 'S'] df = pd.DataFrame(compounds, columns=['compounds']) # split compounds into elements df['compounds_elements'] = [split_compounds(x) for x in compounds] print(df) output compounds compounds_elements 0 SiO2 Si-O 1 Ba2DyInTe5 Ba-Dy-In-Te 2 ZrMo3 Zr-Mo 3 Gd(CuS)3 Gd-Cu-S # check if any item from 'elements' is in the compounds df['boolean'] = df.compounds_elements.apply( lambda x: any([True if compare_compound(x, el) else False for el in elements]) ) print(df) output compounds compounds_elements boolean 0 SiO2 Si-O False 1 Ba2DyInTe5 Ba-Dy-In-Te True 2 ZrMo3 Zr-Mo False 3 Gd(CuS)3 Gd-Cu-S True
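When plain substring matching of the element symbols is acceptable, the check can also be written in one vectorized step with str.contains and a regex alternation. Note that substring matching can give false positives (e.g. 'S' would also match inside 'Si'), which the regex-splitting approach in the answer above addresses. A minimal sketch with the sample values from the question:

import pandas as pd

metals = pd.Series(["Zr", "Ag", "Te"], name="Elements")
subset = pd.DataFrame({"composition": ["ZrMo3", "Gd(CuS)3", "Ba2DyInTe5"]})

# build one pattern "Zr|Ag|Te" and test every compound against it at once
pattern = "|".join(metals)
subset["Boolean"] = subset["composition"].str.contains(pattern, regex=True)
print(subset)
#   composition  Boolean
# 0       ZrMo3     True
# 1    Gd(CuS)3    False
# 2  Ba2DyInTe5     True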
How can I classify a column of strings with true and false values by comparing with another column of strings
So I have a column of strings that is listed as "compounds" Composition (column title) ZrMo3 Gd(CuS)3 Ba2DyInTe5 I have another column that has strings metal elements from the periodic table and i'll call that column "metals" Elements (column title) Li Be Na The objective is to check each string from "compounds" with every single string listed in "metals" and if any string from metals is there then it would be classified as true. Any ideas how I can code this? Example: (if "metals" has Zr, Ag, and Te) ZrMo3 True Gd(CuS)3 False Ba2DyInTe5 True I recently tried using this code below, but I ended up getting all false asd = subset['composition'].isin(metals['Elements']) print(asd) also tried this code and got all false as well subset['Boolean'] = subset.apply(lambda x: True if any(word in x.composition for word in metals) else False, axis=1)
[ "assuming you are using pandas, you can use a list comprehension inside your lambda since you essentially need to iterate over all elements in the elements list\nimport pandas as pd\n\nelements = ['Li', 'Be', 'Na', 'Te']\ncompounds = ['ZrMo3', 'Gd(CuS)3', 'Ba2DyInTe5']\n\ndf = pd.DataFrame(compounds, columns=['compounds'])\nprint(df)\n\noutput\n compounds\n0 ZrMo3\n1 Gd(CuS)3\n2 Ba2DyInTe5\n\ndf['boolean'] = df.compounds.apply(lambda x: any([True if el in x else False for el in elements]))\nprint(df)\n\noutput\n compounds boolean\n0 ZrMo3 False\n1 Gd(CuS)3 False\n2 Ba2DyInTe5 True\n\nif you are not using pandas, you can apply the lambda function to the lists with the map function\nout = list(\n map(\n lambda x: any([True if el in x else False for el in elements]), compounds)\n)\nprint(out)\n\noutput\n[False, False, True]\n\nhere would be a more complex version which also tackles the potential errors @Ezon mentioned based on the regular expression matching module re. since this approach is essentially looping not only over the elements to compare with a single compound string but also over each constituent of the compounds I made two helper functions for it to be more readable.\nimport re\nimport pandas as pd\n\n\ndef split_compounds(c):\n \n # remove all non-alphabet elements\n c_split = re.sub(r\"[^a-zA-Z]\", \"\", c)\n # split string at capital letters\n c_split = '-'.join(re.findall('[A-Z][^A-Z]*', c_split))\n return c_split\n\ndef compare_compound(compound, element):\n \n # split compound into list\n compound_list = compound.split('-')\n \n return any([element == c for c in compound_list])\n \n \n# build sample data\ncompounds = ['SiO2', 'Ba2DyInTe5', 'ZrMo3', 'Gd(CuS)3']\nelements = ['Li', 'Be', 'Na', 'Te', 'S']\ndf = pd.DataFrame(compounds, columns=['compounds'])\n\n# split compounds into elements\ndf['compounds_elements'] = [split_compounds(x) for x in compounds]\n\nprint(df)\n\noutput\n compounds compounds_elements\n0 SiO2 Si-O\n1 Ba2DyInTe5 Ba-Dy-In-Te\n2 ZrMo3 Zr-Mo\n3 Gd(CuS)3 Gd-Cu-S\n\n\n# check if any item from 'elements' is in the compounds\ndf['boolean'] = df.compounds_elements.apply(\n lambda x: any([True if compare_compound(x, el) else False for el in elements])\n)\n\nprint(df)\n\noutput\n compounds compounds_elements boolean\n0 SiO2 Si-O False\n1 Ba2DyInTe5 Ba-Dy-In-Te True\n2 ZrMo3 Zr-Mo False\n3 Gd(CuS)3 Gd-Cu-S True\n\n" ]
[ 2 ]
[]
[]
[ "chemistry", "python" ]
stackoverflow_0074527231_chemistry_python.txt
Q: Formulate strict constraints in docplex I am trying to model the following strict constraint in Python with docplex: mdl.add_constraint(sum(a[i] * mdl.variable[i] for i in range(nrItems)) > b) but I keep getting the error: docplex.mp.utils.DOcplexException: Unsupported relational operator: only <=, ==, >= are allowed How can one program a strict constraint in docplex? A: MIP solvers do not support < and > as these do not make much sense when continuous variables (or relaxations) are involved (both from a mathematical point and from a numerical point of view). A: You could use a small epsilon and turn mdl.add_constraint(sum(a[i] * mdl.variable[i] for i in range(nrItems)) > b) into epsilon=0.00001 mdl.add_constraint(sum(a[i] * mdl.variable[i] for i in range(nrItems)) >= b+epsilon)
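If the left-hand side is integer-valued (for example when the variables are integer or binary and the coefficients are integers), the strict inequality can be written exactly as >= b + 1 with no epsilon at all. A minimal, self-contained sketch with placeholder data and variable names:

from docplex.mp.model import Model

a = [3, 5, 2]   # placeholder coefficients
b = 10          # placeholder right-hand side

mdl = Model(name="strict_gt_example")
x = mdl.integer_var_list(len(a), name="x")

# for integer expressions, "> b" is equivalent to ">= b + 1"
mdl.add_constraint(mdl.sum(a[i] * x[i] for i in range(len(a))) >= b + 1)

For continuous expressions this shortcut does not apply, and the epsilon workaround shown in the second answer is the usual approach.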
Formulate strict constraints in docplex
I am trying to model the following strict constraint in Python with docplex: mdl.add_constraint(sum(a[i] * mdl.variable[i] for i in range(nrItems)) > b) but I keep getting the error: docplex.mp.utils.DOcplexException: Unsupported relational operator: only <=, ==, >= are allowed How can one program a strict constraint in docplex?
[ "MIP solvers do not support < and > as these do not make much sense when continuous variables (or relaxations) are involved (both from a mathematical point and from a numerical point of view).\n", "You could use a small epsilon and turn\nmdl.add_constraint(sum(a[i] * mdl.variable[i] for i in range(nrItems)) > b)\n\ninto\nepsilon=0.00001\nmdl.add_constraint(sum(a[i] * mdl.variable[i] for i in range(nrItems)) >= b+epsilon)\n\n" ]
[ 0, 0 ]
[]
[]
[ "constraints", "docplex", "mathematical_optimization", "python" ]
stackoverflow_0074475444_constraints_docplex_mathematical_optimization_python.txt
Q: How to sum up values in a dataframe and add them to another one? I have two dataframes, one for individual transactions and another for the chart of accounts. I'm trying to sum up all transactions for the last month (in this case, March) for each CompanyKey. I then want to add this result as a new column to the chart of accounts dataframe with the CompanyKey as the column header. Here is a small sample of the transaction data (In reality there are thousands of transactions): import pandas as pd df = pd.DataFrame({ 'CompanyKey': ["1","1","1","1","1","1","1","2","2","2"], 'DateOccurred': ["31/12/2021","25/02/2022","15/03/2022","31/03/2022","31/12/2021","22/02/2022","16/03/2022","31/12/2021","25/02/2022","31/03/2022"], 'Account.Name': ["Cash at Bank","Cash at Bank","Cash at Bank","Cash at Bank","GST Paid","GST Paid","GST Paid","Cash at Bank","Cash at Bank","Cash at Bank"], 'Amount': [150,112200,234065,19167.08,-39080.03,-10200,-27.5,15000,-234567,340697]}) Here are the corresponding chart of accounts: df1 = pd.DataFrame({ 'ConsolidatedAccountName': ["Cash at Bank","GST Paid", "Cash at Bank", "GST Paid"], 'Level 1': ["Fund Statement","Fund Statement", "Cash Flow Statement", "Cash Flow Statement"], 'Level 2': ["Cash at Bank","GST Paid", "Cash at Bank", "GST Paid"]}) This is my desired result. I only want the Sums to be applied to rows which have a df['Level 1'] == "Fund Statement". +──────────────────────────+──────────────────────+───────────────+────────────────+────────────────+ | ConsolidatedAccountName | Level 1 | Level 2 | Company 1 Sum | Company 2 Sum | +──────────────────────────+──────────────────────+───────────────+────────────────+────────────────+ | Cash at Bank | Fund Statement | Cash at Bank | 253,232.08 | 340,697 | | GST Paid | Fund Statement | GST Paid | -27.50 | 0 | | Cash at Bank | Cash Flow Statement | Cash at Bank | NaN | NaN | | GST Paid | Cash Flow Statement | GST Paid | NaN | NaN | +──────────────────────────+──────────────────────+───────────────+────────────────+────────────────+ This is about as far as I got before running into issues. company_keys = [1, 2] for company in company_keys: d1['Company 1 Sum'] = np.where((d3['CompanyKey'] == company) & (d3['DateOccurred'] >= '01/03/2022') & (d3['DateOccurred'] <= '31/03/2022') & (d1['Level 1'] == 'Fund Statement'), d3['Amount'].sum(), 0) This is the error I get. ValueError: Length of values (10) does not match length of index (4) A: Here is one way to do it with Pandas groupby and apply: # Setup df["DateOccurred"] = pd.to_datetime(df["DateOccurred"], format="%d/%m/%Y") # Sum transactions per companies and accounts df_sum = ( df.loc[df["DateOccurred"].dt.month == 3, :] .groupby(["CompanyKey", "Account.Name"]) .agg({"Amount": sum}) ) # Add new columns for idx in df["CompanyKey"].unique(): df1[f"Company {idx} Sum"] = df1.apply( lambda x: df_sum.loc[(idx, x["ConsolidatedAccountName"]), "Amount"] if (x["ConsolidatedAccountName"] in df_sum.loc[(idx), :].index.unique()) and (x["Level 1"] == "Fund Statement") else None, axis=1, ) # Cleanup df1.loc[df1["Level 1"] == "Fund Statement"] = df1.loc[ df1["Level 1"] == "Fund Statement" ].fillna(0) Then: print(df1) # Output ConsolidatedAccountName Level 1 Level 2 Company 1 Sum Company 2 Sum 0 Cash at Bank Fund Statement Cash at Bank 253232.08 340697.0 1 GST Paid Fund Statement GST Paid -27.50 0.0 2 Cash at Bank Cash Flow Statement Cash at Bank NaN NaN 3 GST Paid Cash Flow Statement GST Paid NaN NaN
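An alternative to the groupby/apply approach is to build the per-company March totals with pivot_table and then map them onto the Fund Statement rows only. This is a sketch using the sample frames and column names given in the question:

import pandas as pd

df = pd.DataFrame({
    'CompanyKey': ["1", "1", "1", "1", "1", "1", "1", "2", "2", "2"],
    'DateOccurred': ["31/12/2021", "25/02/2022", "15/03/2022", "31/03/2022", "31/12/2021",
                     "22/02/2022", "16/03/2022", "31/12/2021", "25/02/2022", "31/03/2022"],
    'Account.Name': ["Cash at Bank", "Cash at Bank", "Cash at Bank", "Cash at Bank", "GST Paid",
                     "GST Paid", "GST Paid", "Cash at Bank", "Cash at Bank", "Cash at Bank"],
    'Amount': [150, 112200, 234065, 19167.08, -39080.03, -10200, -27.5, 15000, -234567, 340697]})
df1 = pd.DataFrame({
    'ConsolidatedAccountName': ["Cash at Bank", "GST Paid", "Cash at Bank", "GST Paid"],
    'Level 1': ["Fund Statement", "Fund Statement", "Cash Flow Statement", "Cash Flow Statement"],
    'Level 2': ["Cash at Bank", "GST Paid", "Cash at Bank", "GST Paid"]})

df["DateOccurred"] = pd.to_datetime(df["DateOccurred"], dayfirst=True)
march = df[(df["DateOccurred"].dt.year == 2022) & (df["DateOccurred"].dt.month == 3)]

# one column of March totals per company, indexed by account name
sums = march.pivot_table(index="Account.Name", columns="CompanyKey",
                         values="Amount", aggfunc="sum", fill_value=0)
sums.columns = [f"Company {c} Sum" for c in sums.columns]

# attach the totals only to the Fund Statement rows; the other rows stay NaN
fund = df1["Level 1"] == "Fund Statement"
for col in sums.columns:
    df1.loc[fund, col] = df1.loc[fund, "ConsolidatedAccountName"].map(sums[col])

print(df1)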
How to sum up values in a dataframe and add them to another one?
I have two dataframes, one for individual transactions and another for the chart of accounts. I'm trying to sum up all transactions for the last month (in this case, March) for each CompanyKey. I then want to add this result as a new column to the chart of accounts dataframe with the CompanyKey as the column header. Here is a small sample of the transaction data (In reality there are thousands of transactions): import pandas as pd df = pd.DataFrame({ 'CompanyKey': ["1","1","1","1","1","1","1","2","2","2"], 'DateOccurred': ["31/12/2021","25/02/2022","15/03/2022","31/03/2022","31/12/2021","22/02/2022","16/03/2022","31/12/2021","25/02/2022","31/03/2022"], 'Account.Name': ["Cash at Bank","Cash at Bank","Cash at Bank","Cash at Bank","GST Paid","GST Paid","GST Paid","Cash at Bank","Cash at Bank","Cash at Bank"], 'Amount': [150,112200,234065,19167.08,-39080.03,-10200,-27.5,15000,-234567,340697]}) Here are the corresponding chart of accounts: df1 = pd.DataFrame({ 'ConsolidatedAccountName': ["Cash at Bank","GST Paid", "Cash at Bank", "GST Paid"], 'Level 1': ["Fund Statement","Fund Statement", "Cash Flow Statement", "Cash Flow Statement"], 'Level 2': ["Cash at Bank","GST Paid", "Cash at Bank", "GST Paid"]}) This is my desired result. I only want the Sums to be applied to rows which have a df['Level 1'] == "Fund Statement". +──────────────────────────+──────────────────────+───────────────+────────────────+────────────────+ | ConsolidatedAccountName | Level 1 | Level 2 | Company 1 Sum | Company 2 Sum | +──────────────────────────+──────────────────────+───────────────+────────────────+────────────────+ | Cash at Bank | Fund Statement | Cash at Bank | 253,232.08 | 340,697 | | GST Paid | Fund Statement | GST Paid | -27.50 | 0 | | Cash at Bank | Cash Flow Statement | Cash at Bank | NaN | NaN | | GST Paid | Cash Flow Statement | GST Paid | NaN | NaN | +──────────────────────────+──────────────────────+───────────────+────────────────+────────────────+ This is about as far as I got before running into issues. company_keys = [1, 2] for company in company_keys: d1['Company 1 Sum'] = np.where((d3['CompanyKey'] == company) & (d3['DateOccurred'] >= '01/03/2022') & (d3['DateOccurred'] <= '31/03/2022') & (d1['Level 1'] == 'Fund Statement'), d3['Amount'].sum(), 0) This is the error I get. ValueError: Length of values (10) does not match length of index (4)
[ "Here is one way to do it with Pandas groupby and apply:\n# Setup\ndf[\"DateOccurred\"] = pd.to_datetime(df[\"DateOccurred\"], format=\"%d/%m/%Y\")\n\n# Sum transactions per companies and accounts\ndf_sum = (\n df.loc[df[\"DateOccurred\"].dt.month == 3, :]\n .groupby([\"CompanyKey\", \"Account.Name\"])\n .agg({\"Amount\": sum})\n)\n\n# Add new columns\nfor idx in df[\"CompanyKey\"].unique():\n df1[f\"Company {idx} Sum\"] = df1.apply(\n lambda x: df_sum.loc[(idx, x[\"ConsolidatedAccountName\"]), \"Amount\"]\n if (x[\"ConsolidatedAccountName\"] in df_sum.loc[(idx), :].index.unique())\n and (x[\"Level 1\"] == \"Fund Statement\")\n else None,\n axis=1,\n )\n\n# Cleanup\ndf1.loc[df1[\"Level 1\"] == \"Fund Statement\"] = df1.loc[\n df1[\"Level 1\"] == \"Fund Statement\"\n].fillna(0)\n\nThen:\nprint(df1)\n# Output\n\nConsolidatedAccountName Level 1 Level 2 Company 1 Sum Company 2 Sum\n0 Cash at Bank Fund Statement Cash at Bank 253232.08 340697.0\n1 GST Paid Fund Statement GST Paid -27.50 0.0\n2 Cash at Bank Cash Flow Statement Cash at Bank NaN NaN\n3 GST Paid Cash Flow Statement GST Paid NaN NaN\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074426326_pandas_python.txt
Q: Can't add exploded data in mysql database with pandas I want to insert in my database exploded data using pandas but I get an error, can someone help My Code tactic_theme = pandas.read_csv(link, usecols=(0, 7)) tactic_theme.columns = ['code_tac', 'code_th'] tactic_theme['code_th'] = tactic_theme.code_th.str.split(" ") tactic_theme.explode('code_th') tactic_theme.to_sql('tactic_themes', con = engine, if_exists='replace',index=False,chunksize = 1000) The error sqlalchemy.exc.OperationalError: (pymysql.err.OperationalError) (1241, 'Operand should contain 1 column(s)') [SQL: INSERT INTO tactic_themes (code_tac, code_th) VALUES (%(code_tac)s, %(code_th)s)] [parameters: ({'code_tac': '0000D', 'code_th': ['advantage', 'endgame', 'short']}, {'code_tac': '0009B', 'code_th': ['advantage', 'middlegame', 'short']}, {'code_tac': '000Vc', 'code_th': ['crushing', 'endgame', 'long', 'pawnEndgame']}, {'code_tac': '000Zo', 'code_th': ['endgame', 'mate', 'mateIn2', 'short']}, {'code_tac': '000aY', 'code_th': ['advantage', 'master', 'middlegame', 'short']}, {'code_tac': '000hf', 'code_th': ['mate', 'mateIn2', 'middlegame', 'short']}, {'code_tac': '000mr', 'code_th': ['crushing', 'middlegame', 'short']}, {'code_tac': '000rO', 'code_th': ['crushing', 'endgame', 'fork', 'short']} ... displaying 10 of 1000 total bound parameter sets ... {'code_tac': '01HAq', 'code_th': ['advantage', 'opening', 'short']}, {'code_tac': '01HV2', 'code_th': ['mate', 'mateIn1', 'middlegame', 'oneMove']})] A: I just saw the error,on the line tactic_theme.explode('code_th') i should write tactic_theme = tactic_theme.explode('code_th')
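For completeness, a minimal sketch of the corrected flow described in the answer above — explode returns a new DataFrame, so its result has to be assigned back. The connection string and CSV path are placeholders:

import pandas
from sqlalchemy import create_engine

engine = create_engine("mysql+pymysql://user:password@localhost/chess")  # placeholder credentials
tactic_theme = pandas.read_csv("puzzles.csv", usecols=(0, 7))             # placeholder path
tactic_theme.columns = ['code_tac', 'code_th']
tactic_theme['code_th'] = tactic_theme.code_th.str.split(" ")

# assign the exploded frame back instead of discarding it
tactic_theme = tactic_theme.explode('code_th')

tactic_theme.to_sql('tactic_themes', con=engine, if_exists='replace', index=False, chunksize=1000)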
Can't add exploded data in mysql database with pandas
I want to insert in my database exploded data using pandas but I get an error, can someone help My Code tactic_theme = pandas.read_csv(link, usecols=(0, 7)) tactic_theme.columns = ['code_tac', 'code_th'] tactic_theme['code_th'] = tactic_theme.code_th.str.split(" ") tactic_theme.explode('code_th') tactic_theme.to_sql('tactic_themes', con = engine, if_exists='replace',index=False,chunksize = 1000) The error sqlalchemy.exc.OperationalError: (pymysql.err.OperationalError) (1241, 'Operand should contain 1 column(s)') [SQL: INSERT INTO tactic_themes (code_tac, code_th) VALUES (%(code_tac)s, %(code_th)s)] [parameters: ({'code_tac': '0000D', 'code_th': ['advantage', 'endgame', 'short']}, {'code_tac': '0009B', 'code_th': ['advantage', 'middlegame', 'short']}, {'code_tac': '000Vc', 'code_th': ['crushing', 'endgame', 'long', 'pawnEndgame']}, {'code_tac': '000Zo', 'code_th': ['endgame', 'mate', 'mateIn2', 'short']}, {'code_tac': '000aY', 'code_th': ['advantage', 'master', 'middlegame', 'short']}, {'code_tac': '000hf', 'code_th': ['mate', 'mateIn2', 'middlegame', 'short']}, {'code_tac': '000mr', 'code_th': ['crushing', 'middlegame', 'short']}, {'code_tac': '000rO', 'code_th': ['crushing', 'endgame', 'fork', 'short']} ... displaying 10 of 1000 total bound parameter sets ... {'code_tac': '01HAq', 'code_th': ['advantage', 'opening', 'short']}, {'code_tac': '01HV2', 'code_th': ['mate', 'mateIn1', 'middlegame', 'oneMove']})]
[ "I just saw the error,on the line tactic_theme.explode('code_th') i should write\ntactic_theme = tactic_theme.explode('code_th')\n\n" ]
[ 0 ]
[]
[]
[ "mysql", "pandas", "python" ]
stackoverflow_0074474274_mysql_pandas_python.txt
Q: Module "Numpy" not found despite already installed in system I have a problem thats been stumping me for days. I wanted to run this GAIN program on my local system though command line (https://github.com/jsyoon0823/GAIN) so I downloaded it, installed Python for the first time because Python was not found; run without arguments to install from the Microsoft Store, or disable this shortcut from Settings > Manage App Execution Aliases. then created a virtual environment. I ran pip install -r requirements.txt (logs) When I ran the program it said (GAINenv) C:\Users\Admin\Downloads\GAIN-master\GAIN-master>python3 main_letter_spam.py --data_name spam --miss_rate: 0.2 --batch_size 128 --hint_rate 0.9 --alpha 100 --iterations 10000 Traceback (most recent call last): File "C:\Users\Admin\Downloads\GAIN-master\GAIN-master\main_letter_spam.py", line 24, in <module> import numpy as np ModuleNotFoundError: No module named 'numpy' I was confused because they said in the logs numpy was installed. So I did the following: checked GAINenv\Lib\site-packages for numpy. It was already there written as numpy. In the import, it was also written as import numpy as np checked if there are multiple Python installations. There aren't ran pip install numpy. It said Requirement already satisfied: numpy in c:\users\admin\downloads\gain-master\gain-master\gainenv\lib\site-packages (1.23.5) forced install pip install -I numpy. Ran the command and got the same no module error Added the path in virtual env then reset the command prompt window. Same error did pip3 and pip2 install. Still no module error downloaded numpy directly from sourceforge then installed it in virtualenv directory. Doesn't work Here is the list of similar questions I read but didn't work: No module named 'numpy' but Requirement already satisfied: numpy Error "Import Error: No module named numpy" on Windows 'Python not found' despite having been installed https://unix.stackexchange.com/questions/93097/numpy-module-not-found-despite-being-in-path NumPy module not found after install Python Module not found despite being installed no module named numpy despite numpy being already installed Pyaudio module not found despite being installed This is what happens when I run python -m site (GAINenv) C:\Users\Admin\Downloads\GAIN-master\GAIN-master>python -m site sys.path = [ 'C:\\Users\\Admin\\Downloads\\GAIN-master\\GAIN-master', 'C:\\Program Files\\WindowsApps\\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\\python310.zip', 'C:\\Program Files\\WindowsApps\\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\\DLLs', 'C:\\Program Files\\WindowsApps\\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\\lib', 'C:\\Users\\Admin\\AppData\\Local\\Microsoft\\WindowsApps\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0', 'C:\\Users\\Admin\\Downloads\\GAIN-master\\GAIN-master\\GAINenv', 'C:\\Users\\Admin\\Downloads\\GAIN-master\\GAIN-master\\GAINenv\\lib\\site-packages', ] USER_BASE: 'C:\\Users\\Admin\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages' (exists) USER_SITE: 'C:\\Users\\Admin\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages' (exists) ENABLE_USER_SITE: False I don't care about Anaconda and PyCharm because I simply want to run a python script locally through command line. I'm using Windows 10 and Python 3.10 from Microsoft Store. Thanks in advance! 
A: The following command worked for me: python.exe -m pip install numpy Or: Download and install http://sourceforge.net/projects/numpy/files/NumPy/ $ tar xfz numpy-n.m.tar.gz $ cd numpy-n.m $ python setup.py install A: I uninstalled Python 3.10 Microsoft Store, installed Python 3.10 for Windows from the official website, and ran the program without virtual environment.
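A small diagnostic sketch for this situation (the suspected cause is an assumption: the python3 alias resolving to the Store interpreter rather than the GAINenv one). Run it with the same command used for the failing script:

import sys
print(sys.executable)  # should point inside GAINenv (e.g. ...\GAINenv\Scripts\python.exe), not WindowsApps
print(sys.prefix)      # root of the interpreter actually in use
print([p for p in sys.path if 'site-packages' in p])  # numpy must sit in one of these folders

If sys.executable points at the WindowsApps install, launching the script with python (or GAINenv\Scripts\python.exe) instead of python3 should pick up the environment where numpy was installed.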
Module "Numpy" not found despite already installed in system
I have a problem thats been stumping me for days. I wanted to run this GAIN program on my local system though command line (https://github.com/jsyoon0823/GAIN) so I downloaded it, installed Python for the first time because Python was not found; run without arguments to install from the Microsoft Store, or disable this shortcut from Settings > Manage App Execution Aliases. then created a virtual environment. I ran pip install -r requirements.txt (logs) When I ran the program it said (GAINenv) C:\Users\Admin\Downloads\GAIN-master\GAIN-master>python3 main_letter_spam.py --data_name spam --miss_rate: 0.2 --batch_size 128 --hint_rate 0.9 --alpha 100 --iterations 10000 Traceback (most recent call last): File "C:\Users\Admin\Downloads\GAIN-master\GAIN-master\main_letter_spam.py", line 24, in <module> import numpy as np ModuleNotFoundError: No module named 'numpy' I was confused because they said in the logs numpy was installed. So I did the following: checked GAINenv\Lib\site-packages for numpy. It was already there written as numpy. In the import, it was also written as import numpy as np checked if there are multiple Python installations. There aren't ran pip install numpy. It said Requirement already satisfied: numpy in c:\users\admin\downloads\gain-master\gain-master\gainenv\lib\site-packages (1.23.5) forced install pip install -I numpy. Ran the command and got the same no module error Added the path in virtual env then reset the command prompt window. Same error did pip3 and pip2 install. Still no module error downloaded numpy directly from sourceforge then installed it in virtualenv directory. Doesn't work Here is the list of similar questions I read but didn't work: No module named 'numpy' but Requirement already satisfied: numpy Error "Import Error: No module named numpy" on Windows 'Python not found' despite having been installed https://unix.stackexchange.com/questions/93097/numpy-module-not-found-despite-being-in-path NumPy module not found after install Python Module not found despite being installed no module named numpy despite numpy being already installed Pyaudio module not found despite being installed This is what happens when I run python -m site (GAINenv) C:\Users\Admin\Downloads\GAIN-master\GAIN-master>python -m site sys.path = [ 'C:\\Users\\Admin\\Downloads\\GAIN-master\\GAIN-master', 'C:\\Program Files\\WindowsApps\\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\\python310.zip', 'C:\\Program Files\\WindowsApps\\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\\DLLs', 'C:\\Program Files\\WindowsApps\\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\\lib', 'C:\\Users\\Admin\\AppData\\Local\\Microsoft\\WindowsApps\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0', 'C:\\Users\\Admin\\Downloads\\GAIN-master\\GAIN-master\\GAINenv', 'C:\\Users\\Admin\\Downloads\\GAIN-master\\GAIN-master\\GAINenv\\lib\\site-packages', ] USER_BASE: 'C:\\Users\\Admin\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages' (exists) USER_SITE: 'C:\\Users\\Admin\\AppData\\Local\\Packages\\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\\LocalCache\\local-packages\\Python310\\site-packages' (exists) ENABLE_USER_SITE: False I don't care about Anaconda and PyCharm because I simply want to run a python script locally through command line. I'm using Windows 10 and Python 3.10 from Microsoft Store. Thanks in advance!
[ "The following command worked for me:\n\npython.exe -m pip install numpy\n\nOr:\nDownload and install\nhttp://sourceforge.net/projects/numpy/files/NumPy/\n$ tar xfz numpy-n.m.tar.gz\n$ cd numpy-n.m\n$ python setup.py install\n\n", "I uninstalled Python 3.10 Microsoft Store, installed Python 3.10 for Windows from the official website, and ran the program without virtual environment.\n" ]
[ 0, 0 ]
[]
[]
[ "numpy", "path", "python", "python_3.x", "virtualenv" ]
stackoverflow_0074528129_numpy_path_python_python_3.x_virtualenv.txt
Q: Guessing a missing value based on historical data Let's assume i have 100 different kinds of items, each item got a name and a physical weight. I know the names of all 100 items but only the weight of 80 items. When i ship items, i pack them in groups of 10 and sum the weight of these items. Due to some items are missing their weight, this will give an inaccurate sum when im about to ship. I have different shipments with missing weights Shipment 1 Item Name Item Weight Item 2 10 Item 27 20 Item 42 20 Item 71 - Item 77 - Total weight: 75 Shipment 2 Item Name Item Weight Item 2 10 Item 27 20 Item 42 20 Item 71 - Item 92 - Total weight: 90 Shipment 3 Item Name Item Weight Item 2 10 Item 27 20 Item 42 20 Item 55 35 Item 77 - Total weight: 100 Since some of the shipments share the same items with missing weights and i have the shipments total weight, is there a way with machine learning to determine the weight of these items without im unpacking the entire shipment? Or would it just be a, in this case, 100x3 Matrix with a lot of empty values? At this point im not really sure if i should use some type of regression to solve this or if its just a matrix, that would expand a lot if i had n more items to ship. I also wondered if this was some type of knapsack problem, but i hope anyone can guide my in the right direction. A: Forget about machine learning. This is a simple system of linear equations. w_71 + w_77 = 25 w_71 + w_92 = 40 w_77 = 15 You can solve it with sympy.solvers.solveset.linsolve, or scipy.optimize.linprog, or scipy.linalg.lstsq, or numpy.linalg.lstsq sympy.linsolve is maybe the easiest to understand if you are not familiar with matrices; however, if the system is underdetermined, then instead of returning a particular solution to the system, sympy.linsolve will return the general solution in parametric form. scipy.lstsq or numpy.lstsq expect the problem to be given in matrix form. If there is more than one possible solution, they will return the most "average" solution. However, they cannot take any positivity constraint into account: they might return a solution where one of the variables is negative. You can maybe fix this behaviour by adding a new equation to the system to manually force a variable to be positive, then solve again. scipy.linprog expects the problem to be given in matrix form; it also expects you to specify a linear objective function, to choose which particular solution is "best" in case there is more than one possible solution. linprog also considers that all variables are nonnegative by default, or allows you to specify explicit bounds for the variables yourself. It also allows you to add inequality constraints, in addition to the equations, if you wish to. Using sympy.solvers.solveset.linsolve from sympy.solvers.solveset import linsolve from sympy import symbols w71, w77, w92 = symbols('w71 w77 w92') eqs = [w71+w77-25, w71+w92-40, w77-15] solution = linsolve(eqs, [w71, w77, w92]) # solution = {(10, 15, 30)} In your example, there is only one possible solution, so linsolve returned that solution: w71 = 10, w77 = 15, w92 = 30. However, in case there is more than one possible solution, linsolve will return a parametric form for the general solution: x,y,z = symbols('x y z') eqs = [x+y-10, y+z-20] solution = linsolve(eqs, [x, y, z]) # solution = {(z - 10, 20 - z, z)} Here there is an infinity of possible solutions. linsolve is telling us that we can pick any value for z, and then we'll get the corresponding x and y as x = z - 10 and y = 20 - z. 
Using numpy.linalg.lstsq lstsq expects the system of equations to be given in matrix form. If there is more than one possible solution, then it will return the most "average" solution. For instance, if the system of equation is simply x + y = 10, then lstsq will return the particular solution x = 5, y = 5 and will ignore more "extreme" solutions such as x = 10, y = 0. from numpy.linalg import lstsq # w_71 + w_77 = 25 # w_71 + w_92 = 40 # w_77 = 15 A = [[1, 1, 0], [1, 0, 1], [0, 1, 0]] b = [25, 40, 15] solution = lstsq(A, b) solution[0] # array([10., 15., 30.]) Here lstsq found the unique solution, w71 = 10, w77=15, w92 = 30. # x + y = 10 # y + z = 20 A = [[1, 1, 0], [0, 1, 1]] b = [10, 20] solution = lstsq(A, B) solution[0] # array([-3.55271368e-15, 1.00000000e+01, 1.00000000e+01]) Here lstsq had to choose a particular solution, and chose the one it considered most "average", x = 0, y = 10, z = 10. You might want to round the solution to integers. One drawback of lstsq is that it doesn't take into account your non-negativity constraint. That is, it might return a solution where one of the variables is negative: # x + y = 2 # y + z = 20 A = [[1, 1, 0], [0, 1, 1]) b = [2, 20] solution = lstsq(A, b) solution[0] # array([-5.33333333, 7.33333333, 12.66666667]) See how lstsq ignored the possible positive solution x = 1, y = 1, z = 18 and instead returned the solution it considered most "average", x = -5.33, y = 7.33, z = 12.67. One way to fix this is to add an equation yourself to force the offending variable to be positive. For instance, here we noticed that lstsq wanted x to be negative, so we can manually force x to be equal to 1 instead, and solve again: # x + y = 2 # y + z = 20 # x = 1 A = [[1, 1, 0], [0, 1, 1], [1, 0, 0]] b = [2, 20, 1] solution = lstsq(A, b) solution[0] # array([ 1., 1., 19.]) Now that we manually forced x to be 1, lstsq found solution x=1, y=1, z=19 which we're more happy with. Using scipy.optimize.linprog The particularity of linprog is that it expects you to specify the "objective" used to choose a particular solution, in case there is more than one possible solution. Also, linprog allows you to specify bounds for the variables. The default is that all variables are nonnegative, which is what you want. from scipy.optimize import linprog # w_71 + w_77 = 25 # w_71 + w_92 = 40 # w_77 = 15 A = [[1, 1, 0], [1, 0, 1], [0, 1, 0]] b = [25, 40, 15] c = [1, 1, 1] # coefficients for objective: minimise w71 + w77 + w92. solution = linprog(c, A_eq = A, b_eq = b) solution.x # array([10., 15., 30.])
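As a supplement, a hedged sketch of how the shipment records from the question could be turned into the matrix A and vector b automatically (the known item weights are subtracted from each shipment total first; item names and totals are taken from the question):

import numpy as np
from numpy.linalg import lstsq

# (unknown items in the shipment, shipment total minus the weights that are already known)
shipments = [
    ({"Item 71": 1, "Item 77": 1}, 75 - (10 + 20 + 20)),        # Shipment 1
    ({"Item 71": 1, "Item 92": 1}, 90 - (10 + 20 + 20)),        # Shipment 2
    ({"Item 77": 1},               100 - (10 + 20 + 20 + 35)),  # Shipment 3
]

unknown = sorted({item for counts, _ in shipments for item in counts})
A = np.array([[counts.get(item, 0) for item in unknown] for counts, _ in shipments])
b = np.array([remainder for _, remainder in shipments])

weights, *_ = lstsq(A, b, rcond=None)
print(dict(zip(unknown, weights)))  # approximately {'Item 71': 10.0, 'Item 77': 15.0, 'Item 92': 30.0}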
Guessing a missing value based on historical data
Let's assume i have 100 different kinds of items, each item got a name and a physical weight. I know the names of all 100 items but only the weight of 80 items. When i ship items, i pack them in groups of 10 and sum the weight of these items. Due to some items are missing their weight, this will give an inaccurate sum when im about to ship. I have different shipments with missing weights Shipment 1 Item Name Item Weight Item 2 10 Item 27 20 Item 42 20 Item 71 - Item 77 - Total weight: 75 Shipment 2 Item Name Item Weight Item 2 10 Item 27 20 Item 42 20 Item 71 - Item 92 - Total weight: 90 Shipment 3 Item Name Item Weight Item 2 10 Item 27 20 Item 42 20 Item 55 35 Item 77 - Total weight: 100 Since some of the shipments share the same items with missing weights and i have the shipments total weight, is there a way with machine learning to determine the weight of these items without im unpacking the entire shipment? Or would it just be a, in this case, 100x3 Matrix with a lot of empty values? At this point im not really sure if i should use some type of regression to solve this or if its just a matrix, that would expand a lot if i had n more items to ship. I also wondered if this was some type of knapsack problem, but i hope anyone can guide my in the right direction.
[ "Forget about machine learning. This is a simple system of linear equations.\nw_71 + w_77 = 25\nw_71 + w_92 = 40\nw_77 = 15\n\nYou can solve it with sympy.solvers.solveset.linsolve, or scipy.optimize.linprog, or scipy.linalg.lstsq, or numpy.linalg.lstsq\n\nsympy.linsolve is maybe the easiest to understand if you are not familiar with matrices; however, if the system is underdetermined, then instead of returning a particular solution to the system, sympy.linsolve will return the general solution in parametric form.\n\nscipy.lstsq or numpy.lstsq expect the problem to be given in matrix form. If there is more than one possible solution, they will return the most \"average\" solution. However, they cannot take any positivity constraint into account: they might return a solution where one of the variables is negative. You can maybe fix this behaviour by adding a new equation to the system to manually force a variable to be positive, then solve again.\n\nscipy.linprog expects the problem to be given in matrix form; it also expects you to specify a linear objective function, to choose which particular solution is \"best\" in case there is more than one possible solution. linprog also considers that all variables are nonnegative by default, or allows you to specify explicit bounds for the variables yourself. It also allows you to add inequality constraints, in addition to the equations, if you wish to.\n\n\nUsing sympy.solvers.solveset.linsolve\nfrom sympy.solvers.solveset import linsolve\nfrom sympy import symbols\n\nw71, w77, w92 = symbols('w71 w77 w92')\n\neqs = [w71+w77-25, w71+w92-40, w77-15]\n\nsolution = linsolve(eqs, [w71, w77, w92])\n# solution = {(10, 15, 30)}\n\nIn your example, there is only one possible solution, so linsolve returned that solution: w71 = 10, w77 = 15, w92 = 30.\nHowever, in case there is more than one possible solution, linsolve will return a parametric form for the general solution:\nx,y,z = symbols('x y z')\n\neqs = [x+y-10, y+z-20]\n\nsolution = linsolve(eqs, [x, y, z])\n# solution = {(z - 10, 20 - z, z)}\n\nHere there is an infinity of possible solutions. linsolve is telling us that we can pick any value for z, and then we'll get the corresponding x and y as x = z - 10 and y = 20 - z.\nUsing numpy.linalg.lstsq\nlstsq expects the system of equations to be given in matrix form. If there is more than one possible solution, then it will return the most \"average\" solution. For instance, if the system of equation is simply x + y = 10, then lstsq will return the particular solution x = 5, y = 5 and will ignore more \"extreme\" solutions such as x = 10, y = 0.\nfrom numpy.linalg import lstsq\n\n# w_71 + w_77 = 25\n# w_71 + w_92 = 40\n# w_77 = 15\nA = [[1, 1, 0], [1, 0, 1], [0, 1, 0]]\nb = [25, 40, 15]\n\nsolution = lstsq(A, b)\nsolution[0]\n# array([10., 15., 30.])\n\nHere lstsq found the unique solution, w71 = 10, w77=15, w92 = 30.\n# x + y = 10\n# y + z = 20\nA = [[1, 1, 0], [0, 1, 1]]\nb = [10, 20]\n\nsolution = lstsq(A, B)\nsolution[0]\n# array([-3.55271368e-15, 1.00000000e+01, 1.00000000e+01])\n\nHere lstsq had to choose a particular solution, and chose the one it considered most \"average\", x = 0, y = 10, z = 10. You might want to round the solution to integers.\nOne drawback of lstsq is that it doesn't take into account your non-negativity constraint. 
That is, it might return a solution where one of the variables is negative:\n# x + y = 2\n# y + z = 20\nA = [[1, 1, 0], [0, 1, 1])\nb = [2, 20]\n\nsolution = lstsq(A, b)\nsolution[0]\n# array([-5.33333333, 7.33333333, 12.66666667])\n\nSee how lstsq ignored the possible positive solution x = 1, y = 1, z = 18 and instead returned the solution it considered most \"average\", x = -5.33, y = 7.33, z = 12.67.\nOne way to fix this is to add an equation yourself to force the offending variable to be positive. For instance, here we noticed that lstsq wanted x to be negative, so we can manually force x to be equal to 1 instead, and solve again:\n# x + y = 2\n# y + z = 20\n# x = 1\nA = [[1, 1, 0], [0, 1, 1], [1, 0, 0]]\nb = [2, 20, 1]\n\nsolution = lstsq(A, b)\nsolution[0]\n# array([ 1., 1., 19.])\n\nNow that we manually forced x to be 1, lstsq found solution x=1, y=1, z=19 which we're more happy with.\nUsing scipy.optimize.linprog\nThe particularity of linprog is that it expects you to specify the \"objective\" used to choose a particular solution, in case there is more than one possible solution.\nAlso, linprog allows you to specify bounds for the variables. The default is that all variables are nonnegative, which is what you want.\nfrom scipy.optimize import linprog\n\n# w_71 + w_77 = 25\n# w_71 + w_92 = 40\n# w_77 = 15\nA = [[1, 1, 0], [1, 0, 1], [0, 1, 0]]\nb = [25, 40, 15]\nc = [1, 1, 1] # coefficients for objective: minimise w71 + w77 + w92.\n\nsolution = linprog(c, A_eq = A, b_eq = b)\nsolution.x\n# array([10., 15., 30.])\n\n" ]
[ 0 ]
[]
[]
[ "machine_learning", "math", "python" ]
stackoverflow_0074526673_machine_learning_math_python.txt
Q: How to split a file by using string as identifier with python? I have a huge text file and need to split it to some file. In the text file there is an identifier to split the file. Here is some part of the text file looks like: Comp MOFVersion 10.1 Copyright 1997-2006. All rights reserved. -------------------------------------------------- Mon 11/19/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here ... exit --------------------- list volume list partition exit --------------------- Volume 0 is the selected volume. Disk ### Status Size Free Dyn Gpt -------- ------------- ------- ------- --- --- * Disk 0 Online 238 GB 136 GB * -------------------------------------------------- Tue 11/20/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here .... SERVICE_NAME: vds TYPE : 10 WIN32_OWN_PROCESS STATE : 1 STOPPED WIN32_EXIT_CODE : 0 (0x0) SERVICE_EXIT_CODE : 0 (0x0) CHECKPOINT : 0x0 WAIT_HINT : 0x0 --------------------- *exit /b 0 File not found - *.* 0 File(s) copied -------------------------------------------------- Wed 11/21/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here ========================================== Computer: . ========================================== Active: True DmiRevision: 0 list disk exit --------------------- *exit /b 0 11/19/2021 08:34 AM <DIR> . 11/19/2021 08:34 AM <DIR> .. 11/19/2021 08:34 AM 0 SL 1 File(s) 0 bytes 2 Dir(s) 80,160,923,648 bytes free My expectation is split the file by mapping the string "Starting The Process". So if I have a text file like above example, then the file will split to 3 files and each file has differen content. For example: file1 -------------------------------------------------- Mon 11/19/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here ... exit --------------------- list volume list partition exit --------------------- Volume 0 is the selected volume. Disk ### Status Size Free Dyn Gpt -------- ------------- ------- ------- --- --- * Disk 0 Online 238 GB 136 GB * file2 -------------------------------------------------- Tue 11/20/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here .... SERVICE_NAME: vds TYPE : 10 WIN32_OWN_PROCESS STATE : 1 STOPPED WIN32_EXIT_CODE : 0 (0x0) SERVICE_EXIT_CODE : 0 (0x0) CHECKPOINT : 0x0 WAIT_HINT : 0x0 --------------------- *exit /b 0 File not found - *.* 0 File(s) copied file 3 -------------------------------------------------- Wed 11/21/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here ========================================== Computer: . ========================================== Active: True DmiRevision: 0 list disk exit --------------------- *exit /b 0 11/19/2021 08:34 AM <DIR> . 11/19/2021 08:34 AM <DIR> .. 11/19/2021 08:34 AM 0 SL 1 File(s) 0 bytes 2 Dir(s) 80,160,923,648 bytes free here is what i've tried: logfile = "E:/DATA/result.txt" with open(logfile, 'r') as text_file: lines = text_file.readlines() for line in lines: if "Starting The Process..." in line: print(line) I am only able to find the line with the string, but I don't know how to get the content of each line after split to 3 parts and output to new file. Is it possible to do it in Python? 
Thank you for any advice. A: Well if the file is small enough to comfortably fit into memory (say 1GB or less), you could read the entire file into a string and then use re.findall: with open('data.txt', 'r') as file: data = file.read() parts = re.findall(r'-{10,}[^-]*\n\w{3} \d{2}\/\d{2}\/\d{4}.*?-{10,}.*?(?=-{10,}|$)', data, flags=re.S) cnt = 1 for part in parts: output = open('file ' + str(cnt), 'w') output.write(part) output.close() cnt = cnt + 1 A: An alternative solution if the dashes in the file are of fixed length could be: with open('file.txt', 'r') as f: split_text = f.read().split('--------------------------------------------------') split_text.pop(0) # To remove the Copyright message at the start for i in range(0, len(split_text) - 1, 2): with open(f'file{int(i/2)}.txt', 'w') as temp: temp_txt = ''.join(split_text[i:i+2]) temp.write(temp_txt) Essentially, I am just splitting on the basis of those dashes and joining every consecutive element. This way you keep the info about the timestamp with the content in each file.
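A further hedged sketch that slices on the marker line itself (output file names are an assumption), keeping the dashed separator that sits directly above each timestamp:

logfile = "E:/DATA/result.txt"  # path taken from the question
marker = "Starting The Process"

with open(logfile, 'r') as text_file:
    lines = text_file.readlines()

# start each block at the dashed line just above the marker, falling back to the marker itself
starts = [i - 1 if i > 0 and lines[i - 1].startswith("---") else i
          for i, line in enumerate(lines) if marker in line]
starts.append(len(lines))  # sentinel so the last block runs to the end of the file

for n in range(len(starts) - 1):
    with open(f"file{n + 1}.txt", "w") as out:
        out.writelines(lines[starts[n]:starts[n + 1]])

Everything before the first marker (the copyright header) is dropped, and each output file begins with the separator and timestamp line, matching the expected layout.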
How to split a file by using string as identifier with python?
I have a huge text file and need to split it to some file. In the text file there is an identifier to split the file. Here is some part of the text file looks like: Comp MOFVersion 10.1 Copyright 1997-2006. All rights reserved. -------------------------------------------------- Mon 11/19/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here ... exit --------------------- list volume list partition exit --------------------- Volume 0 is the selected volume. Disk ### Status Size Free Dyn Gpt -------- ------------- ------- ------- --- --- * Disk 0 Online 238 GB 136 GB * -------------------------------------------------- Tue 11/20/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here .... SERVICE_NAME: vds TYPE : 10 WIN32_OWN_PROCESS STATE : 1 STOPPED WIN32_EXIT_CODE : 0 (0x0) SERVICE_EXIT_CODE : 0 (0x0) CHECKPOINT : 0x0 WAIT_HINT : 0x0 --------------------- *exit /b 0 File not found - *.* 0 File(s) copied -------------------------------------------------- Wed 11/21/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here ========================================== Computer: . ========================================== Active: True DmiRevision: 0 list disk exit --------------------- *exit /b 0 11/19/2021 08:34 AM <DIR> . 11/19/2021 08:34 AM <DIR> .. 11/19/2021 08:34 AM 0 SL 1 File(s) 0 bytes 2 Dir(s) 80,160,923,648 bytes free My expectation is split the file by mapping the string "Starting The Process". So if I have a text file like above example, then the file will split to 3 files and each file has differen content. For example: file1 -------------------------------------------------- Mon 11/19/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here ... exit --------------------- list volume list partition exit --------------------- Volume 0 is the selected volume. Disk ### Status Size Free Dyn Gpt -------- ------------- ------- ------- --- --- * Disk 0 Online 238 GB 136 GB * file2 -------------------------------------------------- Tue 11/20/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here .... SERVICE_NAME: vds TYPE : 10 WIN32_OWN_PROCESS STATE : 1 STOPPED WIN32_EXIT_CODE : 0 (0x0) SERVICE_EXIT_CODE : 0 (0x0) CHECKPOINT : 0x0 WAIT_HINT : 0x0 --------------------- *exit /b 0 File not found - *.* 0 File(s) copied file 3 -------------------------------------------------- Wed 11/21/2022 8:34:22.35 - Starting The Process... -------------------------------------------------- There are a lot of content here ========================================== Computer: . ========================================== Active: True DmiRevision: 0 list disk exit --------------------- *exit /b 0 11/19/2021 08:34 AM <DIR> . 11/19/2021 08:34 AM <DIR> .. 11/19/2021 08:34 AM 0 SL 1 File(s) 0 bytes 2 Dir(s) 80,160,923,648 bytes free here is what i've tried: logfile = "E:/DATA/result.txt" with open(logfile, 'r') as text_file: lines = text_file.readlines() for line in lines: if "Starting The Process..." in line: print(line) I am only able to find the line with the string, but I don't know how to get the content of each line after split to 3 parts and output to new file. Is it possible to do it in Python? Thank you for any advice.
[ "Well if the file is small enough to comfortably fit into memory (say 1GB or less), you could read the entire file into a string and then use re.findall:\nwith open('data.txt', 'r') as file:\n data = file.read()\n parts = re.findall(r'-{10,}[^-]*\\n\\w{3} \\d{2}\\/\\d{2}\\/\\d{4}.*?-{10,}.*?(?=-{10,}|$)', data, flags=re.S)\n\ncnt = 1\nfor part in parts:\n output = open('file ' + str(cnt), 'w')\n output.write(part)\n output.close()\n cnt = cnt + 1\n\n", "An alternative solution if the dashes in the file are of fixed length could be:\nwith open('file.txt', 'r') as f: \nsplit_text = f.read().split('--------------------------------------------------')\nsplit_text.pop(0) # To remove the Copyright message at the start\n\nfor i in range(0, len(split_text) - 1, 2): \n with open(f'file{int(i/2)}.txt', 'w') as temp: \n temp_txt = ''.join(split_text[i:i+2])\n temp.write(temp_txt) \n\nEssentially, I am just splitting on the basis of those dashes and joining every consecutive element. This way you keep the info about the timestamp with the content in each file.\n" ]
[ 1, 0 ]
[]
[]
[ "filesplitting", "mapping", "python", "string" ]
stackoverflow_0074530313_filesplitting_mapping_python_string.txt
Q: ModuleNotFoundError: No module named 'keras' for Jupyter Notebook I was running Jupyter Notebook and the following error occurs ModuleNotFoundError Traceback (most recent call last) in ----> from keras.models import Sequential from keras.layers import ( Conv2D, MaxPooling2D, Flatten, Dense, Dropout) ModuleNotFoundError: No module named 'keras' I have tried using import sys; sys.path and found this ['/home/xxx/notebook', '/home/xxx/anaconda3/lib/python37.zip', '/home/xxx/anaconda3/lib/python3.7', '/home/xxx/anaconda3/lib/python3.7/lib-dynload', '', '/home/xxx/anaconda3/lib/python3.7/site-packages', '/home/xxx/anaconda3/lib/python3.7/site-packages/IPython/extensions', '/home/xxx/.ipython'] Is there any problem with the installation? Do I need to reinstall everything from python to anaconda. Would some be able to point me to a proper installation of anaconda BTW, if u have installed python, should you install python package through anaconda again Thanks A: You have to install all the dependencies first before using it. Try using conda install tensorflow conda install keras by installing it with conda command it manage your versions compatibility with other libraries. with pip install libraries will only install in your current environment and the latest version of the library sometimes latest libraries are not compatible with the other libraries so we have to take care of version compatibility. A: keras is actually part of tensorflow so all you have to do is just from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense,Dropout,Activation, Flatten, Conv2D, MaxPooling2D cheers mate A: Create a virtual environment and install all packages and specially jupyter-notebook in it. Some times it is necessary to install jupyter-notebook in each virtual environment to work properly with other libraries. It is preferred to use anaconda. After creating your virtual env use this command to install jupyter: conda install -c anaconda jupyter A: If you have installed all the required packages in a virtual/conda environment, have you tried saving the environment as an ipython kernel? I got those errors when I tried to launch a jupyter notebook from my virtual environment but I hadn't explicitly created a kernel for it. https://ipython.readthedocs.io/en/stable/install/kernel_install.html A: Install packages in Anaconda Navigator -> Enviroments -> Play button -> Open Terminal -> conda install keras A: from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense,Dropout,Activation, Flatten, Conv2D, MaxPooling2D These two import statements worked for me.
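A minimal sketch of the replacement imports once TensorFlow is installed in the environment the notebook kernel uses (Keras ships inside the tensorflow package, so a separate keras install is not strictly required):

import tensorflow as tf
print(tf.__version__)  # confirms the notebook kernel can actually see the TensorFlow install

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout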
ModuleNotFoundError: No module named 'keras' for Jupyter Notebook
I was running Jupyter Notebook and the following error occurs ModuleNotFoundError Traceback (most recent call last) in ----> from keras.models import Sequential from keras.layers import ( Conv2D, MaxPooling2D, Flatten, Dense, Dropout) ModuleNotFoundError: No module named 'keras' I have tried using import sys; sys.path and found this ['/home/xxx/notebook', '/home/xxx/anaconda3/lib/python37.zip', '/home/xxx/anaconda3/lib/python3.7', '/home/xxx/anaconda3/lib/python3.7/lib-dynload', '', '/home/xxx/anaconda3/lib/python3.7/site-packages', '/home/xxx/anaconda3/lib/python3.7/site-packages/IPython/extensions', '/home/xxx/.ipython'] Is there any problem with the installation? Do I need to reinstall everything from python to anaconda. Would some be able to point me to a proper installation of anaconda BTW, if u have installed python, should you install python package through anaconda again Thanks
[ "You have to install all the dependencies first before using it.\nTry using \n\nconda install tensorflow\nconda install keras\n\nby installing it with conda command it manage your versions compatibility with other libraries.\nwith pip install libraries will only install in your current environment and the latest version of the library sometimes latest libraries are not compatible with the other libraries so we have to take care of version compatibility. \n", "keras is actually part of tensorflow so all you have to do is just\nfrom tensorflow.keras.models import Sequential\n\nfrom tensorflow.keras.layers import Dense,Dropout,Activation, Flatten, Conv2D, MaxPooling2D\n\ncheers mate\n", "Create a virtual environment and install all packages and specially jupyter-notebook in it. Some times it is necessary to install jupyter-notebook in each virtual environment to work properly with other libraries.\nIt is preferred to use anaconda.\nAfter creating your virtual env use this command to install jupyter:\nconda install -c anaconda jupyter\n\n", "If you have installed all the required packages in a virtual/conda environment, have you tried saving the environment as an ipython kernel? I got those errors when I tried to launch a jupyter notebook from my virtual environment but I hadn't explicitly created a kernel for it.\nhttps://ipython.readthedocs.io/en/stable/install/kernel_install.html\n", "Install packages in Anaconda Navigator -> Enviroments -> Play button -> Open Terminal -> conda install keras\n", "from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense,Dropout,Activation, Flatten, Conv2D, MaxPooling2D\n\nThese two import statements worked for me.\n" ]
[ 14, 11, 1, 0, 0, 0 ]
[]
[]
[ "anaconda", "jupyter_notebook", "keras", "python" ]
stackoverflow_0056641165_anaconda_jupyter_notebook_keras_python.txt
Q: Save dict as json using python in databricks I am trying to save a dict as json in azure data lake/databricks however I am getting a File not found error. Any clue what I am doing wrong? import json test_config = { "expectations": [ { "kwargs": { "column": "role", "value_set": [ "BBV", "GEM" ] }, "expectation_type": "expect_column_distinct_values_to_be_in_set", "meta": {} }] } path = '/mnt/lake/enriched/checks/' json_file = 'test_idm_expectations.json' with open(path+json_file, "w") as fp: json.dump(test_config , fp) And the error I am getting is: FileNotFoundError: [Errno 2] No such file or directory: "/mnt/lake/enriched/checks/test_idm_expectations.json" Variations of the path with /dbfs/mnt/lake/enriched/checks/ or dbfs:mnt/lake/enriched/checks/ also do not work. Any help would be super appreciated. Thanks! A: Ensure your python environment sees the mountpoint. You can use os.path.ismount for that. Also, check if the folder tree structure exists. json.dumps will create your file, but only if the folder exists. Also, tip: to keep indentation, use indent=2 or whatever number of spaces you want in your json, to be pretty printed. A: I have reproduced the above and got same error. The above error occurs if we give wrong folder name in the path. you can see as there is no sourcefolder1 in my container, it generated that error. It will not create new folder for us. It only creates that particular file(mysample1.json in this case). You can see if I gave correct folder(which is created before) sourcefolder the file is generated successfully. Give path like /dbfs/mnt/folder/ only as I got same error when I gave path like /mnt/folder/.(open() not identifying paths in the mount might be the reason for it). File Generated: A: Thanks @Remzinho & @Rakesh, the folder structure was indeed not created. adding this snippet to my code before saving the data, solved the issue. if not os.path.exists(path): os.mkdir(path) with open(path+json_file, "w") as fp: json.dump(test_config , fp)
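Putting the pieces together, a hedged sketch that creates the whole folder tree first and writes through the /dbfs/ prefix (the prefix is how local Python file APIs reach a DBFS mount from a Databricks notebook):

import json
import os

path = '/dbfs/mnt/lake/enriched/checks/'
json_file = 'test_idm_expectations.json'

os.makedirs(path, exist_ok=True)  # creates every missing folder level, unlike os.mkdir

with open(path + json_file, "w") as fp:
    json.dump(test_config, fp, indent=2)  # indent is optional; it just keeps the file readable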
Save dict as json using python in databricks
I am trying to save a dict as json in azure data lake/databricks however I am getting a File not found error. Any clue what I am doing wrong? import json test_config = { "expectations": [ { "kwargs": { "column": "role", "value_set": [ "BBV", "GEM" ] }, "expectation_type": "expect_column_distinct_values_to_be_in_set", "meta": {} }] } path = '/mnt/lake/enriched/checks/' json_file = 'test_idm_expectations.json' with open(path+json_file, "w") as fp: json.dump(test_config , fp) And the error I am getting is: FileNotFoundError: [Errno 2] No such file or directory: "/mnt/lake/enriched/checks/test_idm_expectations.json" Variations of the path with /dbfs/mnt/lake/enriched/checks/ or dbfs:mnt/lake/enriched/checks/ also do not work. Any help would be super appreciated. Thanks!
[ "Ensure your python environment sees the mountpoint.\nYou can use os.path.ismount for that.\nAlso, check if the folder tree structure exists. json.dumps will create your file, but only if the folder exists.\nAlso, tip: to keep indentation, use indent=2 or whatever number of spaces you want in your json, to be pretty printed.\n", "I have reproduced the above and got same error.\n\nThe above error occurs if we give wrong folder name in the path. you can see as there is no sourcefolder1 in my container, it generated that error. It will not create new folder for us. It only creates that particular file(mysample1.json in this case).\nYou can see if I gave correct folder(which is created before) sourcefolder the file is generated successfully.\n\nGive path like /dbfs/mnt/folder/ only as I got same error when I gave path like /mnt/folder/.(open() not identifying paths in the mount might be the reason for it).\nFile Generated:\n\n", "Thanks @Remzinho & @Rakesh, the folder structure was indeed not created. adding this snippet to my code before saving the data, solved the issue.\nif not os.path.exists(path):\n os.mkdir(path)\n\nwith open(path+json_file, \"w\") as fp:\n json.dump(test_config , fp)\n\n" ]
[ 1, 1, 0 ]
[]
[]
[ "azure_data_lake", "databricks", "json", "python" ]
stackoverflow_0074525326_azure_data_lake_databricks_json_python.txt
Q: Writing the requirements/setup file for a Python package Are there any best practices on how to select the versions of the required packages for your own python package? You can always do pip freeze > requirements.txt, but this will set every used package to a specific version. If this package is used with another one using the same requirement with a different specific version, you will have a problem when anaconda, pip-tools, or poetry try to find the right combination of dependencies. To let your package work with others, the best would be to let the version selected be as wide as possible, for example numpy>=1.20,<1.21. Is there a good way to do this for the entire set of requirements of a package? A: In the packaging metadata of your library (in other words: in the setup.py, setup.cfg, or pyproject.toml), only the direct dependencies should be listed. The direct dependencies are the ones that are directly imported by the library's code (and the ones that are called in sub-processes, but that is quite a rare case). Since no one can predict the future, my advice regarding version constraints on the dependencies is to only exclude the versions (or version ranges) for which you are 100% sure that they are incompatible with your library. For example, we are working on MyLib v9 and want to add a dependency on SomeLib which has only v1 and v2 available. The features of SomeLib that we want are not available in v1 but they are on v2. We do not know if those features will still be there in SomeLib v3+ or not, since we can not predict the future. So in the packaging metadata of our MyLib v9, we should declare dependency SomeLib>=2 (or SomeLib!=1). Some months later SomeLib v3 and then SomeLib v4 are released, and they are indeed compatible with our library. We do not need to do anything, whoever installs our MyLib will automatically get the latest version of SomeLib, which is v4 and is compatible. Some more months later, SomeLib v5 is released and it is not compatible with our library. So we should release MyLib v9.post1 where we adjust the dependency specification in packaging metadata with SomeLib>=2,<5 (or SomeLib!=1,!=5). If when the incompatible SomeLib v5 is released, we have all abandoned the maintenance of our MyLib and do not release the post-build, then users of our MyLib are still able to manually exclude the problematic SomeLib v5 from dependency resolution (for example with a pip constraints.txt file). This is well supported in the Python packaging ecosystem, very easy to do. On the other hand, if right from the start we had eagerly excluded everything but SomeLib v2 in our initial MyLib v9 release (maybe with a dependency specification such as SomeLib==2), and then immediately abandoned the maintenance of the library, then no one would have been able to install our MyLib v9 with SomeLib v3 (or v4), even though it is a perfectly valid combination. This eager over-specification of exclusions is the cause of a lot of (unsolvable) issues, a near dead-end, this should really be avoided. That is why, from my point of view, the only thing dependency version constraints should do is exclude well-known (and proven) incompatibilities. Then, as much as possible, publish "post build" releases to improve the version constraints as new incompatibilities appear. References https://caremad.io/posts/2013/07/setup-vs-requirement/ https://peps.python.org/pep-0440/#post-release-separators https://pip.pypa.io/en/stable/user_guide/#constraints-files
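To make the advice concrete, a hypothetical setup.py fragment for the MyLib/SomeLib example (all names are illustrative):

from setuptools import setup

setup(
    name="mylib",
    version="9.post1",
    install_requires=[
        "SomeLib>=2,<5",  # v1 lacks the needed features; v5 is a proven incompatibility
    ],
)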
Writing the requirements/setup file for a Python package
Are there any best practices on how to select the versions of the required packages for your own python package? You can always do pip freeze > requirements.txt, but this will set every used package to a specific version. If this package is used with another one using the same requirement with a different specific version, you will have a problem when anaconda, pip-tools, or poetry try to find the right combination of dependencies. To let your package work with others, the best would be to let the version selected be as wide as possible, for example numpy>=1.20,<1.21. Is there a good way to do this for the entire set of requirements of a package?
[ "In the packaging metadata of your library (in other words: in the setup.py, setup.cfg, or pyproject.toml), only the direct dependencies should be listed. The direct dependencies are the ones that are directly imported by the library's code (and the ones that are called in sub-processes, but that is quite a rare case). Since no one can predict the future, my advice regarding version constraints on the dependencies is to only exclude the versions (or version ranges) for which you are 100% sure that they are incompatible with your library.\nFor example, we are working on MyLib v9 and want to add a dependency on SomeLib which has only v1 and v2 available. The features of SomeLib that we want are not available in v1 but they are on v2. We do not know if those features will still be there in SomeLib v3+ or not, since we can not predict the future. So in the packaging metadata of our MyLib v9, we should declare dependency SomeLib>=2 (or SomeLib!=1).\nSome months later SomeLib v3 and then SomeLib v4 are released, and they are indeed compatible with our library. We do not need to do anything, whoever installs our MyLib will automatically get the latest version of SomeLib, which is v4 and is compatible.\nSome more months later, SomeLib v5 is released and it is not compatible with our library. So we should release MyLib v9.post1 where we adjust the dependency specification in packaging metadata with SomeLib>=2,<5 (or SomeLib!=1,!=5).\nIf when the incompatible SomeLib v5 is released, we have all abandoned the maintenance of our MyLib and do not release the post-build, then users of our MyLib are still able to manually exclude the problematic SomeLib v5 from dependency resolution (for example with a pip constraints.txt file). This is well supported in the Python packaging ecosystem, very easy to do.\nOn the other hand, if right from the start we had eagerly excluded everything but SomeLib v2 in our initial MyLib v9 release (maybe with a dependency specification such as SomeLib==2), and then immediately abandoned the maintenance of the library, then no one would have been able to install our MyLib v9 with SomeLib v3 (or v4), even though it is a perfectly valid combination. This eager over-specification of exclusions is the cause of a lot of (unsolvable) issues, a near dead-end, this should really be avoided.\nThat is why, from my point of view, the only thing dependency version constraints should do is exclude well-known (and proven) incompatibilities. Then, as much as possible, publish \"post build\" releases to improve the version constraints as new incompatibilities appear.\nReferences\n\nhttps://caremad.io/posts/2013/07/setup-vs-requirement/\nhttps://peps.python.org/pep-0440/#post-release-separators\nhttps://pip.pypa.io/en/stable/user_guide/#constraints-files\n\n" ]
[ 1 ]
[]
[]
[ "python", "python_packaging", "requirements.txt" ]
stackoverflow_0074525503_python_python_packaging_requirements.txt.txt
Q: Is there any way to define app.Table without using Record in faust? I'm currently using schema registry and faust to process stream data. The reason I try to avoid using faust.Record is the schema can be dynamically changed and I don't like to change the code(class inheriting faust.Record) every time it happend. But without faust.Record, it looks like there are many restrictions. For example, app.Table's relative_to_field requires FieldDescriptorT but this class looks stronly coupled with faust.Record Here is the code: import faust from datetime import timedelta from pydantic_avro.base import AvroBase from schema_registry.client import SchemaRegistryClient, schema from schema_registry.serializers.faust import FaustSerializer topic_name = "practice4" subject_name = f"{topic_name}-value" serializer_name = f"{topic_name}_serializer" bootstrap_server = "192.168.59.100:30887" sr_server = "http://localhost:8081" client = SchemaRegistryClient({"url": sr_server}) topic_schema = client.get_schema(subject_name) fp_avro_schema = schema.AvroSchema(topic_schema.schema.raw_schema) avro_fp_serializer = FaustSerializer(client, serializer_name, fp_avro_schema) faust.serializers.codecs.register(name=serializer_name, codec=avro_fp_serializer) app = faust.App('sample_app', broker=bootstrap_server) faust_topic = app.topic(topic_name, value_serializer=serializer_name) count_table = app.Table( 'count_table', default=int, ).hopping( timedelta(minutes=10), timedelta(minutes=5), expires=timedelta(minutes=10) ).relative_to_field(??????) @app.agent(faust_topic) async def process_fp(fps): async for fp in fps.group_by(lambda fp: fp["job_id"], name=f"{subject_name}.job_id"): print(fp) Luckily, stream's group_by can be called with callable object, so I can handle it with lambda but table's relative_to_field has no option such like that. A: Short answer: No, you're right, you cannot define relative_to_field without a FieldDescriptor. You can check the definiton of relative_to_field here. Then, this field is extracted here with a getattr, you need a faust.Record for this operation. However, as you use Avro you may use the library dataclasses-avroschema to combine faust.Record and Avro in the same class. Thus, you will be able to use best of both world. This library integrates well with faust as you can see in doc.
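For reference, a minimal sketch of what relative_to_field expects: a FieldDescriptor taken from a faust.Record class whose fields mirror the Avro schema (the field names here are assumptions):

import faust
from datetime import datetime, timedelta

class Fingerprint(faust.Record):
    job_id: str
    created_at: datetime

app = faust.App('sample_app', broker='kafka://192.168.59.100:30887')
faust_topic = app.topic('practice4', value_type=Fingerprint)

count_table = app.Table(
    'count_table',
    default=int,
).hopping(
    timedelta(minutes=10), timedelta(minutes=5), expires=timedelta(minutes=10)
).relative_to_field(Fingerprint.created_at)  # the FieldDescriptor comes from the Record class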
Is there any way to define app.Table without using Record in faust?
I'm currently using schema registry and faust to process stream data. The reason I try to avoid using faust.Record is the schema can be dynamically changed and I don't like to change the code(class inheriting faust.Record) every time it happend. But without faust.Record, it looks like there are many restrictions. For example, app.Table's relative_to_field requires FieldDescriptorT but this class looks stronly coupled with faust.Record Here is the code: import faust from datetime import timedelta from pydantic_avro.base import AvroBase from schema_registry.client import SchemaRegistryClient, schema from schema_registry.serializers.faust import FaustSerializer topic_name = "practice4" subject_name = f"{topic_name}-value" serializer_name = f"{topic_name}_serializer" bootstrap_server = "192.168.59.100:30887" sr_server = "http://localhost:8081" client = SchemaRegistryClient({"url": sr_server}) topic_schema = client.get_schema(subject_name) fp_avro_schema = schema.AvroSchema(topic_schema.schema.raw_schema) avro_fp_serializer = FaustSerializer(client, serializer_name, fp_avro_schema) faust.serializers.codecs.register(name=serializer_name, codec=avro_fp_serializer) app = faust.App('sample_app', broker=bootstrap_server) faust_topic = app.topic(topic_name, value_serializer=serializer_name) count_table = app.Table( 'count_table', default=int, ).hopping( timedelta(minutes=10), timedelta(minutes=5), expires=timedelta(minutes=10) ).relative_to_field(??????) @app.agent(faust_topic) async def process_fp(fps): async for fp in fps.group_by(lambda fp: fp["job_id"], name=f"{subject_name}.job_id"): print(fp) Luckily, stream's group_by can be called with callable object, so I can handle it with lambda but table's relative_to_field has no option such like that.
[ "Short answer: No, you're right, you cannot define relative_to_field without a FieldDescriptor. You can check the definiton of relative_to_field here. Then, this field is extracted here with a getattr, you need a faust.Record for this operation.\nHowever, as you use Avro you may use the library dataclasses-avroschema to combine faust.Record and Avro in the same class. Thus, you will be able to use best of both world.\nThis library integrates well with faust as you can see in doc.\n" ]
[ 0 ]
[]
[]
[ "faust", "python" ]
stackoverflow_0074062969_faust_python.txt
Q: How to use multiple urls with PIP_EXTRA_INDEX_URL I want to configure my pip using environmental variables. I already have two pip index urls. So I'm already using PIP_INDEX_URL and PIP_EXTRA_INDEX_URL variables. PIP_INDEX_URL="https://example.com" PIP_EXTRA_INDEX_URL="https://example2.com" But I want to add one more index url. I don't know how I tried to add it with ; PIP_INDEX_URL="https://example.com" PIP_EXTRA_INDEX_URL="https://example2.com;https://example3.com" But it didn't seem to work A: Pip expects an empty space ( ) to separate the values in environment variables. In this case, for example: PIP_EXTRA_INDEX_URL="https://example2.com https://example3.com" See pip's documentation section "Environment variables".
How to use multiple urls with PIP_EXTRA_INDEX_URL
I want to configure my pip using environmental variables. I already have two pip index urls. So I'm already using PIP_INDEX_URL and PIP_EXTRA_INDEX_URL variables. PIP_INDEX_URL="https://example.com" PIP_EXTRA_INDEX_URL="https://example2.com" But I want to add one more index url. I don't know how I tried to add it with ; PIP_INDEX_URL="https://example.com" PIP_EXTRA_INDEX_URL="https://example2.com;https://example3.com" But it didn't seem to work
[ "Pip expects an empty space ( ) to separate the values in environment variables. In this case, for example:\nPIP_EXTRA_INDEX_URL=\"https://example2.com https://example3.com\"\n\nSee pip's documentation section \"Environment variables\".\n" ]
[ 1 ]
[]
[]
[ "pip", "python" ]
stackoverflow_0074525250_pip_python.txt
Q: Remove last symbol in row My code is: n = 3 for i in range(1, n+1): for j in range(1, n+1): print(j*i, end='*') print(end='\b\n') Result of this code is: 1*2*3* 2*4*6* 3*6*9* But I need expected result like this (without aesthetics in end of rows): 1*2*3 2*4*6 3*6*9 A: Use '*'.join() instead of the end parameter in print() for i in range(1, n + 1): print('*'.join(f'{j * i}' for j in range(1, n + 1)), end='\n') Output 1*2*3 2*4*6 3*6*9
Remove last symbol in row
My code is: n = 3 for i in range(1, n+1): for j in range(1, n+1): print(j*i, end='*') print(end='\b\n') Result of this code is: 1*2*3* 2*4*6* 3*6*9* But I need expected result like this (without aesthetics in end of rows): 1*2*3 2*4*6 3*6*9
[ "Use '*'.join() instead of the end parameter in print()\nfor i in range(1, n + 1):\n print('*'.join(f'{j * i}' for j in range(1, n + 1)), end='\\n')\n\nOutput\n1*2*3\n2*4*6\n3*6*9\n\n" ]
[ 0 ]
[ "Just check if you're not at the last value in the range. If you aren't print '*' if you are don't do anything. View the below code for clarification.\nn = 3\nfor i in range(1, n+1):\n for j in range(1, n+1):\n print(j*i, end='')\n if j != n:\n print('*', end='')\n print(end='\\b\\n')\n\n", "Using list comprehension we create a list to print in one line and then you can use '*' to expand the list. Then we use 'sep' (separator) param of print function to join them\nn = 3\nfor i in range(1, n+1):\n print(*[j*i for j in range(1, n+1)], sep='*', end=\"\\n\")\n\n", "Solution:\nn = int(input())\nfor i in range(1, n+1):\n for j in range(1, n+1):\n if(j*i == n*n):\n print(j*i)\n else:\n print(j*i, end='*')\n \n print(end='\\b\\n')\n\n" ]
[ -1, -1, -1 ]
[ "python" ]
stackoverflow_0074530598_python.txt
Q: Element Tree - Iterate dictionary to append elements to new line xml I am attempting to append elements to an existing .xml using ElementTree. I have the desired attributes stored as a list of dictionaries: myDict = [{"name": "dan", "age": "25", "subject":"maths"}, {"name": "susan", "age": "27", "subject":"english"}, {"name": "leo", "age": "24", "subject":"psychology"}] And I use the following code for the append: import xml.etree.ElementTree as ET tree = ET.parse('<path to existing .xml') root = tree.getroot() for x,y in enumerate(myDict): root.append(ET.Element("student", attrib=myDict[x])) tree.write('<path to .xml>') This works mostly fine except that all elements are appended as a single line. I'd like to make each element append to be on a new line: # Not this: <student name='dan' age='25' subject='maths' /><student name='susan' age='27' subject='english' /><student name='leo' age='24' subject='psychology' /> # But this: <student name='dan' age='25' subject='maths' /> <student name='susan' age='27' subject='english' /> <student name='leo' age='24' subject='psychology' /> I have attempted use lxml and pass the pretty_print=True argument within the tree.write call but it had no effect. I'm sure I'm missing something simple here, so your help is appreciated! A: With pointers from here (Thanks @Thicc_Gandhi), I solved it by amending the iteration to: for x,y in enumerate(MyDict): elem = ET.Element("student",attrib=myDict[x]) elem.tail = "\n" root.append(elem)
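On Python 3.9 or newer (an assumption about the environment), ElementTree can also pretty-print the whole tree after the appends, which avoids touching tail on each element:

import xml.etree.ElementTree as ET

tree = ET.parse('students.xml')  # placeholder path
root = tree.getroot()

for attrs in myDict:  # iterate the dicts directly; enumerate is not needed here
    root.append(ET.Element("student", attrib=attrs))

ET.indent(tree, space="  ")  # available since Python 3.9
tree.write('students.xml')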
Element Tree - Iterate dictionary to append elements to new line xml
I am attempting to append elements to an existing .xml using ElementTree. I have the desired attributes stored as a list of dictionaries: myDict = [{"name": "dan", "age": "25", "subject":"maths"}, {"name": "susan", "age": "27", "subject":"english"}, {"name": "leo", "age": "24", "subject":"psychology"}] And I use the following code for the append: import xml.etree.ElementTree as ET tree = ET.parse('<path to existing .xml') root = tree.getroot() for x,y in enumerate(myDict): root.append(ET.Element("student", attrib=myDict[x])) tree.write('<path to .xml>') This works mostly fine except that all elements are appended as a single line. I'd like to make each element append to be on a new line: # Not this: <student name='dan' age='25' subject='maths' /><student name='susan' age='27' subject='english' /><student name='leo' age='24' subject='psychology' /> # But this: <student name='dan' age='25' subject='maths' /> <student name='susan' age='27' subject='english' /> <student name='leo' age='24' subject='psychology' /> I have attempted use lxml and pass the pretty_print=True argument within the tree.write call but it had no effect. I'm sure I'm missing something simple here, so your help is appreciated!
[ "With pointers from here (Thanks @Thicc_Gandhi), I solved it by amending the iteration to:\nfor x,y in enumerate(MyDict):\n elem = ET.Element(\"student\",attrib=myDict[x])\n elem.tail = \"\\n\"\n root.append(elem)\n\n" ]
[ 0 ]
[]
[]
[ "elementtree", "python", "xml" ]
stackoverflow_0074530128_elementtree_python_xml.txt
Q: Set the value in Selenium with python I'm trying to set the value in this field This is the inpect code: <dnx-textfield label="First / Leading IP Address" placeholder="" name="primaryIpv4Address" hint="" maxwidth="300px" error="" regex="" message="" validator="" regextype="" type="text" validateon="onblur" hinthelper="" inputid="management-DnxTextfield-deploymentModel" errorhelper="" expandable="" showhide="" source="none" errormsg="" maxlength="2000" autocomplete="off" defined="" required="" data-analyticsid="dnx-textfield[name='primaryIpv4Address']" value="" isfocus=""> <div> <div style="display: flex; flex-direction: row;"><div class="dnx-shadow form-group floating-label _float " style="width: 300px;"> <div id="dnx_inputfield_containermanagement-DnxTextfield-deploymentModel" class="dnx_inputfield_container"> <input id="management-DnxTextfield-deploymentModel" class="form-control value" type="text" placeholder="" autocomplete="off" data-testid="dnxTextfield-input" maxlength="2000"></div><label class="control-label " for="management-DnxTextfield-deploymentModel">First / Leading IP Address<span class="required " style="color: red;">*</span></label><small>&nbsp;</small></div> </div> </div> </dnx-textfield> where I tried to fill 192.168.1.1 in value="" <dnx-textfield label="First / Leading IP Address" ... value="" isfocus=""> But didn't work with several trial. - driver.find_element(By.XPATH, "//*[@name='primaryIpv4Address' and @label='First / Leading IP Address']").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.XPATH, "//input[@id='management-DnxTextfield-deploymentModel']").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.ID, "management-DnxTextfield-deploymentModel").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.XPATH, "//*[@data-testid='dnxTextfield-input' and @id='management-DnxTextfield-deploymentModel']").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.XPATH, "//*[@id='management-DnxTextfield-deploymentModel']").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.XPATH, "//*[@id='management-DnxTextfield-primarySubnetIpv4Address']").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.XPATH, "//*[@name='primaryIpv4Address' and @label='First / Leading IP Address']").send_keys("192.168.1.1"+Keys.TAB) Is there anyone who can help and take a look at this? Thanks! A: Below is the Java code using which you can do it. You need to use java script executor. WebElement element = driver.findElement(By.xpath("enter the xpath here")); JavascriptExecutor jse = (JavascriptExecutor)driver; jse.executeScript("arguments[0].value='enter the value here';", element);
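The Java snippet above translates fairly directly to Python. A sketch, not tested against this particular page, where the extra input/change events are an assumption that the dnx-textfield web component listens for them rather than reading the attribute directly:
from selenium.webdriver.common.by import By

# driver is the existing webdriver instance from the question
ip_input = driver.find_element(By.ID, "management-DnxTextfield-deploymentModel")
driver.execute_script(
    "arguments[0].value = arguments[1];"
    "arguments[0].dispatchEvent(new Event('input', {bubbles: true}));"
    "arguments[0].dispatchEvent(new Event('change', {bubbles: true}));",
    ip_input,
    "192.168.1.1",
)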
Set the value in Selenium with python
I'm trying to set the value in this field This is the inpect code: <dnx-textfield label="First / Leading IP Address" placeholder="" name="primaryIpv4Address" hint="" maxwidth="300px" error="" regex="" message="" validator="" regextype="" type="text" validateon="onblur" hinthelper="" inputid="management-DnxTextfield-deploymentModel" errorhelper="" expandable="" showhide="" source="none" errormsg="" maxlength="2000" autocomplete="off" defined="" required="" data-analyticsid="dnx-textfield[name='primaryIpv4Address']" value="" isfocus=""> <div> <div style="display: flex; flex-direction: row;"><div class="dnx-shadow form-group floating-label _float " style="width: 300px;"> <div id="dnx_inputfield_containermanagement-DnxTextfield-deploymentModel" class="dnx_inputfield_container"> <input id="management-DnxTextfield-deploymentModel" class="form-control value" type="text" placeholder="" autocomplete="off" data-testid="dnxTextfield-input" maxlength="2000"></div><label class="control-label " for="management-DnxTextfield-deploymentModel">First / Leading IP Address<span class="required " style="color: red;">*</span></label><small>&nbsp;</small></div> </div> </div> </dnx-textfield> where I tried to fill 192.168.1.1 in value="" <dnx-textfield label="First / Leading IP Address" ... value="" isfocus=""> But didn't work with several trial. - driver.find_element(By.XPATH, "//*[@name='primaryIpv4Address' and @label='First / Leading IP Address']").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.XPATH, "//input[@id='management-DnxTextfield-deploymentModel']").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.ID, "management-DnxTextfield-deploymentModel").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.XPATH, "//*[@data-testid='dnxTextfield-input' and @id='management-DnxTextfield-deploymentModel']").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.XPATH, "//*[@id='management-DnxTextfield-deploymentModel']").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.XPATH, "//*[@id='management-DnxTextfield-primarySubnetIpv4Address']").send_keys("192.168.1.1"+Keys.TAB) - driver.find_element(By.XPATH, "//*[@name='primaryIpv4Address' and @label='First / Leading IP Address']").send_keys("192.168.1.1"+Keys.TAB) Is there anyone who can help and take a look at this? Thanks!
[ "Below is the Java code using which you can do it. You need to use java script executor.\n WebElement element = driver.findElement(By.xpath(\"enter the xpath here\"));\n JavascriptExecutor jse = (JavascriptExecutor)driver;\n jse.executeScript(\"arguments[0].value='enter the value here';\", element);\n\n" ]
[ 0 ]
[]
[]
[ "python", "selenium", "selenium_webdriver" ]
stackoverflow_0074530221_python_selenium_selenium_webdriver.txt
Q: Select during vs. after insert produces different results My database is written to every second during certain hours. It's also read from during same hours, every minute. The read outputs different values during operational hours vs. after hours. Might be data is not written when I read. How to fix this or make sure data for last minute is complete before reading? Would a different database do better? How I am reading: conn = sqlite3.connect(f'{loc_tick}/tick.db', detect_types=sqlite3.PARSE_DECLTYPES, timeout=20, isolation_level=None) select_statement = f"select * from symfut WHERE timestamp >= date('now', '-10 days')" m1df = pd.read_sql(select_statement, conn) Write: conn = sqlite3.connect('tick.db', detect_types=sqlite3.PARSE_DECLTYPES, timeout=20,isolation_level=None) c = conn.cursor() c.execute('PRAGMA journal_mode=wal') c.execute('PRAGMA wal_autocheckpoint = 100') c.execute('INSERT INTO symfut (timestamp, c, b, a) VALUES (?,?,?,?)', (timestamp, c, b, a)) A: While SQLite does not support full concurrency, it doesn't mean that it can't correctly address your needs. Using Postgress or a different DB could actually worsen the problem. Sqlite is highly reliable and is absolutely deterministic in its behaviour and is fully capable of handling scenarios with one writer and many readers, if all the processes are on the same machine and the db file is not on a network. When a write transaction ends, even in WAL mode, every subsequent read is guaranteed to read the updated values. Your problem could be related to this: when you are reading, the write transaction is still open and not completed. This can happen if reader and writer are separate processes which are not synchronized. Using a different DB will not resolve it, you must provide a synchronization mechanism. When you write you provide a timestamp. How is it calculated? Is it in UTC timezone? How much time passes from when you calculate the timestamp and when you write it? Note that the INSERT may have to wait up to your timeout before executing. When you read you use 'now - 10 days', which is calulated in UTC timezone and resolved at the time the SELECT is executed, without any delay. Combine this with the timestamp INSERTED and this could make some difference between operational hours and after hours. If you INSERT a row every second, your SELECT will return hundreds of thousands of rows and will probably need a few seconds to execute. Since Sqlite guarantees consistency, rows inserted while the select executes will not be considered. This could be another point of difference between executing the select while rows are inserted or in the off hours.
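Acting on the first two points, here is a sketch of the writer side (same symfut table; the timestamp format is an assumption that should match what the reader's date('now', '-10 days') comparison expects). The timestamp is taken in UTC immediately before the INSERT, and the default isolation level is kept so the with conn: block wraps the write in a short, explicitly committed transaction that a concurrent reader never sees half-open:
import sqlite3
from datetime import datetime, timezone

conn = sqlite3.connect('tick.db', detect_types=sqlite3.PARSE_DECLTYPES, timeout=20)
conn.execute('PRAGMA journal_mode=wal')

def write_tick(c, b, a):
    # UTC timestamp taken right before the INSERT, matching SQLite's date('now') clock
    ts = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
    with conn:  # opens a transaction and commits it as soon as the block ends
        conn.execute('INSERT INTO symfut (timestamp, c, b, a) VALUES (?,?,?,?)',
                     (ts, c, b, a))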
Select during vs. after insert produces different results
My database is written to every second during certain hours. It's also read from during same hours, every minute. The read outputs different values during operational hours vs. after hours. Might be data is not written when I read. How to fix this or make sure data for last minute is complete before reading? Would a different database do better? How I am reading: conn = sqlite3.connect(f'{loc_tick}/tick.db', detect_types=sqlite3.PARSE_DECLTYPES, timeout=20, isolation_level=None) select_statement = f"select * from symfut WHERE timestamp >= date('now', '-10 days')" m1df = pd.read_sql(select_statement, conn) Write: conn = sqlite3.connect('tick.db', detect_types=sqlite3.PARSE_DECLTYPES, timeout=20,isolation_level=None) c = conn.cursor() c.execute('PRAGMA journal_mode=wal') c.execute('PRAGMA wal_autocheckpoint = 100') c.execute('INSERT INTO symfut (timestamp, c, b, a) VALUES (?,?,?,?)', (timestamp, c, b, a))
[ "While SQLite does not support full concurrency, it doesn't mean that it can't correctly address your needs. Using Postgress or a different DB could actually worsen the problem.\nSqlite is highly reliable and is absolutely deterministic in its behaviour and is fully capable of handling scenarios with one writer and many readers, if all the processes are on the same machine and the db file is not on a network.\nWhen a write transaction ends, even in WAL mode, every subsequent read is guaranteed to read the updated values.\nYour problem could be related to this:\n\nwhen you are reading, the write transaction is still open and not completed. This can happen if reader and writer are separate processes which are not synchronized. Using a different DB will not resolve it, you must provide a synchronization mechanism.\n\nWhen you write you provide a timestamp. How is it calculated? Is it in UTC timezone? How much time passes from when you calculate the timestamp and when you write it? Note that the INSERT may have to wait up to your timeout before executing.\n\nWhen you read you use 'now - 10 days', which is calulated in UTC timezone and resolved at the time the SELECT is executed, without any delay. Combine this with the timestamp INSERTED and this could make some difference between operational hours and after hours.\n\nIf you INSERT a row every second, your SELECT will return hundreds of thousands of rows and will probably need a few seconds to execute. Since Sqlite guarantees consistency, rows inserted while the select executes will not be considered. This could be another point of difference between executing the select while rows are inserted or in the off hours.\n\n\n" ]
[ 0 ]
[]
[]
[ "python", "sqlite" ]
stackoverflow_0074525915_python_sqlite.txt
Q: import cv2 in python in vs code not working The python code I wanted to run: import cv2 print(cv2.__verion__) The Error code I am getting: Windows PowerShell Copyright (C) Microsoft Corporation. All rights reserved. Try the new cross-platform PowerShell https://aka.ms/pscore6 PS D:\Programme\Visual Studio\New Projects> & C:/Users/Florian/AppData/Local/Programs/Python/Python310/python.exe "d:/Programme/Visual Studio/New Projects/Python/opencv_test.py" Traceback (most recent call last): File "d:\Programme\Visual Studio\New Projects\Python\opencv_test.py", line 1, in <module> import cv2 ModuleNotFoundError: No module named 'cv2' But if I type the exact same code in the Terminal it works: PS D:\Programme\Visual Studio\New Projects> python Python 3.9.9 (tags/v3.9.9:ccb0e6a, Nov 15 2021, 18:08:50) [MSC v.1929 64 bit (AMD64)] on win32 Type "help", "copyright", "credits" or "license" for more information. >>> import cv2 >>> print(cv2.__version__) 4.5.4 >>> So far I installed python via Command Prompt and via Microsoft Store. And currently I am running the verion 3.9.9 PS D:\Programme\Visual Studio\New Projects> python --version Python 3.9.9 I installed opencv in the Command Prompt: PS C:\Users\Florian> pip install opencv-python Requirement already satisfied: opencv-python in c:\users\florian\appdata\local\packages\pythonsoftwarefoundation.python.3.9_qbz5n2kfra8p0\localcache\local-packages\python39\site-packages (4.5.4.60) Requirement already satisfied: numpy>=1.19.3 in c:\users\florian\appdata\local\packages\pythonsoftwarefoundation.python.3.9_qbz5n2kfra8p0\localcache\local-packages\python39\site-packages (from opencv-python) (1.21.5) PS C:\Users\Florian> I searched every Forum I could think of, but cant find a fix. Please help A: I had the same issue in VScode using python 3.9.6. I switched to python 3.10.7 and it worked fine. A: in your VScode press Ctrl+Shift+P and then type "Python: Select Interpreter" then select your python Interpreter and Run it again. A: Try running pip3.10 install opencv-python. Because: cv2 is a part of opencv-python and/or you might not have installed the module in the right python folder version.
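A quick way to confirm the interpreter mismatch described in the answers is to print which executable actually runs the script from inside VS Code, then install OpenCV into exactly that interpreter (a sketch; the path is whatever gets printed on the machine in question):
import sys
print(sys.executable)   # e.g. C:/Users/.../Python310/python.exe
# then, in a terminal, install into that same interpreter:
#   <printed path> -m pip install opencv-python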
import cv2 in python in vs code not working
The python code I wanted to run: import cv2 print(cv2.__verion__) The Error code I am getting: Windows PowerShell Copyright (C) Microsoft Corporation. All rights reserved. Try the new cross-platform PowerShell https://aka.ms/pscore6 PS D:\Programme\Visual Studio\New Projects> & C:/Users/Florian/AppData/Local/Programs/Python/Python310/python.exe "d:/Programme/Visual Studio/New Projects/Python/opencv_test.py" Traceback (most recent call last): File "d:\Programme\Visual Studio\New Projects\Python\opencv_test.py", line 1, in <module> import cv2 ModuleNotFoundError: No module named 'cv2' But if I type the exact same code in the Terminal it works: PS D:\Programme\Visual Studio\New Projects> python Python 3.9.9 (tags/v3.9.9:ccb0e6a, Nov 15 2021, 18:08:50) [MSC v.1929 64 bit (AMD64)] on win32 Type "help", "copyright", "credits" or "license" for more information. >>> import cv2 >>> print(cv2.__version__) 4.5.4 >>> So far I installed python via Command Prompt and via Microsoft Store. And currently I am running the verion 3.9.9 PS D:\Programme\Visual Studio\New Projects> python --version Python 3.9.9 I installed opencv in the Command Prompt: PS C:\Users\Florian> pip install opencv-python Requirement already satisfied: opencv-python in c:\users\florian\appdata\local\packages\pythonsoftwarefoundation.python.3.9_qbz5n2kfra8p0\localcache\local-packages\python39\site-packages (4.5.4.60) Requirement already satisfied: numpy>=1.19.3 in c:\users\florian\appdata\local\packages\pythonsoftwarefoundation.python.3.9_qbz5n2kfra8p0\localcache\local-packages\python39\site-packages (from opencv-python) (1.21.5) PS C:\Users\Florian> I searched every Forum I could think of, but cant find a fix. Please help
[ "I had the same issue in VScode using python 3.9.6. I switched to python 3.10.7 and it worked fine.\n", "in your VScode press Ctrl+Shift+P and then type \"Python: Select Interpreter\" then select your python Interpreter and Run it again.\n", "Try running\npip3.10 install opencv-python.\nBecause:\n\ncv2 is a part of opencv-python\nand/or you might not have installed the module in the right python folder version.\n\n" ]
[ 1, 0, 0 ]
[]
[]
[ "opencv", "python", "visual_studio_code" ]
stackoverflow_0070451971_opencv_python_visual_studio_code.txt
Q: google colab import imagemagick PolicyError: not authorized `file.pdf' used google colab. had to install imagemagick as a dependency for pdfplumber lib to work. !apt install imagemagick also !pip install pdfplumber then my code looked like this - pdf = pdfplumber.open("file.pdf") # Import the PDF. page = pdf.pages[0] im = page.to_image() im when running this piece of code got this error - --------------------------------------------------------------------------- PolicyError Traceback (most recent call last) <ipython-input-56-61f21a06c7de> in <module> ---> 14 im = page.to_image() 5 frames /usr/local/lib/python3.7/dist-packages/wand/resource.py in raise_exception(self, stacklevel) 223 warnings.warn(e, stacklevel=stacklevel + 1) 224 elif isinstance(e, Exception): --> 225 raise e 226 227 def make_blob(self, format=None): PolicyError: not authorized `file.pdf' @ error/constitute.c/ReadImage/412 A: found this answer helpful (with the help of pdfplumber team) going into etc/ImageMagick-6/policy.xml Had to change this: <policy domain="coder" rights="none" pattern="PDF"/> to this: <policy domain="coder" rights="read|write" pattern="PDF"/> then ran again and the photo appeared. Solved it for me :)
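In Colab the same policy change can be scripted instead of edited by hand, since the notebook runs as root. A sketch (the path matches the one above, but it can differ between ImageMagick versions):
policy_path = "/etc/ImageMagick-6/policy.xml"

with open(policy_path) as f:
    policy = f.read()

# relax only the PDF coder rule, leaving the rest of the policy untouched
policy = policy.replace('<policy domain="coder" rights="none" pattern="PDF"/>',
                        '<policy domain="coder" rights="read|write" pattern="PDF"/>')

with open(policy_path, "w") as f:
    f.write(policy)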
google colab import imagemagick PolicyError: not authorized `file.pdf'
used google colab. had to install imagemagick as a dependency for pdfplumber lib to work. !apt install imagemagick also !pip install pdfplumber then my code looked like this - pdf = pdfplumber.open("file.pdf") # Import the PDF. page = pdf.pages[0] im = page.to_image() im when running this piece of code got this error - --------------------------------------------------------------------------- PolicyError Traceback (most recent call last) <ipython-input-56-61f21a06c7de> in <module> ---> 14 im = page.to_image() 5 frames /usr/local/lib/python3.7/dist-packages/wand/resource.py in raise_exception(self, stacklevel) 223 warnings.warn(e, stacklevel=stacklevel + 1) 224 elif isinstance(e, Exception): --> 225 raise e 226 227 def make_blob(self, format=None): PolicyError: not authorized `file.pdf' @ error/constitute.c/ReadImage/412
[ "found this answer helpful (with the help of pdfplumber team)\ngoing into etc/ImageMagick-6/policy.xml\nHad to change this:\n<policy domain=\"coder\" rights=\"none\" pattern=\"PDF\"/>\n\nto this:\n<policy domain=\"coder\" rights=\"read|write\" pattern=\"PDF\"/>\n\nthen ran again and the photo appeared. Solved it for me :)\n" ]
[ 1 ]
[]
[]
[ "google_colaboratory", "imagemagick", "pdf", "python" ]
stackoverflow_0074530824_google_colaboratory_imagemagick_pdf_python.txt
Q: Changing a column type in a very large pandas dataframe is too slow I have a very large dataframe, around 80GB. I want to change the type of some of its columns from object to category. Trying to do it this way: df[col_name] = df[col_name].astype('category') Takes around 1 minute per column, which is a lot. My first question would be why does it take that long? Just running: df[col_name].astype('category') takes just around 1 second. I tried something like: temp = df[col_name].astype('category') df.drop(columns=[col_name]) df[col_name] = temp In this case it turns out that dropping the column is also very slow. Now, I also tried replacing drop by del, that is temp = df[col_name].astype('category') del df[col_name] df[col_name] = temp Surprisingly (for me) this was very fast. So My second question is why is del so much faster than drop in this case? What is the most "correct" way of doing this conversion, and what is the most efficient (hopefully they are the same)? Thanks A: You could use something like df['col_name'].values.astype('category') A: If reassigning the column was the slow operation, doing the conversion in-place should speed up the process : df[col_name].astype('category', inplace = True)
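If the real goal is to convert several object columns, one option worth trying is a single astype call with a dict, so the frame is rebuilt once rather than reassigned column by column. A sketch with placeholder column names (whether this avoids the slow path above depends on the pandas version and the frame's internal block layout):
# df is the large frame from the question; the column names are placeholders
cat_cols = ["col_a", "col_b", "col_c"]
df = df.astype({col: "category" for col in cat_cols})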
Changing a column type in a very large pandas dataframe is too slow
I have a very large dataframe, around 80GB. I want to change the type of some of its columns from object to category. Trying to do it this way: df[col_name] = df[col_name].astype('category') Takes around 1 minute per column, which is a lot. My first question would be why does it take that long? Just running: df[col_name].astype('category') takes just around 1 second. I tried something like: temp = df[col_name].astype('category') df.drop(columns=[col_name]) df[col_name] = temp In this case it turns out that dropping the column is also very slow. Now, I also tried replacing drop by del, that is temp = df[col_name].astype('category') del df[col_name] df[col_name] = temp Surprisingly (for me) this was very fast. So My second question is why is del so much faster than drop in this case? What is the most "correct" way of doing this conversion, and what is the most efficient (hopefully they are the same)? Thanks
[ "You could use something like\ndf['col_name'].values.astype('category')\n\n", "If reassigning the column was the slow operation, doing the conversion in-place should speed up the process :\ndf[col_name].astype('category', inplace = True)\n\n" ]
[ 0, 0 ]
[]
[]
[ "dataframe", "dtype", "pandas", "python" ]
stackoverflow_0072807703_dataframe_dtype_pandas_python.txt
Q: How do I redirect from one flask app to another flask app with url parameters I have a Python application in which for one specific API, I am trying to redirect it to another API present in another Flask application. To achieve this, I am using the below code: ` @app.route('/hello') def hello_name(name): return redirect("http://localhost:8000/hello", 302) ` Now, if I try to access my API by appending query parameters like http://localhost:6000/hello?name=Sidharth, it should be redirected to http://localhost:8000/hello?name=Sidharth. Can I get an advice on how this can be done? I looked online and found that most of the posts are advising usage of url_for() but since I don't want to redirect to another view, I don't think url_for() will be beneficial in my case. With the code that I have written now, the query parameters are not being added to the redirected url.
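One common way to forward whatever query string arrives, without url_for, is to rebuild the target URL from request.query_string. A sketch, not tested against the exact two-app setup described:
from flask import Flask, redirect, request

app = Flask(__name__)

@app.route('/hello')
def hello_name():
    target = "http://localhost:8000/hello"
    qs = request.query_string.decode()      # e.g. "name=Sidharth"
    if qs:
        target = f"{target}?{qs}"
    return redirect(target, 302)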
How do I redirect from one flask app to another flask app with url parameters
I have a Python application in which for one specific API, I am trying to redirect it to another API present in another Flask application. To achieve this, I am using the below code: ` @app.route('/hello') def hello_name(name): return redirect("http://localhost:8000/hello", 302) ` Now, if I try to access my API by appending query parameters like http://localhost:6000/hello?name=Sidharth, it should be redirected to http://localhost:8000/hello?name=Sidharth. Can I get an advice on how this can be done? I looked online and found that most of the posts are advising usage of url_for() but since I don't want to redirect to another view, I don't think url_for() will be beneficial in my case. With the code that I have written now, the query parameters are not being added to the redirected url.
[]
[]
[ "Try to use HTTP status code 307 Internal Redirect instead of 302 like below:-\n@app.route('/hello/')\ndef hello_name(name):\n return redirect(url_for('http://localhost:8000/hello', args1=name), code=307)\n\n" ]
[ -1 ]
[ "flask", "flask_restful", "python", "python_3.x", "redirect" ]
stackoverflow_0074528621_flask_flask_restful_python_python_3.x_redirect.txt
Q: How can I use my tensorflow/keras CNN model to predict from my camera (that I loaded in) with this code? code for predicting on live camera It's fairly simple what I am trying to do, loading my tensorflow AI from file. trying to use it to predict on my live webcam (through google.colab). I am trying to predict with the AI I made (using the code in the link), saved and loaded (using tensorflow: model.save and load_model) I have a copy of this and I am loading in my own model instead like this: model = load_model("/content/drive/MyDrive/aifolder") # Load from folder saved to. they download theirs from online using: #face_cascade = cv2.CascadeClassifier(cv2.samples.findFile(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')) under the code titled Webcam Videos, at the very bottom, they start predicting with the AI at: faces = face_cascade.detectMultiScale(gray) I replaced this line with my AI model like this: #predictions = model.detectMultiScale(gray) # ( code Commented out) predictions = model.predict(gray) #Predict through webcam I am unsure how to calibrate this code to be suitable for my tensorflow AI. I really feel that I am missing something very simple to do this (like a simple command). I am a beginner so I am not sure what it should be. edit: I have loaded in my model using model.load_model("filepath here") and I have now done: classes = model.predict(gray) (Instead of the previous way that the cascade model was used) and I get this error: ValueError: Input 0 of layer "model" is incompatible with the layer: expected shape=(None, 224, 224, 3), found shape=(32, 640). I have tried changing the shape which I can change the "640", but I cannot find out how to change the number "32" so that I can process this into the first layer. A: You can use tensorflow.image.resize API to resize the image. The error could be due to a shape mismatch between the Input Layer's shape and the shape of the image that is passed to the model while predicting. import tensorflow as tf img=tf.keras.utils.load_img(path of the image) img=tf.keras.utils.img_to_array(img) print(img.shape) # (272, 185) # Resize the image img=tf.image.resize(img,(224,224)) print(img.shape)# (224,224,3) Then you can predict as follows after loading the model classes = model.predict(img) Thank you!
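Putting the resize together with a batch dimension, since the model expects a 4-D input of shape (None, 224, 224, 3). A sketch with a stand-in frame; a real run would use the RGB frame captured from the webcam and the model loaded earlier from /content/drive/MyDrive/aifolder:
import numpy as np
import tensorflow as tf

frame_rgb = np.zeros((480, 640, 3), dtype=np.float32)   # stand-in for one RGB webcam frame

img = tf.image.resize(frame_rgb, (224, 224))   # -> (224, 224, 3)
img = tf.expand_dims(img, axis=0)              # -> (1, 224, 224, 3), a batch of one
classes = model.predict(img)                   # model loaded earlier with load_model(...)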
How can I use my tensorflow/keras CNN model to predict from my camera (that I loaded in) with this code?
code for predicting on live camera It's fairly simple what I am trying to do, loading my tensorflow AI from file. trying to use it to predict on my live webcam (through google.colab). I am trying to predict with the AI I made (using the code in the link), saved and loaded (using tensorflow: model.save and load_model) I have a copy of this and I am loading in my own model instead like this: model = load_model("/content/drive/MyDrive/aifolder") # Load from folder saved to. they download theirs from online using: #face_cascade = cv2.CascadeClassifier(cv2.samples.findFile(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')) under the code titled Webcam Videos, at the very bottom, they start predicting with the AI at: faces = face_cascade.detectMultiScale(gray) I replaced this line with my AI model like this: #predictions = model.detectMultiScale(gray) # ( code Commented out) predictions = model.predict(gray) #Predict through webcam I am unsure how to calibrate this code to be suitable for my tensorflow AI. I really feel that I am missing something very simple to do this (like a simple command). I am a beginner so I am not sure what it should be. edit: I have loaded in my model using model.load_model("filepath here") and I have now done: classes = model.predict(gray) (Instead of the previous way that the cascade model was used) and I get this error: ValueError: Input 0 of layer "model" is incompatible with the layer: expected shape=(None, 224, 224, 3), found shape=(32, 640). I have tried changing the shape which I can change the "640", but I cannot find out how to change the number "32" so that I can process this into the first layer.
[ "You can use tensorflow.image.resize API to resize the image. The error could be due to a shape mismatch between the Input Layer's shape and the shape of the image that is passed to the model while predicting.\nimport tensorflow as tf \n\nimg=tf.keras.utils.load_img(path of the image)\nimg=tf.keras.utils.img_to_array(img)\nprint(img.shape) # (272, 185)\n# Resize the image\nimg=tf.image.resize(img,(224,224))\nprint(img.shape)# (224,224,3)\n\nThen you can predict as follows after loading the model\nclasses = model.predict(img)\n\nThank you!\n" ]
[ 0 ]
[]
[]
[ "artificial_intelligence", "conv_neural_network", "python", "tensorflow", "webcam" ]
stackoverflow_0074511932_artificial_intelligence_conv_neural_network_python_tensorflow_webcam.txt
Q: Averaging of several values I have a dataset (df3) with five columns x, y, r, g and b, although I only need to work with x, y and r. I want to find the average of all the consecutive rows in which the value of r is equal and store it in a database (df_final). To do this, I have generated a code that stores all the values in which r is equal to the one in previous row in a temporary database (df_inter), to later store the average of all the values in the final database (df_final). The code is this one: d = {'x':[1,2,3,4,5,6,7],'y':[1,1,1,1,1,1,1],'r':[2,2,2,1,1,3,2]} df3 = pd.Dataframe(data=d) for i in range(len(df3)): if df3.iloc[i,3] == df3.iloc[i-1,3]: df_inter = pd.DataFrame(columns=['x','y', 'r']) df_inter.append(df3.iloc[i,1],df3.iloc[i,2],df3.iloc[i,3]) df_inter.to_csv(f'Resultados/df_inter.csv', index=False, sep=',') else: df_final.append(df_inter['x'].mean(),df_inter['y'].mean(),df_inter['r'].mean()) del [[df_inter]] gc.collect() df_inter=pd.DataFrame() df_inter = pd.DataFrame(columns=['x','y', 'r']) df_inter.append(df3.iloc[i,1],df3.iloc[i,2],df3.iloc[i,3]) df_final.to_csv(f'Resultados/df_final.csv', index=False, sep=',') The objective is from a dataset for example like this: x y r 1 1 2 2 1 2 3 1 2 4 1 1 5 1 1 6 1 3 7 1 2 Get something like this: x y r 2 1 2 4.5 1 1 6 1 3 7 1 2 Nevertheless, when I execute the code I get this error message: TypeError: cannot concatenate object of type '<class 'numpy.int64'>'; only Series and DataFrame objs are valid I'm not sure what the problem is or even if there is a code more efficient for the purpose. Please, I would be grateful if you could help me. Thank you in advance. Irene I solved it. The right code for my purpose would be: d = {'x':[1,2,3,4,5,6,7],'y':[1,1,1,1,1,1,1],'r':[2,2,2,1,1,3,2]} df3 = pd.Dataframe(data=d) df_inter = pd.DataFrame(columns=['x','y', 'r']) df_final = pd.DataFrame(columns=['x','y','r']) for i in df3.index.values: if df3.iloc[i,2] == df3.iloc[i-1,2]: df_inter = df_inter.append({'x':df3.iloc[i,0],'y':df3.iloc[i,1],'r':df3.iloc[i,2]}, ignore_index=True) else: df_final = df_final.append({'x':df_inter['x'].mean(),'y':df_inter['y'].mean(),'r':df_inter['r'].mean()}, ignore_index=True) df_inter = pd.DataFrame(columns=['x','y', 'r']) df_inter = df_inter.append({'x':df3.iloc[i,0],'y':df3.iloc[i,1],'r':df3.iloc[i,2]}, ignore_index=True) df_final = df_final.append({'x':df_inter['x'].mean(),'y':df_inter['y'].mean(),'r':df_inter['r'].mean()}, ignore_index=True) df_final.to_csv(f'Resultados/df_final.csv', index=False, sep=',') A: You may want to append to the end of the dataframe using df_inter = df_inter.append({'x':df3.iloc[i,1],'y':df3.iloc[i,2],'r':df3.iloc[i,3]}, ignore_index=True) A: If you have some knowledge of SQL, it can be intuitively done using sqldf and pandas: import sqldf import pandas as pd df = pd.DataFrame({"class":[1,1,1,2,2,2,1,2,2,1],"value":[10,10,12,11,15,17,98,23,22,0]}) averages = sqldf.run(""" SELECT class,AVG(value) FROM df GROUP BY class """) The output being: class AVG(value) 0 1 26.0 1 2 17.6 Is that what you are looking for ?
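For reference, the loop in the final version can usually be replaced by grouping on runs of consecutive equal r values, which is shorter and avoids repeated DataFrame.append calls. A sketch using the same sample data:
import pandas as pd

d = {'x': [1, 2, 3, 4, 5, 6, 7], 'y': [1, 1, 1, 1, 1, 1, 1], 'r': [2, 2, 2, 1, 1, 3, 2]}
df3 = pd.DataFrame(data=d)

# a new run id starts every time r differs from the previous row
run_id = (df3['r'] != df3['r'].shift()).cumsum()
df_final = df3.groupby(run_id).mean().reset_index(drop=True)
print(df_final)
#      x    y    r
# 0  2.0  1.0  2.0
# 1  4.5  1.0  1.0
# 2  6.0  1.0  3.0
# 3  7.0  1.0  2.0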
Averaging of several values
I have a dataset (df3) with five columns x, y, r, g and b, although I only need to work with x, y and r. I want to find the average of all the consecutive rows in which the value of r is equal and store it in a database (df_final). To do this, I have generated a code that stores all the values in which r is equal to the one in previous row in a temporary database (df_inter), to later store the average of all the values in the final database (df_final). The code is this one: d = {'x':[1,2,3,4,5,6,7],'y':[1,1,1,1,1,1,1],'r':[2,2,2,1,1,3,2]} df3 = pd.Dataframe(data=d) for i in range(len(df3)): if df3.iloc[i,3] == df3.iloc[i-1,3]: df_inter = pd.DataFrame(columns=['x','y', 'r']) df_inter.append(df3.iloc[i,1],df3.iloc[i,2],df3.iloc[i,3]) df_inter.to_csv(f'Resultados/df_inter.csv', index=False, sep=',') else: df_final.append(df_inter['x'].mean(),df_inter['y'].mean(),df_inter['r'].mean()) del [[df_inter]] gc.collect() df_inter=pd.DataFrame() df_inter = pd.DataFrame(columns=['x','y', 'r']) df_inter.append(df3.iloc[i,1],df3.iloc[i,2],df3.iloc[i,3]) df_final.to_csv(f'Resultados/df_final.csv', index=False, sep=',') The objective is from a dataset for example like this: x y r 1 1 2 2 1 2 3 1 2 4 1 1 5 1 1 6 1 3 7 1 2 Get something like this: x y r 2 1 2 4.5 1 1 6 1 3 7 1 2 Nevertheless, when I execute the code I get this error message: TypeError: cannot concatenate object of type '<class 'numpy.int64'>'; only Series and DataFrame objs are valid I'm not sure what the problem is or even if there is a code more efficient for the purpose. Please, I would be grateful if you could help me. Thank you in advance. Irene I solved it. The right code for my purpose would be: d = {'x':[1,2,3,4,5,6,7],'y':[1,1,1,1,1,1,1],'r':[2,2,2,1,1,3,2]} df3 = pd.Dataframe(data=d) df_inter = pd.DataFrame(columns=['x','y', 'r']) df_final = pd.DataFrame(columns=['x','y','r']) for i in df3.index.values: if df3.iloc[i,2] == df3.iloc[i-1,2]: df_inter = df_inter.append({'x':df3.iloc[i,0],'y':df3.iloc[i,1],'r':df3.iloc[i,2]}, ignore_index=True) else: df_final = df_final.append({'x':df_inter['x'].mean(),'y':df_inter['y'].mean(),'r':df_inter['r'].mean()}, ignore_index=True) df_inter = pd.DataFrame(columns=['x','y', 'r']) df_inter = df_inter.append({'x':df3.iloc[i,0],'y':df3.iloc[i,1],'r':df3.iloc[i,2]}, ignore_index=True) df_final = df_final.append({'x':df_inter['x'].mean(),'y':df_inter['y'].mean(),'r':df_inter['r'].mean()}, ignore_index=True) df_final.to_csv(f'Resultados/df_final.csv', index=False, sep=',')
[ "You may want to append to the end of the dataframe using\n\ndf_inter = df_inter.append({'x':df3.iloc[i,1],'y':df3.iloc[i,2],'r':df3.iloc[i,3]}, ignore_index=True)\n\n\n", "If you have some knowledge of SQL, it can be intuitively done using sqldf and pandas:\nimport sqldf\nimport pandas as pd\n\ndf = pd.DataFrame({\"class\":[1,1,1,2,2,2,1,2,2,1],\"value\":[10,10,12,11,15,17,98,23,22,0]})\n\naverages = sqldf.run(\"\"\"\n SELECT class,AVG(value)\n FROM df\n GROUP BY class\n\"\"\")\n\nThe output being:\n class AVG(value)\n\n0 1 26.0\n1 2 17.6\n\nIs that what you are looking for ?\n" ]
[ 0, 0 ]
[]
[]
[ "database", "pandas", "python" ]
stackoverflow_0074530800_database_pandas_python.txt
Q: How to get calendar years as column names and month and day as index for one timeseries I have looked for solutions but seem to find none that point me in the right direction, hopefully, someone on here can help. I have a stock price data set, with a frequency of Month Start. I am trying to get an output where the calendar years are the column names, and the day and month will be the index (there will only be 12 rows since it is monthly data). The rows will be filled with the stock prices corresponding to the year and month. I, unfortunately, have no code since I have looked at for loops, groupby, etc but can't seem to figure this one out. A: You might want to split the date into month and year and to apply a pivot: s = pd.to_datetime(df.index) out = (df .assign(year=s.year, month=s.month) .pivot_table(index='month', columns='year', values='Close', fill_value=0) ) output: year 2003 2004 month 1 0 2 2 0 3 3 0 4 12 1 0 Used input: df = pd.DataFrame({'Close': [1,2,3,4]}, index=['2003-12-01', '2004-01-01', '2004-02-01', '2004-03-01']) A: You need multiple steps to do that. First split your column into the right format. Then convert this column into two separate columns. Then pivot the table accordingly. import pandas as pd # Test Dataframe df = pd.DataFrame({'Date': ['2003-12-01', '2004-01-01', '2004-02-01', '2004-12-01'], 'Close': [6.661, 7.053, 6.625, 8.999]}) # Split datestring into list of form [year, month-day] df = df.assign(Date=df.Date.str.split(pat='-', n=1)) # Separate date-list column into two columns df = pd.DataFrame(df.Date.to_list(), columns=['Year', 'Date'], index=df.index).join(df.Close) # Pivot the table df = df.pivot(columns='Year', index='Date') df Output: Close Year 2003 2004 Date 01-01 NaN 7.053 02-01 NaN 6.625 12-01 6.661 8.999
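If the row labels should literally read as month-day rather than the bare month number, a small variant of the pivot in the first answer works (same sample frame, with the dates as a DatetimeIndex):
import pandas as pd

df = pd.DataFrame({'Close': [1, 2, 3, 4]},
                  index=pd.to_datetime(['2003-12-01', '2004-01-01', '2004-02-01', '2004-03-01']))

out = (df.assign(year=df.index.year, month_day=df.index.strftime('%m-%d'))
         .pivot_table(index='month_day', columns='year', values='Close'))
print(out)
# year       2003  2004
# month_day
# 01-01       NaN   2.0
# 02-01       NaN   3.0
# 12-01       1.0   NaN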
How to get calendar years as column names and month and day as index for one timeseries
I have looked for solutions but seem to find none that point me in the right direction, hopefully, someone on here can help. I have a stock price data set, with a frequency of Month Start. I am trying to get an output where the calendar years are the column names, and the day and month will be the index (there will only be 12 rows since it is monthly data). The rows will be filled with the stock prices corresponding to the year and month. I, unfortunately, have no code since I have looked at for loops, groupby, etc but can't seem to figure this one out.
[ "You might want to split the date into month and year and to apply a pivot:\ns = pd.to_datetime(df.index)\n\nout = (df\n .assign(year=s.year, month=s.month)\n .pivot_table(index='month', columns='year', values='Close', fill_value=0)\n)\n\noutput:\nyear 2003 2004\nmonth \n1 0 2\n2 0 3\n3 0 4\n12 1 0\n\nUsed input:\ndf = pd.DataFrame({'Close': [1,2,3,4]},\n index=['2003-12-01', '2004-01-01', '2004-02-01', '2004-03-01'])\n\n", "You need multiple steps to do that.\nFirst split your column into the right format.\nThen convert this column into two separate columns.\nThen pivot the table accordingly.\nimport pandas as pd\n\n# Test Dataframe\ndf = pd.DataFrame({'Date': ['2003-12-01', '2004-01-01', '2004-02-01', '2004-12-01'],\n 'Close': [6.661, 7.053, 6.625, 8.999]})\n\n# Split datestring into list of form [year, month-day]\ndf = df.assign(Date=df.Date.str.split(pat='-', n=1))\n# Separate date-list column into two columns\ndf = pd.DataFrame(df.Date.to_list(), columns=['Year', 'Date'], index=df.index).join(df.Close)\n# Pivot the table\ndf = df.pivot(columns='Year', index='Date')\ndf\n\nOutput:\n Close \nYear 2003 2004\nDate \n01-01 NaN 7.053\n02-01 NaN 6.625\n12-01 6.661 8.999\n\n" ]
[ 3, 1 ]
[]
[]
[ "dataframe", "pandas", "python", "python_3.x" ]
stackoverflow_0074530461_dataframe_pandas_python_python_3.x.txt
Q: Proper way to implement user input with Sympy? I am currently working on creating a python script that will do a series of calculations based on the formula entered by the user; however, it is not working as expected? I have tried the following: init_printing(use_unicode=True) x, y = symbols('x y', real = True) userinput = sympify(input("testinput: ")) x_diff = diff(userinput, x) print(x_diff) However, this always returns zero, but when I write the input directly, e.g. init_printing(use_unicode=True) x, y = symbols('x y', real = True) userinput = x**0.5+y x_diff = diff(userinput, x) print(x_diff) It works flawlessly, what am I doing wrong here? Thanks! A: Adding locals parameter in sympify function will help you. Here is a working code, based on yours : from sympy import * init_printing(use_unicode=True) x, y = symbols('x y', real = True) userinput = input("testinput: ") locals = {'x':x, 'y':y} sympified = sympify(userinput, locals=locals) print(f'derivate /x : = \n {diff(sympified, x)} \n derivative / y : \n {diff(sympified, y)}') Output: testinput: cos(y) + 2*x derivate /x : 2 derivative /y : -sin(y)
Proper way to implement user input with Sympy?
I am currently working on creating a python script that will do a series of calculations based on the formula entered by the user; however, it is not working as expected? I have tried the following: init_printing(use_unicode=True) x, y = symbols('x y', real = True) userinput = sympify(input("testinput: ")) x_diff = diff(userinput, x) print(x_diff) However, this always returns zero, but when I write the input directly, e.g. init_printing(use_unicode=True) x, y = symbols('x y', real = True) userinput = x**0.5+y x_diff = diff(userinput, x) print(x_diff) It works flawlessly, what am I doing wrong here? Thanks!
[ "Adding locals parameter in sympify function will help you. Here is a working code, based on yours :\nfrom sympy import *\n\ninit_printing(use_unicode=True)\n\nx, y = symbols('x y', real = True)\nuserinput = input(\"testinput: \")\nlocals = {'x':x, 'y':y}\nsympified = sympify(userinput, locals=locals)\nprint(f'derivate /x : = \\n {diff(sympified, x)} \\n derivative / y : \\n {diff(sympified, y)}')\n\nOutput:\ntestinput: cos(y) + 2*x\n\nderivate /x :\n2\nderivative /y :\n-sin(y)\n\n" ]
[ 3 ]
[]
[]
[ "python", "sympy" ]
stackoverflow_0074530358_python_sympy.txt
Q: is it possible to override the size of a frame when something is inside? I am trying to have two identically sized frames inside a grid, like this: enter image description here i have control over the frame's size when nothing is in it but when i add something into the frame, i lose control over the size itself and it adapts to the size of stuff placed in it. Any way to control the size of a frame when something is inside it? size of frame when something is inside: when nothing is in the frame from tkinter import * import random as rn def value_change(choice): global user_choice user_choice=choice return(user_choice) user_choice=4 hlavni=Tk() jmeno=Label(hlavni,text='Kámen, nůžky,papír',font=("arial","20","bold"),justify='center') jmeno.grid(row=0,column=0,columnspan=2) hrac_name=Label(hlavni,text='Hráč',justify="left") hrac_name.grid(row=1,column=0) pc_name=Label(hlavni,text="Počítač") pc_name.grid(row=1,column=1) hrac_f=Frame(hlavni,relief="ridge",borderwidth=4,width=400,height=400,padx=10,pady=10) hrac_f.grid(row=2,column=0,padx=10,pady=10) """ stuff inside the frame Volba=Label(hrac_f,text="Zadej svou volbu",font=("arial",10,"bold")).grid(row=0,column=0) values=Label(hrac_f,text="Vyber: Kámen, Nůžky, Papír",font=("arial",10)).grid(row=1,column=0) choice_f=Frame(hrac_f) choice_f.grid(row=3,column=0) stone=Button(choice_f,text="kámen",relief="ridge",command=lambda: value_change(0)) stone.grid(row=0,column=0) scissors=Button(choice_f,text="nůžky",relief="ridge",command=lambda: value_change(1)) scissors.grid(row=0,column=1) paper=Button(choice_f,text="papír",relief="ridge",command=lambda: value_change(2)) paper.grid(row=0,column=2) """ hlavni.mainloop() A: By default, when adding widgets to a frame using .grid() or .pack(), the size of the frame will be adjusted to fit all the widgets. To change this default behavior, call .grid_propagate(0) or .pack_propagate(0) on the frame. For your case, as .grid() is used on those widgets inside hrac_f frame, then hrac_f.grid_propagate(0) should be called: ... hrac_f=Frame(hlavni,relief="ridge",borderwidth=4,width=400,height=400,padx=10,pady=10) hrac_f.grid(row=2,column=0,padx=10,pady=10) hrac_f.grid_propagate(0) # disable auto size adjustment when adding widgets using grid() """ stuff inside the frame """ Volba=Label(hrac_f,text="Zadej svou volbu",font=("arial",10,"bold")).grid(row=0,column=0) values=Label(hrac_f,text="Vyber: Kámen, Nůžky, Papír",font=("arial",10)).grid(row=1,column=0) choice_f=Frame(hrac_f) choice_f.grid(row=3,column=0) stone=Button(choice_f,text="kámen",relief="ridge",command=lambda: value_change(0)) stone.grid(row=0,column=0) scissors=Button(choice_f,text="nůžky",relief="ridge",command=lambda: value_change(1)) scissors.grid(row=0,column=1) paper=Button(choice_f,text="papír",relief="ridge",command=lambda: value_change(2)) paper.grid(row=0,column=2) hlavni.mainloop() Result: A: You can use frames to change placement manager like grid to place or pack or vice versa. Like you can fix sizes with place and put them inside whatever you want. import tkinter as tk def value_change(choice): global user_choice user_choice=choice return(user_choice) user_choice=4 hlavni=tk.Tk() hlavni.geometry("600x100")#to have enough space to prevent collision. mainFrame = tk.Frame(hlavni,relief="ridge",borderwidth=4) #means that. start at parents x and y beginning. Have height of parents %95 height(hlavni, main window in this case.) #have %50 width of parent(hlavni) width. 
If parent width is 800 width then this mainframe has 400 pixel width #we set width 600 in this example so our mainframe widget has 300 width. mainFrame.place(relx=0,rely=0,relheight=0.95,relwidth=0.5) jmeno=tk.Label(mainFrame,text='Kámen, nůžky,papír',font=("arial","20","bold"),justify='center') jmeno.grid(row=0,column=0,columnspan=2) hrac_name=tk.Label(mainFrame,text='Hráč',justify="left") hrac_name.grid(row=1,column=0) pc_name=tk.Label(mainFrame,text="Počítač") pc_name.grid(row=1,column=1) hrac_f=tk.Frame(hlavni,relief="ridge",borderwidth=4) #means that put hrac_f frame where is half(0.5, %50) of hlavni and and have width of %49 of hlavni. #have %95 height of hlavni. We let a bit space at the edges so that we can see borders of frames. hrac_f.place(relx=0.5,rely=0,relheight=0.95,relwidth=0.49) Volba=tk.Label(hrac_f,text="Zadej svou volbu",font=("arial",10,"bold")).grid(row=0,column=0) values=tk.Label(hrac_f,text="Vyber: Kámen, Nůžky, Papír",font=("arial",10)).grid(row=1,column=0) choice_f=tk.Frame(hrac_f) choice_f.grid(row=3,column=0) stone=tk.Button(choice_f,text="kámen",relief="ridge",command=lambda: value_change(0)) stone.grid(row=0,column=0) scissors=tk.Button(choice_f,text="nůžky",relief="ridge",command=lambda: value_change(1)) scissors.grid(row=0,column=1) paper=tk.Button(choice_f,text="papír",relief="ridge",command=lambda: value_change(2)) paper.grid(row=0,column=2) hlavni.mainloop()
is it possible to override the size of a frame when something is inside?
I am trying to have two identically sized frames inside a grid, like this: enter image description here i have control over the frame's size when nothing is in it but when i add something into the frame, i lose control over the size itself and it adapts to the size of stuff placed in it. Any way to control the size of a frame when something is inside it? size of frame when something is inside: when nothing is in the frame from tkinter import * import random as rn def value_change(choice): global user_choice user_choice=choice return(user_choice) user_choice=4 hlavni=Tk() jmeno=Label(hlavni,text='Kámen, nůžky,papír',font=("arial","20","bold"),justify='center') jmeno.grid(row=0,column=0,columnspan=2) hrac_name=Label(hlavni,text='Hráč',justify="left") hrac_name.grid(row=1,column=0) pc_name=Label(hlavni,text="Počítač") pc_name.grid(row=1,column=1) hrac_f=Frame(hlavni,relief="ridge",borderwidth=4,width=400,height=400,padx=10,pady=10) hrac_f.grid(row=2,column=0,padx=10,pady=10) """ stuff inside the frame Volba=Label(hrac_f,text="Zadej svou volbu",font=("arial",10,"bold")).grid(row=0,column=0) values=Label(hrac_f,text="Vyber: Kámen, Nůžky, Papír",font=("arial",10)).grid(row=1,column=0) choice_f=Frame(hrac_f) choice_f.grid(row=3,column=0) stone=Button(choice_f,text="kámen",relief="ridge",command=lambda: value_change(0)) stone.grid(row=0,column=0) scissors=Button(choice_f,text="nůžky",relief="ridge",command=lambda: value_change(1)) scissors.grid(row=0,column=1) paper=Button(choice_f,text="papír",relief="ridge",command=lambda: value_change(2)) paper.grid(row=0,column=2) """ hlavni.mainloop()
[ "By default, when adding widgets to a frame using .grid() or .pack(), the size of the frame will be adjusted to fit all the widgets.\nTo change this default behavior, call .grid_propagate(0) or .pack_propagate(0) on the frame.\nFor your case, as .grid() is used on those widgets inside hrac_f frame, then hrac_f.grid_propagate(0) should be called:\n...\nhrac_f=Frame(hlavni,relief=\"ridge\",borderwidth=4,width=400,height=400,padx=10,pady=10)\nhrac_f.grid(row=2,column=0,padx=10,pady=10)\n\nhrac_f.grid_propagate(0) # disable auto size adjustment when adding widgets using grid()\n\n\"\"\" stuff inside the frame \"\"\"\nVolba=Label(hrac_f,text=\"Zadej svou volbu\",font=(\"arial\",10,\"bold\")).grid(row=0,column=0)\nvalues=Label(hrac_f,text=\"Vyber: Kámen, Nůžky, Papír\",font=(\"arial\",10)).grid(row=1,column=0)\n\nchoice_f=Frame(hrac_f)\nchoice_f.grid(row=3,column=0)\n\nstone=Button(choice_f,text=\"kámen\",relief=\"ridge\",command=lambda: value_change(0))\nstone.grid(row=0,column=0)\n\nscissors=Button(choice_f,text=\"nůžky\",relief=\"ridge\",command=lambda: value_change(1))\nscissors.grid(row=0,column=1)\n\npaper=Button(choice_f,text=\"papír\",relief=\"ridge\",command=lambda: value_change(2))\npaper.grid(row=0,column=2)\n\nhlavni.mainloop()\n\nResult:\n\n", "You can use frames to change placement manager like grid to place or pack or vice versa. Like you can fix sizes with place and put them inside whatever you want.\n\n\nimport tkinter as tk\n\ndef value_change(choice):\n global user_choice\n user_choice=choice\n return(user_choice)\n\nuser_choice=4\nhlavni=tk.Tk()\nhlavni.geometry(\"600x100\")#to have enough space to prevent collision.\n\nmainFrame = tk.Frame(hlavni,relief=\"ridge\",borderwidth=4)\n#means that. start at parents x and y beginning. Have height of parents %95 height(hlavni, main window in this case.)\n#have %50 width of parent(hlavni) width. If parent width is 800 width then this mainframe has 400 pixel width\n#we set width 600 in this example so our mainframe widget has 300 width.\nmainFrame.place(relx=0,rely=0,relheight=0.95,relwidth=0.5)\n\njmeno=tk.Label(mainFrame,text='Kámen, nůžky,papír',font=(\"arial\",\"20\",\"bold\"),justify='center')\njmeno.grid(row=0,column=0,columnspan=2)\n\nhrac_name=tk.Label(mainFrame,text='Hráč',justify=\"left\")\nhrac_name.grid(row=1,column=0)\npc_name=tk.Label(mainFrame,text=\"Počítač\")\npc_name.grid(row=1,column=1)\n\n\n\n\nhrac_f=tk.Frame(hlavni,relief=\"ridge\",borderwidth=4)\n\n#means that put hrac_f frame where is half(0.5, %50) of hlavni and and have width of %49 of hlavni.\n#have %95 height of hlavni. We let a bit space at the edges so that we can see borders of frames.\nhrac_f.place(relx=0.5,rely=0,relheight=0.95,relwidth=0.49)\n\n\nVolba=tk.Label(hrac_f,text=\"Zadej svou volbu\",font=(\"arial\",10,\"bold\")).grid(row=0,column=0)\nvalues=tk.Label(hrac_f,text=\"Vyber: Kámen, Nůžky, Papír\",font=(\"arial\",10)).grid(row=1,column=0)\n\nchoice_f=tk.Frame(hrac_f)\nchoice_f.grid(row=3,column=0)\n\nstone=tk.Button(choice_f,text=\"kámen\",relief=\"ridge\",command=lambda: value_change(0))\nstone.grid(row=0,column=0)\n\nscissors=tk.Button(choice_f,text=\"nůžky\",relief=\"ridge\",command=lambda: value_change(1))\nscissors.grid(row=0,column=1)\n\npaper=tk.Button(choice_f,text=\"papír\",relief=\"ridge\",command=lambda: value_change(2))\npaper.grid(row=0,column=2) \n\n\nhlavni.mainloop()\n\n" ]
[ 0, 0 ]
[]
[]
[ "python", "tkinter", "tkinter_layout" ]
stackoverflow_0074528690_python_tkinter_tkinter_layout.txt
Q: I want to create lookup data using apache_beam.utils.shared module but it gives error TypeError: cannot create weak reference to 'list' object ` import apache_beam as beam from apache_beam.utils import shared from log_elements import LogElements class GetNthStringFn(beam.DoFn): def __init__(self, shared_handle): self._shared_handle = shared_handle def process(self, element): def initialize_list(): # Build the giant initial list. return [str(i) for i in range(1000000)] giant_list = self._shared_handle.acquire(initialize_list) yield giant_list[element] with beam.Pipeline() as p: shared_handle = shared.Shared() (p | beam.Create([2, 4, 6, 8]) | beam.ParDo(GetNthStringFn(shared_handle)) | LogElements()) ` I tried example given in apache beam documentation. Link https://beam.apache.org/releases/pydoc/2.24.0/apache_beam.utils.shared.html but got below error File "/opt/playground/backend/executable_files/4a6babb9-9ea4-4a70-881d-559057592090/4a6babb9-9ea4-4a70-881d-559057592090.py", line 39, in process giant_list = self._shared_handle.acquire(initialize_list) File "/usr/local/lib/python3.7/site-packages/apache_beam/utils/shared.py", line 312, in acquire return _shared_map.acquire(self._key, constructor_fn, tag) File "/usr/local/lib/python3.7/site-packages/apache_beam/utils/shared.py", line 253, in acquire result = control_block.acquire(constructor_fn, tag) File "/usr/local/lib/python3.7/site-packages/apache_beam/utils/shared.py", line 149, in acquire self._ref = weakref.ref(result) TypeError: cannot create weak reference to 'list' object [while running 'ParDo(GetNthStringFn)'] A: You don't reference the good link and version, the Beam version 2.24.0 is too old. Check with this code and this link : # Several built-in types such as list and dict do not directly support weak # references but can add support through subclassing: # https://docs.python.org/3/library/weakref.html class WeakRefList(list): pass class GetNthStringFn(beam.DoFn): def __init__(self): self._shared_handle = shared.Shared() def setup(self): # setup is a good place to initialize transient in-memory resources. def initialize_list(): # Build the giant initial list. return WeakRefList([str(i) for i in range(1000000)]) self._giant_list = self._shared_handle.acquire(initialize_list) def process(self, element): yield self._giant_list[element] p = beam.Pipeline() (p | beam.Create([2, 4, 6, 8]) | beam.ParDo(GetNthStringFn())) To be able to execute this code, you need to install the Beam 2.42.0 version. You can install it with pip in your virtual env.
I want to create lookup data using apache_beam.utils.shared module but it gives error TypeError: cannot create weak reference to 'list' object
` import apache_beam as beam from apache_beam.utils import shared from log_elements import LogElements class GetNthStringFn(beam.DoFn): def __init__(self, shared_handle): self._shared_handle = shared_handle def process(self, element): def initialize_list(): # Build the giant initial list. return [str(i) for i in range(1000000)] giant_list = self._shared_handle.acquire(initialize_list) yield giant_list[element] with beam.Pipeline() as p: shared_handle = shared.Shared() (p | beam.Create([2, 4, 6, 8]) | beam.ParDo(GetNthStringFn(shared_handle)) | LogElements()) ` I tried example given in apache beam documentation. Link https://beam.apache.org/releases/pydoc/2.24.0/apache_beam.utils.shared.html but got below error File "/opt/playground/backend/executable_files/4a6babb9-9ea4-4a70-881d-559057592090/4a6babb9-9ea4-4a70-881d-559057592090.py", line 39, in process giant_list = self._shared_handle.acquire(initialize_list) File "/usr/local/lib/python3.7/site-packages/apache_beam/utils/shared.py", line 312, in acquire return _shared_map.acquire(self._key, constructor_fn, tag) File "/usr/local/lib/python3.7/site-packages/apache_beam/utils/shared.py", line 253, in acquire result = control_block.acquire(constructor_fn, tag) File "/usr/local/lib/python3.7/site-packages/apache_beam/utils/shared.py", line 149, in acquire self._ref = weakref.ref(result) TypeError: cannot create weak reference to 'list' object [while running 'ParDo(GetNthStringFn)']
[ "You don't reference the good link and version, the Beam version 2.24.0 is too old.\nCheck with this code and this link :\n# Several built-in types such as list and dict do not directly support weak\n# references but can add support through subclassing:\n# https://docs.python.org/3/library/weakref.html\nclass WeakRefList(list):\n pass\n\nclass GetNthStringFn(beam.DoFn):\n def __init__(self):\n self._shared_handle = shared.Shared()\n\n def setup(self):\n # setup is a good place to initialize transient in-memory resources.\n def initialize_list():\n # Build the giant initial list.\n return WeakRefList([str(i) for i in range(1000000)])\n\n self._giant_list = self._shared_handle.acquire(initialize_list)\n\n def process(self, element):\n yield self._giant_list[element]\n\np = beam.Pipeline()\n(p | beam.Create([2, 4, 6, 8])\n | beam.ParDo(GetNthStringFn()))\n\nTo be able to execute this code, you need to install the Beam 2.42.0 version. You can install it with pip in your virtual env.\n" ]
[ 2 ]
[]
[]
[ "apache_beam", "google_cloud_dataflow", "python" ]
stackoverflow_0074528574_apache_beam_google_cloud_dataflow_python.txt
Q: Can anyone reduce time complexity of this code You are given three integers A, B, and C. You are allowed to perform the following operation any number of times (possibly zero). • Choose any integer X such that X ≤ max (A,B, C), and replace A with A^X, B with B^X, and C with C^X. Here denote Bitwise XOR operation. Find the maximum possible value of A+B+C. A=2 B=2 C=2 def maxSum(a,b,c): list=[] l=[a,b,c] l.sort() if a==b==c: for x in range(int(a/2),l[-1]): new=((a^x)+(b^x)+(c^x)) list.append(new) return list[-1] else: for x in range(l[1],l[-1]): new=((a^x)+(b^x)+(c^x)) list.append(new) return list[-1] maximum=maxSum(A,B,C) print(maximum) How to make the code run faster? I tried using for loop but the runtime was so much. I want to know how to reduce runtime. What are the modifications needed. A: Try this: def max_sum(a, b, c): for j in range(int.bit_length(max(a, b, c))): x = 2**j if sum((n & 2**j) >> j for n in (a, b, c)) < 2 else 0 a = a ^ x b = b ^ x c = c ^ x return a + b + c So here you perform a number of operations equal to the number of bits of the largest number. x is either a power of 2 or 0. Example: >>> max_sum(8, 3, 5) 30
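The answer's loop can be read as a per-bit majority vote: a bit position is flipped only when at most one of the three numbers has that bit set, since flipping then turns one (or zero) ones into two (or three). The same idea with the counting made explicit, as a sketch (it reproduces the answer's results; whether the greedy choice is optimal for every input is not re-proved here):
def max_sum_bits(a, b, c):
    total = 0
    for j in range(max(a, b, c).bit_length()):
        bits = [(n >> j) & 1 for n in (a, b, c)]
        flip = 1 if sum(bits) < 2 else 0          # flip only when the ones are in the minority
        total += sum((bit ^ flip) << j for bit in bits)
    return total

print(max_sum_bits(8, 3, 5))   # 30, same as the example above
print(max_sum_bits(2, 2, 2))   # 9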
Can anyone reduce the time complexity of this code?
You are given three integers A, B, and C. You are allowed to perform the following operation any number of times (possibly zero). • Choose any integer X such that X ≤ max (A,B, C), and replace A with A^X, B with B^X, and C with C^X. Here denote Bitwise XOR operation. Find the maximum possible value of A+B+C. A=2 B=2 C=2 def maxSum(a,b,c): list=[] l=[a,b,c] l.sort() if a==b==c: for x in range(int(a/2),l[-1]): new=((a^x)+(b^x)+(c^x)) list.append(new) return list[-1] else: for x in range(l[1],l[-1]): new=((a^x)+(b^x)+(c^x)) list.append(new) return list[-1] maximum=maxSum(A,B,C) print(maximum) How to make the code run faster? I tried using for loop but the runtime was so much. I want to know how to reduce runtime. What are the modifications needed.
[ "Try this:\ndef max_sum(a, b, c):\n for j in range(int.bit_length(max(a, b, c))):\n x = 2**j if sum((n & 2**j) >> j for n in (a, b, c)) < 2 else 0\n a = a ^ x\n b = b ^ x\n c = c ^ x\n return a + b + c\n\nSo here you perform a number of operations equal to the number of bits of the largest number. x is either a power of 2 or 0.\nExample:\n>>> max_sum(8, 3, 5)\n30\n\n" ]
[ 0 ]
[]
[]
[ "binary_search", "linear_search", "max", "python", "sum" ]
stackoverflow_0074522895_binary_search_linear_search_max_python_sum.txt
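The answer above works bit by bit: for each bit position, if at most one of the three numbers has that bit set, XOR-ing all three with that power of two sets the bit in at least two of them, so the total can only grow. A small trace sketch of that logic (the helper name max_sum_trace is made up for illustration):

def max_sum_trace(a, b, c):
    total_bits = max(a, b, c).bit_length()
    for j in range(total_bits):
        set_count = sum((n >> j) & 1 for n in (a, b, c))
        flip = set_count < 2   # flip only when 0 or 1 of the numbers have this bit set
        if flip:
            a, b, c = a ^ (1 << j), b ^ (1 << j), c ^ (1 << j)
        print(f"bit {j}: set in {set_count} numbers -> {'flip' if flip else 'keep'}")
    return a + b + c

print(max_sum_trace(8, 3, 5))   # prints the per-bit decisions, then 30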
Q: I can't able to install the psycopg2 in python 3.10.8 in ubuntu-20 | ./psycopg/psycopg.h:36:10: fatal error: libpq-fe.h: No such file or directory I'm Using python3.10.8 & ubuntu-20, i have tried so many commands But I can't able to fixe that. Error: r-strong -Wformat -Werror=format-security -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DPSYCOPG_VERSION=2.9.1 (dt dec pq3 ext lo64) -DPSYCOPG_DEBUG=1 -DPG_VERSION_NUM=120012 -DHAVE_LO64=1 -DPSYCOPG_DEBUG=1 -I/home/softsuave/Downloads/charityape/skraggle-main (2)/skraggle-main/venv/include -I/usr/include/python3.10 -I. -I/usr/include/postgresql -I/usr/include/postgresql/12/server -I/usr/include/libxml2 -I/usr/include/mit-krb5 -c psycopg/adapter_asis.c -o build/temp.linux-x86_64-3.10/psycopg/adapter_asis.o -Wdeclaration-after-statement In file included from psycopg/adapter_asis.c:28: ./psycopg/psycopg.h:36:10: fatal error: libpq-fe.h: No such file or directory 36 | #include <libpq-fe.h> | ^~~~~~~~~~~~ compilation terminated. It appears you are missing some prerequisite to build the package from source. You may install a binary package by installing 'psycopg2-binary' from PyPI. If you want to install psycopg2 from source, please install the packages required for the build and try again. For further information please check the 'doc/src/install.rst' file (also at <https://www.psycopg.org/docs/install.html>). error: command '/usr/bin/x86_64-linux-gnu-gcc' failed with exit code 1 [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. error: legacy-install-failure × Encountered error while trying to install package. ╰─> psycopg2 note: This is an issue with the package mentioned above, not pip. hint: See above for output from the failure. Commands I had Tried sudo apt-get install python3-dev sudo apt-get install python3.10-dev Reference Documentations CLICK HERE A: try pip install psycopg2-binary or you can try this sudo apt-get install libpq-dev
I can't able to install the psycopg2 in python 3.10.8 in ubuntu-20 | ./psycopg/psycopg.h:36:10: fatal error: libpq-fe.h: No such file or directory
I'm Using python3.10.8 & ubuntu-20, i have tried so many commands But I can't able to fixe that. Error: r-strong -Wformat -Werror=format-security -g -fwrapv -O2 -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -DPSYCOPG_VERSION=2.9.1 (dt dec pq3 ext lo64) -DPSYCOPG_DEBUG=1 -DPG_VERSION_NUM=120012 -DHAVE_LO64=1 -DPSYCOPG_DEBUG=1 -I/home/softsuave/Downloads/charityape/skraggle-main (2)/skraggle-main/venv/include -I/usr/include/python3.10 -I. -I/usr/include/postgresql -I/usr/include/postgresql/12/server -I/usr/include/libxml2 -I/usr/include/mit-krb5 -c psycopg/adapter_asis.c -o build/temp.linux-x86_64-3.10/psycopg/adapter_asis.o -Wdeclaration-after-statement In file included from psycopg/adapter_asis.c:28: ./psycopg/psycopg.h:36:10: fatal error: libpq-fe.h: No such file or directory 36 | #include <libpq-fe.h> | ^~~~~~~~~~~~ compilation terminated. It appears you are missing some prerequisite to build the package from source. You may install a binary package by installing 'psycopg2-binary' from PyPI. If you want to install psycopg2 from source, please install the packages required for the build and try again. For further information please check the 'doc/src/install.rst' file (also at <https://www.psycopg.org/docs/install.html>). error: command '/usr/bin/x86_64-linux-gnu-gcc' failed with exit code 1 [end of output] note: This error originates from a subprocess, and is likely not a problem with pip. error: legacy-install-failure × Encountered error while trying to install package. ╰─> psycopg2 note: This is an issue with the package mentioned above, not pip. hint: See above for output from the failure. Commands I had Tried sudo apt-get install python3-dev sudo apt-get install python3.10-dev Reference Documentations CLICK HERE
[ "try\npip install psycopg2-binary\n\nor you can try this\nsudo apt-get install libpq-dev\n\n" ]
[ 1 ]
[]
[]
[ "psycopg2", "python" ]
stackoverflow_0074530944_psycopg2_python.txt
Q: Python: how to merge two pandas dataframes with condition I have two dataframes like the following df1 A B 0 0 3 1 0 2 2 1 5 3 1 3 4 2 5 5 'Ciao' 'log' 6 3 4 df2 A B 0 0 -1 1 0 20 2 1 -2 3 1 33 4 2 17 I want to merge the two dataframes in order that the if A==0 keep the values of df1 and otherwise keep the values of df2. At the end, I would like something like the follwing df2 A B 0 0 3 1 0 2 2 1 -2 3 1 33 4 2 17 A: Assuming the dataframes are aligned (and that the duplicated index 3 in df1 is a typo), you do not want a merge but rather a conditional using where: out = df1.where(df1['A'].eq(0), df2) Output: A B 0 0 3 1 0 2 2 1 -2 3 1 33 4 2 17 NB. if you really want a merge, you have to further explain the logic of the merge and provide a non-trivial example. Updated example: You seem to still have partially aligned indices, but want to get the intersection: out = (df1.where(df1['A'].eq(0), df2) .loc[df1.index.intersection(df2.index)] ) Or: out = (df1.reindex_like(df2) .where(df1['A'].eq(0), df2) ) output: A B 0 0.0 -1.0 1 0.0 20.0 2 1.0 -2.0 3 1.0 33.0 4 2.0 17.0
Python: how to merge two pandas dataframes with condition
I have two dataframes like the following df1 A B 0 0 3 1 0 2 2 1 5 3 1 3 4 2 5 5 'Ciao' 'log' 6 3 4 df2 A B 0 0 -1 1 0 20 2 1 -2 3 1 33 4 2 17 I want to merge the two dataframes so that if A==0 the values of df1 are kept, and otherwise the values of df2 are kept. In the end, I would like something like the following df2 A B 0 0 3 1 0 2 2 1 -2 3 1 33 4 2 17
[ "Assuming the dataframes are aligned (and that the duplicated index 3 in df1 is a typo), you do not want a merge but rather a conditional using where:\nout = df1.where(df1['A'].eq(0), df2)\n\nOutput:\n A B\n0 0 3\n1 0 2\n2 1 -2\n3 1 33\n4 2 17\n\nNB. if you really want a merge, you have to further explain the logic of the merge and provide a non-trivial example.\nUpdated example:\nYou seem to still have partially aligned indices, but want to get the intersection:\nout = (df1.where(df1['A'].eq(0), df2)\n .loc[df1.index.intersection(df2.index)]\n )\n\nOr:\nout = (df1.reindex_like(df2)\n .where(df1['A'].eq(0), df2)\n )\n\noutput:\n A B\n0 0.0 -1.0\n1 0.0 20.0\n2 1.0 -2.0\n3 1.0 33.0\n4 2.0 17.0\n\n" ]
[ 4 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074530992_pandas_python.txt
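The where call from the accepted answer is easy to test end to end. A self-contained sketch that rebuilds the two frames as aligned five-row examples (the question's extra rows are trimmed so the indices match) and applies the same conditional:

import pandas as pd

df1 = pd.DataFrame({'A': [0, 0, 1, 1, 2], 'B': [3, 2, 5, 3, 5]})
df2 = pd.DataFrame({'A': [0, 0, 1, 1, 2], 'B': [-1, 20, -2, 33, 17]})

# Keep df1's row where A == 0, otherwise take the row from df2.
out = df1.where(df1['A'].eq(0), df2)
print(out)
#    A   B
# 0  0   3
# 1  0   2
# 2  1  -2
# 3  1  33
# 4  2  17

Because the condition is a boolean Series, it is aligned on the index and broadcast across all columns, so entire rows are swapped in from df2 where A is not 0.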
Q: Changing the Dataframe to a TimeSeries Array [Python] I'm trying to change a date frame with the following contents: Date Change 1802 2017-09-14 -1.14% 462 2021-05-16 NaN 935 2020-01-29 0.04% 713 2020-09-07 2.39% 1471 2018-08-11 NaN [1460 rows × 2 columns] Into this: TimeSeries (DataArray) (Month: 144component: 1sample: 1) array([[[112.]], [[118.]], [[132.]], [[129.]], [[121.]], [[135.]], [[148.]], [[148.]], [[136.]], Coordinates: Month (Month) datetime64[ns]. 2019-01-01 ... 2021-12-01 component (component) object 'Change' Attributes: static_covariates: None hierarchy: None In order to run a neural network model on multiple time series. Any help or advice is greatly appreciated! A: The solution required removing the '%' sign from the column values. Then converting the column to a float. ftse_change['Change'] = ftse_change['Change'].str.rstrip('%').astype('float') / 100.0 did the trick
Changing the Dataframe to a TimeSeries Array [Python]
I'm trying to change a data frame with the following contents: Date Change 1802 2017-09-14 -1.14% 462 2021-05-16 NaN 935 2020-01-29 0.04% 713 2020-09-07 2.39% 1471 2018-08-11 NaN [1460 rows × 2 columns] Into this: TimeSeries (DataArray) (Month: 144, component: 1, sample: 1) array([[[112.]], [[118.]], [[132.]], [[129.]], [[121.]], [[135.]], [[148.]], [[148.]], [[136.]], Coordinates: Month (Month) datetime64[ns]. 2019-01-01 ... 2021-12-01 component (component) object 'Change' Attributes: static_covariates: None hierarchy: None In order to run a neural network model on multiple time series. Any help or advice is greatly appreciated!
[ "The solution required removing the '%' sign from the column values. Then converting the column to a float.\nftse_change['Change'] = ftse_change['Change'].str.rstrip('%').astype('float') / 100.0\n\ndid the trick\n\n" ]
[ 0 ]
[]
[]
[ "arrays", "python" ]
stackoverflow_0074482778_arrays_python.txt
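The one-liner in the answer above is the key cleaning step before the column can go into a numeric time series. A small sketch of the same idea on a toy frame (the ftse_change name and sample values are only stand-ins; the TimeSeries construction itself is left out):

import pandas as pd

ftse_change = pd.DataFrame({
    'Date': ['2017-09-14', '2021-05-16', '2020-01-29'],
    'Change': ['-1.14%', None, '0.04%'],
})

# Strip the trailing '%' and convert to a numeric fraction.
ftse_change['Change'] = ftse_change['Change'].str.rstrip('%').astype('float') / 100.0
ftse_change['Date'] = pd.to_datetime(ftse_change['Date'])
print(ftse_change.dtypes)
print(ftse_change)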
Q: StopIteration Error in pythoncode while reading csv data I am writing a program to read csv file. I have craeted a reader object and calling next() on it gives me the header row. But when I am calling it again it gives StopIteration error although there are rows in the csv file.I am doing file.seek(0) then it is working fine. Anyone please explains this to me? A snapshot of code is given below: with open(file,'r') as f: reader = csv.reader(f) header = next(reader) result = [] for colname in header[2:]: col_index = header.index(colname) # f.seek(0) next(reader) A: You're calling next once for each column (except the first two). So, if you have, say, 10 columns, it's going to try to read 8 rows. If you have 20 rows, that's not going to raise an exception, but you'll be ignoring the last 12 rows, which you probably don't want. On the other hand, if you have only 5 rows, it's going to raise when trying to read the 6th row. The reason the f.seek(0) prevents the exception is that it resets the file back to the start before each next, so you just read the header row over and over, ignoring everything else in the file. It doesn't raise anything, but it's not doing useful. What you probably wanted is something like this: with open(file,'r') as f: reader = csv.reader(f) header = next(reader) result = [] for row in reader: for col_index, colname in enumerate(header)[2:]: value = row[col_index] result.append(do_something_with(value, colname)) This reads every row exactly once, and does something with each column but the first two of each row. From a comment, what you actually want to do is find the maximum value for each column. So, you do need to iterate over the columns–and then, within each column, you need to iterate over the rows. A csv.reader is an iterator, which means you can only iterate over it once. So, if you just do this the obvious way, it won't work: maxes = {} with open(file) as f: reader = csv.reader(f) header = next(reader) for col_index, colname in enumerate(header)[2:]: maxes[colname] = max(reader, key=operator.itemgetter(col_index)) The first column will read whatever's left after reading the header, which is good. The next column will read whatever's left after reading the whole file, which is nothing. So, how can you fix this? One way is to re-create the iterator each time through the outer loop: maxes = {} with open(file) as f: reader = csv.reader(f) header = next(reader) for col_index, colname in enumerate(header)[2:]: with open(file) as f: reader = csv.reader(f) next(reader) maxes[colname] = max(reader, key=lambda row: float(row[col_index])) The problem with this is that you're reading the whole file N times, and reading the file off disk is probably by far the slowest thing your program does. What you were attempting to do with f.seek(0) is a trick that depends on how file objects and csv.reader objects work. While file objects are iterators, they're special, in that they have a way to reset them to the beginning (or to save a position and return to it later). And csv.reader objects are basically simple wrappers around file objects, so if you reset the file, you also reset the reader. (It's not clear that this is guaranteed to work, but if you know how csv works, you can probably convince yourself that in practice it's safe.) 
So: maxes = {} with open(file) as f: reader = csv.reader(f) header = next(reader) for col_index, colname in enumerate(header)[2:]: f.seek(0) next(reader) maxes[colname] = max(reader, key=lambda row: float(row[col_index])) This saves you the cost of closing and opening the file each time, but that's not the expensive part; you're still doing the disk reads over and over. And now anyone reading your code has to understand the trick with using file objects as iterators but resetting them, or they won't know how your code works. So, how can you avoid that? In general, whenever you need to make multiple passes over an iterator, there are two options. The simple solution is to copy the iterator into a reusable iterable, like a list: maxes = {} with open(file) as f: reader = csv.reader(f) header = next(reader) rows = list(reader) for col_index, colname in enumerate(header)[2:]: maxes[colname] = max(rows, key=lambda row: float(row[col_index])) This is not only much simpler than the earlier code, it's also much faster. Unless the file is huge. By storing all of the rows in a list, you're reading the whole file into memory at once. If it's too big to fit, your program will fail. Or, worse, if it fits, but only by using virtual memory, your program will swap parts of it in and out of memory every time you go through the loop, thrashing your swapfile and making everything slow to a crawl. The other alternative is to reorganize things so you only have to make one pass. This means you have to put the loop over the rows on the outside, and the loop over the columns on the inside. It requires rethinking the design a bit, and it means you can't just use the simple max function, but the tradeoff is probably worth it: with open(file) as f: reader = csv.reader(f) header = next(reader) maxes = {colname: float('-inf') for colname in header[2:]} for row in reader: for col_index, colname in enumerate(header)[2:]: maxes[colname] = max(maxes[colname], float(row[col_index])) You can simplify this even further—e.g., use a Counter instead of a plain dict, and a DictReader instead of a plain reader—but it's already simple, readable, and efficient as-is. A: For me Was accidentally remove data (there is no data) in .csv so get this message. so make sure to check if there is data in .csv file or not.
StopIteration Error in pythoncode while reading csv data
I am writing a program to read a csv file. I have created a reader object, and calling next() on it gives me the header row. But when I call it again it raises a StopIteration error, although there are rows in the csv file. If I do file.seek(0) first, then it works fine. Can anyone please explain this to me? A snapshot of the code is given below: with open(file,'r') as f: reader = csv.reader(f) header = next(reader) result = [] for colname in header[2:]: col_index = header.index(colname) # f.seek(0) next(reader)
[ "You're calling next once for each column (except the first two). So, if you have, say, 10 columns, it's going to try to read 8 rows.\nIf you have 20 rows, that's not going to raise an exception, but you'll be ignoring the last 12 rows, which you probably don't want. On the other hand, if you have only 5 rows, it's going to raise when trying to read the 6th row.\nThe reason the f.seek(0) prevents the exception is that it resets the file back to the start before each next, so you just read the header row over and over, ignoring everything else in the file. It doesn't raise anything, but it's not doing useful.\nWhat you probably wanted is something like this:\nwith open(file,'r') as f:\n reader = csv.reader(f)\n header = next(reader)\n result = []\n for row in reader:\n for col_index, colname in enumerate(header)[2:]:\n value = row[col_index]\n result.append(do_something_with(value, colname))\n\nThis reads every row exactly once, and does something with each column but the first two of each row.\n\nFrom a comment, what you actually want to do is find the maximum value for each column. So, you do need to iterate over the columns–and then, within each column, you need to iterate over the rows.\nA csv.reader is an iterator, which means you can only iterate over it once. So, if you just do this the obvious way, it won't work:\nmaxes = {}\nwith open(file) as f:\n reader = csv.reader(f)\n header = next(reader)\n for col_index, colname in enumerate(header)[2:]:\n maxes[colname] = max(reader, key=operator.itemgetter(col_index))\n\nThe first column will read whatever's left after reading the header, which is good. The next column will read whatever's left after reading the whole file, which is nothing.\n\nSo, how can you fix this?\nOne way is to re-create the iterator each time through the outer loop:\nmaxes = {}\nwith open(file) as f:\n reader = csv.reader(f)\n header = next(reader)\nfor col_index, colname in enumerate(header)[2:]:\n with open(file) as f:\n reader = csv.reader(f)\n next(reader)\n maxes[colname] = max(reader, key=lambda row: float(row[col_index]))\n\nThe problem with this is that you're reading the whole file N times, and reading the file off disk is probably by far the slowest thing your program does.\n\nWhat you were attempting to do with f.seek(0) is a trick that depends on how file objects and csv.reader objects work. While file objects are iterators, they're special, in that they have a way to reset them to the beginning (or to save a position and return to it later). And csv.reader objects are basically simple wrappers around file objects, so if you reset the file, you also reset the reader. (It's not clear that this is guaranteed to work, but if you know how csv works, you can probably convince yourself that in practice it's safe.) So:\nmaxes = {}\nwith open(file) as f:\n reader = csv.reader(f)\n header = next(reader)\n for col_index, colname in enumerate(header)[2:]:\n f.seek(0)\n next(reader)\n maxes[colname] = max(reader, key=lambda row: float(row[col_index]))\n\nThis saves you the cost of closing and opening the file each time, but that's not the expensive part; you're still doing the disk reads over and over. And now anyone reading your code has to understand the trick with using file objects as iterators but resetting them, or they won't know how your code works.\n\nSo, how can you avoid that?\nIn general, whenever you need to make multiple passes over an iterator, there are two options. 
The simple solution is to copy the iterator into a reusable iterable, like a list:\nmaxes = {}\nwith open(file) as f:\n reader = csv.reader(f)\n header = next(reader)\n rows = list(reader)\nfor col_index, colname in enumerate(header)[2:]:\n maxes[colname] = max(rows, key=lambda row: float(row[col_index]))\n\nThis is not only much simpler than the earlier code, it's also much faster. Unless the file is huge. By storing all of the rows in a list, you're reading the whole file into memory at once. If it's too big to fit, your program will fail. Or, worse, if it fits, but only by using virtual memory, your program will swap parts of it in and out of memory every time you go through the loop, thrashing your swapfile and making everything slow to a crawl.\n\nThe other alternative is to reorganize things so you only have to make one pass. This means you have to put the loop over the rows on the outside, and the loop over the columns on the inside. It requires rethinking the design a bit, and it means you can't just use the simple max function, but the tradeoff is probably worth it:\nwith open(file) as f:\n reader = csv.reader(f)\n header = next(reader)\n maxes = {colname: float('-inf') for colname in header[2:]}\n for row in reader:\n for col_index, colname in enumerate(header)[2:]:\n maxes[colname] = max(maxes[colname], float(row[col_index]))\n\nYou can simplify this even further—e.g., use a Counter instead of a plain dict, and a DictReader instead of a plain reader—but it's already simple, readable, and efficient as-is.\n", "For me Was accidentally remove data (there is no data) in .csv\nso get this message.\n\nso make sure to check if there is data in .csv file or not.\n\n" ]
[ 4, 0 ]
[ "Why didn't you write:\nheader = next(reader)\n\nIn the last line as well? I don't know if this is your problem, but I would start there.\n" ]
[ -1 ]
[ "csv", "iterator", "python" ]
stackoverflow_0019205807_csv_iterator_python.txt
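The closing suggestion in the long answer above (a single pass with DictReader and a dict of running maxima) can be written out in a few lines. A sketch, assuming the columns after the first two hold numeric values:

import csv

def column_maxes(path):
    with open(path, newline='') as f:
        reader = csv.DictReader(f)
        # One running maximum per column, skipping the first two key columns.
        maxes = {name: float('-inf') for name in reader.fieldnames[2:]}
        for row in reader:
            for name in maxes:
                maxes[name] = max(maxes[name], float(row[name]))
    return maxes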
Q: Merge 2 CSV files with no common column I have 2 csv files( 2 million each) with below structure first.csv h1,h2 2,3 4,5 second.csv h3,h4 5,6 7,8 I want to merge these 2 csv index wise column like below merged.csv h1,h2,h3,h4 2,3,5,6 4,5,7,8 A: You might be looking for the pandas.concat() function (see here). Here is an example: import pandas as pd df1 = pd.DataFrame({'A':[0,0,1,1,2],'B':[3,2,5,3,5]}) df2 = pd.DataFrame({'C':[0,0,1,1,2],'D':[-1,20,-2,33,17]}) df3 = pd.concat((df1,df2),axis=1) df3.to_csv('myFile.csv') You just have to replace df1 and df2 by your csv files using the pandas.read_csv function (here).
Merge 2 CSV files with no common column
I have 2 csv files( 2 million each) with below structure first.csv h1,h2 2,3 4,5 second.csv h3,h4 5,6 7,8 I want to merge these 2 csv index wise column like below merged.csv h1,h2,h3,h4 2,3,5,6 4,5,7,8
[ "You might be looking for the pandas.concat() function (see here). Here is an example:\nimport pandas as pd\n\ndf1 = pd.DataFrame({'A':[0,0,1,1,2],'B':[3,2,5,3,5]})\ndf2 = pd.DataFrame({'C':[0,0,1,1,2],'D':[-1,20,-2,33,17]})\n\ndf3 = pd.concat((df1,df2),axis=1)\n\ndf3.to_csv('myFile.csv')\n\nYou just have to replace df1 and df2 by your csv files using the pandas.read_csv function (here).\n" ]
[ 1 ]
[]
[]
[ "python", "python_3.x" ]
stackoverflow_0074531045_python_python_3.x.txt
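Applied to the two files from the question, the concat answer above becomes a three-line script. A sketch, assuming both files are already row-aligned and have the same number of rows:

import pandas as pd

first = pd.read_csv('first.csv')     # columns h1, h2
second = pd.read_csv('second.csv')   # columns h3, h4

merged = pd.concat([first, second], axis=1)
merged.to_csv('merged.csv', index=False)

Passing index=False keeps the output to the four data columns instead of writing an extra index column.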
Q: Insert a 2 horizontal lines on a bar plot I am trying to insert 2 lines on a bar plot with the following code: PosisEink_Liq['weights'].plot(kind='bar',color=('darkgray')) PosisEink_Liq['TAA+1'].plot(kind='line',color=('black'),linestyle = '--') PosisEink_Liq['TAA-1'].plot(kind='line',color=('black'),linestyle = '--') Unfortunately, the 2 horizontal lines do not happear along the all graph from right to left. Do you know a remedy for this problem. If possible no plot.axhline formula A: Use import pandas as pd import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = [7.50, 3.50] #Size of Graph plt.rcParams["figure.autolayout"] = True fig, ax = plt.subplots() #For multiple subplots
Insert a 2 horizontal lines on a bar plot
I am trying to insert 2 lines on a bar plot with the following code: PosisEink_Liq['weights'].plot(kind='bar',color=('darkgray')) PosisEink_Liq['TAA+1'].plot(kind='line',color=('black'),linestyle = '--') PosisEink_Liq['TAA-1'].plot(kind='line',color=('black'),linestyle = '--') Unfortunately, the 2 horizontal lines do not appear across the whole graph from right to left. Do you know a remedy for this problem? If possible, no plot.axhline formula
[ "Use\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams[\"figure.figsize\"] = [7.50, 3.50] #Size of Graph\nplt.rcParams[\"figure.autolayout\"] = True\nfig, ax = plt.subplots() #For multiple subplots\n\n" ]
[ 1 ]
[]
[]
[ "charts", "graph", "line", "pandas", "python" ]
stackoverflow_0074530748_charts_graph_line_pandas_python.txt
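One common reason overlaid lines stop short in code like the above is that a pandas bar plot places its bars at categorical positions 0..n-1, while a line plot of the same frame may use the real index values, so the two series end up on different x scales. A sketch of one possible workaround without axhline, using a hypothetical stand-in for PosisEink_Liq and drawing both lines against positional x values on the shared Axes:

import matplotlib.pyplot as plt
import pandas as pd

# Hypothetical stand-in for PosisEink_Liq with the same column names.
PosisEink_Liq = pd.DataFrame({'weights': [0.2, 0.5, 0.3],
                              'TAA+1': [0.45, 0.45, 0.45],
                              'TAA-1': [0.25, 0.25, 0.25]},
                             index=['a', 'b', 'c'])

ax = PosisEink_Liq['weights'].plot(kind='bar', color='darkgray')
x = range(len(PosisEink_Liq))                       # same positions the bars use
ax.plot(x, PosisEink_Liq['TAA+1'], color='black', linestyle='--')
ax.plot(x, PosisEink_Liq['TAA-1'], color='black', linestyle='--')
plt.show()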
Q: color raws in xls with python with conditions I would like to color raws of file.xls according to 3 parameters: if a raw contain 'freq' value between 0.11 and 0.5 and has common mutation and gene patterns from 'list1' then color the raw in yellow if a raw contain 'freq' value between 0.51 and 1 and has common mutation and gene patterns from 'list1' then color the raw in red if a raw has common mutation and gene patterns from 'list2' then color the raw in blue file.xls reference pos REF ALT qual depth freq gene mutation BX571857.1 7716 C A 1280.26 468 0.985294 pr209 P308T BX571857.1 7854 T C 3.85731e-15 410 0.031941 pr209 S354P BX571857.1 7940 T C 100168 531 1 pr210 N898D BX571857.1 9942 G A 100168 473 1 pr211 S897L list1 mutation gene P308T pr209 S354P pr209 N898D pr210 list2 mutation gene S897L pr211 How can I do that in python? I tried something like this but couldn't achieve my goal: import openpyxl from openpyxl.styles import PatternFill def Color(s, t): yellow = "00FFFF00" red = "00FF0000" blue = "000000FF" for cell in rows: if cell.fill = PatternFill(start_color=yellow, end_color=yellow, fill_type = "solid") elseif cell.fill = PatternFill(start_color=red, end_color=red, fill_type = "solid") if cell.fill = PatternFill(start_color=blue, end_color=blue, fill_type = "solid") with open('file.xls', 'r') as input_1: nt = input_1.readline( nt = int(nt) for i in range(nt): s = input_1.readline() print(s) t = input_1.readline() print(t) Color(s, t) thanks a lot! A: This is just an example on how to deal with conditional formatting: wb = openpyxl.load_workbook('file.xlsx') ws = wb.worksheets[0] def Color(s, t): yellow = "FFFFFF00" red = "00FF0000" blue = "000000FF" if s == 'C' and t == 'A': return openpyxl.styles.colors.Color(rgb=yellow) if s == 'T' and t == 'C': return openpyxl.styles.colors.Color(rgb=red) if s == 'G' and t == 'A': return openpyxl.styles.colors.Color(rgb=blue) for row in list(ws.rows)[1:]: for cell in row: color = Color(row[2].value,row[3].value) cell.fill = PatternFill(fill_type='solid',start_color=,colorend_color=color) wb.save('file1.xlsx')
color raws in xls with python with conditions
I would like to color raws of file.xls according to 3 parameters: if a raw contain 'freq' value between 0.11 and 0.5 and has common mutation and gene patterns from 'list1' then color the raw in yellow if a raw contain 'freq' value between 0.51 and 1 and has common mutation and gene patterns from 'list1' then color the raw in red if a raw has common mutation and gene patterns from 'list2' then color the raw in blue file.xls reference pos REF ALT qual depth freq gene mutation BX571857.1 7716 C A 1280.26 468 0.985294 pr209 P308T BX571857.1 7854 T C 3.85731e-15 410 0.031941 pr209 S354P BX571857.1 7940 T C 100168 531 1 pr210 N898D BX571857.1 9942 G A 100168 473 1 pr211 S897L list1 mutation gene P308T pr209 S354P pr209 N898D pr210 list2 mutation gene S897L pr211 How can I do that in python? I tried something like this but couldn't achieve my goal: import openpyxl from openpyxl.styles import PatternFill def Color(s, t): yellow = "00FFFF00" red = "00FF0000" blue = "000000FF" for cell in rows: if cell.fill = PatternFill(start_color=yellow, end_color=yellow, fill_type = "solid") elseif cell.fill = PatternFill(start_color=red, end_color=red, fill_type = "solid") if cell.fill = PatternFill(start_color=blue, end_color=blue, fill_type = "solid") with open('file.xls', 'r') as input_1: nt = input_1.readline( nt = int(nt) for i in range(nt): s = input_1.readline() print(s) t = input_1.readline() print(t) Color(s, t) thanks a lot!
[ "This is just an example on how to deal with conditional formatting:\nwb = openpyxl.load_workbook('file.xlsx') \nws = wb.worksheets[0]\n\ndef Color(s, t):\n yellow = \"FFFFFF00\"\n red = \"00FF0000\"\n blue = \"000000FF\"\n if s == 'C' and t == 'A': return openpyxl.styles.colors.Color(rgb=yellow) \n if s == 'T' and t == 'C': return openpyxl.styles.colors.Color(rgb=red) \n if s == 'G' and t == 'A': return openpyxl.styles.colors.Color(rgb=blue) \n \n\nfor row in list(ws.rows)[1:]:\n for cell in row:\n color = Color(row[2].value,row[3].value)\n cell.fill = PatternFill(fill_type='solid',start_color=,colorend_color=color)\n\nwb.save('file1.xlsx')\n\n" ]
[ 1 ]
[]
[]
[ "loops", "openpyxl", "python" ]
stackoverflow_0074530226_loops_openpyxl_python.txt
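The accepted example above shows the mechanics (build a PatternFill and assign it to every cell in the row) but not the question's actual rules. A sketch closer to that spec, assuming the data has been saved as file.xlsx (openpyxl does not read .xls), that freq, gene and mutation sit in columns 7 to 9 of the first worksheet, and that the two lookup lists are hardcoded as (mutation, gene) pairs:

import openpyxl
from openpyxl.styles import PatternFill

list1 = {('P308T', 'pr209'), ('S354P', 'pr209'), ('N898D', 'pr210')}
list2 = {('S897L', 'pr211')}

# yellow, red, blue fills keyed by their ARGB code
fills = {c: PatternFill(start_color=c, end_color=c, fill_type='solid')
         for c in ('00FFFF00', '00FF0000', '000000FF')}

wb = openpyxl.load_workbook('file.xlsx')
ws = wb.worksheets[0]

for row in ws.iter_rows(min_row=2):        # skip the header row
    freq = float(row[6].value)             # column 7: freq
    key = (row[8].value, row[7].value)     # (mutation, gene)
    if key in list1 and 0.11 <= freq <= 0.5:
        color = '00FFFF00'
    elif key in list1 and 0.51 <= freq <= 1:
        color = '00FF0000'
    elif key in list2:
        color = '000000FF'
    else:
        continue
    for cell in row:
        cell.fill = fills[color]

wb.save('file_colored.xlsx')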
Q: Python Packages not installed In Python, I was using Spacy library there was trying below commands:- import spacy Getting Below Error Traceback (most recent call last): File "<stdin>", line 1, in <module> ModuleNotFoundError: No module named 'spacy' Then tried to install spacy using below command:- pip install spacy Message: It gives Requirement already satisfied. Commands Used :- import spacy pip install spacy A: Try pip install -U spacy and python -m spacy download en_core_web_sm A: Spacy module is a bit more finicky than others https://spacy.io/usage <-- use this guide to generate the terminal code to install the correct version. This is just an example from the above code generator link. You need to generate the installation code that satisfies your own needs. pip install -U pip setuptools wheel pip install -U spacy python -m spacy download en_core_web_sm In your case becsue you have already installed some version of it I would recommend: A.) uninstalling the whole module pip uninstall spacy OR Don't know how to uninstall unwanted Spacy installation, model B.) Creating a new fresh environemnt There are some other libraries that ahev to be adjusted as well. So it is important to search more complex frameworks how to install before installing them in a "generalist way". Further examples.: https://rapids.ai/start.html https://pytorch.org/
Python Packages not installed
In Python, I was using the spaCy library and was trying the commands below: import spacy Getting the error below: Traceback (most recent call last): File "<stdin>", line 1, in <module> ModuleNotFoundError: No module named 'spacy' Then I tried to install spaCy using the command below: pip install spacy Message: It gives Requirement already satisfied. Commands used: import spacy pip install spacy
[ "Try pip install -U spacy and python -m spacy download en_core_web_sm\n", "\nSpacy module is a bit more finicky than others\nhttps://spacy.io/usage <-- use this guide to generate the terminal code to install the correct version.\nThis is just an example from the above code generator link.\nYou need to generate the installation code that satisfies your own needs.\n\npip install -U pip setuptools wheel\npip install -U spacy\npython -m spacy download en_core_web_sm\n\nIn your case becsue you have already installed some version of it I would recommend:\n\nA.) uninstalling the whole module\n\npip uninstall spacy\nOR\nDon't know how to uninstall unwanted Spacy installation, model\n\n\nB.) Creating a new fresh environemnt\n\n\nThere are some other libraries that ahev to be adjusted as well.\n\nSo it is important to search more complex frameworks how to install before installing them in a \"generalist way\".\nFurther examples.:\n\nhttps://rapids.ai/start.html\nhttps://pytorch.org/\n\n\n\n" ]
[ 0, 0 ]
[ "Try running !pip install spacy in a cell in your notebook.\nIf that doesn't work, it might be possible that your terminal is in different environment and code is running in different environment. Activate same environment in both cases.\n" ]
[ -1 ]
[ "python", "spacy" ]
stackoverflow_0074531051_python_spacy.txt
Q: Function & Method description on call I am trying to describe my functions and class methods. What I mean is that you can force function/methods to control input and outputvalues if the type of the data is the same as defined one. Basic definition could look like this: def SWE4_UT1_complex(root: os.path | PurePath) -> (float | int, str | int): ... First problem I am actually trying to define multiple values inside list. To be exact I want to define that kpis variable is a list of class instances. class KPI: ... <- this is my class input_kpis = list() for i in json_input.kpis: input_kpis.append(KPI(some constructor parameters)) How to define that this kind of list has to be input parameter for function below: def set_kpis(self, kpis: list) -> None: self.kpis = kpis Second problem acutally the same I have this function (it is working without problem) def SWE1_RE1(self) -> dict: self.update_Requirements_Released() self.update_Requirements_Except_Obsolete() if self.Requirements_Except_Obsolete < 0 or self.Requirements_Released < 0: self.errors.append(f'There has been error in kpi SWE.1_RE1 for project {self.project}{f" and Release {self.release}." if self.release is not False else "."}',) return dict({ 'semi_results': { 'Requirements_Released': self.Requirements_Released, 'Requirements_Except_Obsolete': self.err_Requirements_Except_Obsolete }, 'err': 1, 'output': f'There has been error in kpi SWE.1_RE1 for project {self.project}{f" and Release {self.release}." if self.release is not False else "."}', }) elif self.Requirements_Except_Obsolete == 0: self.errors.append(f'There has been error in kpi SWE.1_RT1.\n' f'There are no requirements for project {self.project}{f" and Release {self.release}." if self.release is not False else "."}',) return dict({ 'semi_results': { 'Requirements_Released': 0, 'Requirements_Except_Obsolete': 0 }, 'err': 1, 'output': f'There has been error in kpi SWE.1_RE1.\n' f'There are no requirements for project {self.project}{f" and Release {self.release}." if self.release is not False else "."}', }) else: return dict({ 'semi_results': { 'Requirements_Released': self.Requirements_Released, 'Requirements_Except_Obsolete': self.Requirements_Except_Obsolete }, 'err': 0, 'output': round(self.Requirements_Released / self.Requirements_Except_Obsolete, 4), }) How to do the same as for the second problem with list full of KPI but for a dict with specific values. dict looks like this: dict({ str: dict({ str: float | str, str: float | str }), str: int, str: float | int | str I have tried multiple ways of definition. Such as list([KPI]), list(KPI)... It did not work. Maybe is it a correct way to use [KPI]? as a definition. With a dict I have also tried multiple version but did not work. The problem was once I defined two str: float | str in the first key and the problem was multiple same definition or something like that. A: Did you try just defining it as 'list[KPI]' this would be a list of KPI instances. (quotations needed around the type because: "Subscript for class "list" will generate runtime exception; enclose type annotation in quotes") Apologies if this is not what you were asking.
Function & Method description on call
I am trying to describe my functions and class methods. What I mean is that you can force function/methods to control input and outputvalues if the type of the data is the same as defined one. Basic definition could look like this: def SWE4_UT1_complex(root: os.path | PurePath) -> (float | int, str | int): ... First problem I am actually trying to define multiple values inside list. To be exact I want to define that kpis variable is a list of class instances. class KPI: ... <- this is my class input_kpis = list() for i in json_input.kpis: input_kpis.append(KPI(some constructor parameters)) How to define that this kind of list has to be input parameter for function below: def set_kpis(self, kpis: list) -> None: self.kpis = kpis Second problem acutally the same I have this function (it is working without problem) def SWE1_RE1(self) -> dict: self.update_Requirements_Released() self.update_Requirements_Except_Obsolete() if self.Requirements_Except_Obsolete < 0 or self.Requirements_Released < 0: self.errors.append(f'There has been error in kpi SWE.1_RE1 for project {self.project}{f" and Release {self.release}." if self.release is not False else "."}',) return dict({ 'semi_results': { 'Requirements_Released': self.Requirements_Released, 'Requirements_Except_Obsolete': self.err_Requirements_Except_Obsolete }, 'err': 1, 'output': f'There has been error in kpi SWE.1_RE1 for project {self.project}{f" and Release {self.release}." if self.release is not False else "."}', }) elif self.Requirements_Except_Obsolete == 0: self.errors.append(f'There has been error in kpi SWE.1_RT1.\n' f'There are no requirements for project {self.project}{f" and Release {self.release}." if self.release is not False else "."}',) return dict({ 'semi_results': { 'Requirements_Released': 0, 'Requirements_Except_Obsolete': 0 }, 'err': 1, 'output': f'There has been error in kpi SWE.1_RE1.\n' f'There are no requirements for project {self.project}{f" and Release {self.release}." if self.release is not False else "."}', }) else: return dict({ 'semi_results': { 'Requirements_Released': self.Requirements_Released, 'Requirements_Except_Obsolete': self.Requirements_Except_Obsolete }, 'err': 0, 'output': round(self.Requirements_Released / self.Requirements_Except_Obsolete, 4), }) How to do the same as for the second problem with list full of KPI but for a dict with specific values. dict looks like this: dict({ str: dict({ str: float | str, str: float | str }), str: int, str: float | int | str I have tried multiple ways of definition. Such as list([KPI]), list(KPI)... It did not work. Maybe is it a correct way to use [KPI]? as a definition. With a dict I have also tried multiple version but did not work. The problem was once I defined two str: float | str in the first key and the problem was multiple same definition or something like that.
[ "Did you try just defining it as 'list[KPI]' this would be a list of KPI instances. (quotations needed around the type because: \"Subscript for class \"list\" will generate runtime exception; enclose type annotation in quotes\")\nApologies if this is not what you were asking.\n" ]
[ 1 ]
[]
[]
[ "python", "python_3.10", "python_3.x" ]
stackoverflow_0074530700_python_python_3.10_python_3.x.txt
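On Python 3.10 (the version tagged on the question) the annotation from the answer can be written without quotes, and the nested dict shape can be spelled out with typing.TypedDict. A sketch under those assumptions, with a stripped-down KPI class and a hypothetical Project class standing in for the real ones:

from typing import TypedDict

class KPI:
    def __init__(self, name: str) -> None:
        self.name = name

class SemiResults(TypedDict):
    Requirements_Released: float | str
    Requirements_Except_Obsolete: float | str

class KpiResult(TypedDict):
    semi_results: SemiResults
    err: int
    output: float | int | str

class Project:
    def set_kpis(self, kpis: list[KPI]) -> None:
        # list[KPI] works unquoted as an annotation on Python 3.9+.
        self.kpis = kpis

    def SWE1_RE1(self) -> KpiResult:
        return {'semi_results': {'Requirements_Released': 1.0,
                                 'Requirements_Except_Obsolete': 2.0},
                'err': 0,
                'output': 0.5}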
Q: Finding specific cell inside Pandas Dataframe based on most similar column and index labels (when compared to references) I have dataframe with around 500 columns and 300 rows and it looks like the example below. I need to select specific dataframe cell based on most similar column label and index label when compared to a reference. Let me explain my problem: Let's say that I need to find a cell which has column label most similar to refence x=0.561697 and index label most similar to reference y=-0.12849. Most probably, there are not the references' labels among my dataframe's columns and index, however there are very similar labels and that's what I want to find. Another problem is that sometimes is the similarity up to three decimal number, sometimes only up to two decimal numbers. An ideal output would be a cell which column and index labels are most similar to the references (i.e. they share the same number on most decimal numbers). Can I somehow modify methods "loc" and "iloc" to search for similarity and not exact label/values? Or are there other methods right for this purpose (even outside pandas)? Thank you for suggestions. #example of my dataframe my_index=[[-0.176931, -0.15578987, -0.134648739]] my_columns=[[0.447852, 0.568911395, 0.31997079, 0.451030185, 0.45208958]] data=[[-6.027819824, -7.581473207, -9.277630354, -10.967289156, -12.490250252], [-13.749975397, -14.709719522, -15.317946078, -15.45502317, -14.990571819], [-13.922128986, -12.463674538, -10.987597885, -9.843527599, -9.179409063]] df=pd.DataFrame(data) df.columns=my_columns df1=df.set_index(my_index) df1 Using this example, the desired output would be only the cell with value "-12.463675" with column label "0.568911395" and index label "-0.134648739" DataFrame df1: 0.447852 0.568911 0.319971 0.451030 0.452090 -0.176931 -6.027820 -7.581473 -9.277630 -10.967289 -12.490250 -0.155790 -13.749975 -14.709720 -15.317946 -15.455023 -14.990572 -0.134649 -13.922129 -12.463675 -10.987598 -9.843528 -9.179409 A: Assuming you fix you columns' Index to be 1D: df1.columns = my_columns[0] # Float64Index([0.447852, 0.568911395, 0.31997079, 0.451030185, 0.45208958], dtype='float64') You can use the minimal absolute difference to your target: import numpy as np out = df1.iloc[np.argmin(abs(df1.index-y)), np.argmin(abs(df1.columns-x))] output: -12.463674538 Intermediate: np.argmin(abs(df1.index-y)), np.argmin(abs(df1.columns-x)) output: (2, 1) Coordinates: y_pos = np.argmin(abs(df1.index-y)) x_pos = np.argmin(abs(df1.columns-x)) df1.index[y_pos], df1.columns[x_pos] output: (-0.134648739, 0.568911395)
Finding specific cell inside Pandas Dataframe based on most similar column and index labels (when compared to references)
I have dataframe with around 500 columns and 300 rows and it looks like the example below. I need to select specific dataframe cell based on most similar column label and index label when compared to a reference. Let me explain my problem: Let's say that I need to find a cell which has column label most similar to refence x=0.561697 and index label most similar to reference y=-0.12849. Most probably, there are not the references' labels among my dataframe's columns and index, however there are very similar labels and that's what I want to find. Another problem is that sometimes is the similarity up to three decimal number, sometimes only up to two decimal numbers. An ideal output would be a cell which column and index labels are most similar to the references (i.e. they share the same number on most decimal numbers). Can I somehow modify methods "loc" and "iloc" to search for similarity and not exact label/values? Or are there other methods right for this purpose (even outside pandas)? Thank you for suggestions. #example of my dataframe my_index=[[-0.176931, -0.15578987, -0.134648739]] my_columns=[[0.447852, 0.568911395, 0.31997079, 0.451030185, 0.45208958]] data=[[-6.027819824, -7.581473207, -9.277630354, -10.967289156, -12.490250252], [-13.749975397, -14.709719522, -15.317946078, -15.45502317, -14.990571819], [-13.922128986, -12.463674538, -10.987597885, -9.843527599, -9.179409063]] df=pd.DataFrame(data) df.columns=my_columns df1=df.set_index(my_index) df1 Using this example, the desired output would be only the cell with value "-12.463675" with column label "0.568911395" and index label "-0.134648739" DataFrame df1: 0.447852 0.568911 0.319971 0.451030 0.452090 -0.176931 -6.027820 -7.581473 -9.277630 -10.967289 -12.490250 -0.155790 -13.749975 -14.709720 -15.317946 -15.455023 -14.990572 -0.134649 -13.922129 -12.463675 -10.987598 -9.843528 -9.179409
[ "Assuming you fix you columns' Index to be 1D:\ndf1.columns = my_columns[0]\n# Float64Index([0.447852, 0.568911395, 0.31997079, 0.451030185, 0.45208958], dtype='float64')\n\nYou can use the minimal absolute difference to your target:\nimport numpy as np\n\nout = df1.iloc[np.argmin(abs(df1.index-y)), np.argmin(abs(df1.columns-x))]\n\noutput: -12.463674538\nIntermediate:\nnp.argmin(abs(df1.index-y)), np.argmin(abs(df1.columns-x))\n\noutput: (2, 1)\nCoordinates:\ny_pos = np.argmin(abs(df1.index-y))\nx_pos = np.argmin(abs(df1.columns-x))\ndf1.index[y_pos], df1.columns[x_pos]\n\noutput: (-0.134648739, 0.568911395)\n" ]
[ 3 ]
[]
[]
[ "dataframe", "numpy", "pandas", "python" ]
stackoverflow_0074530609_dataframe_numpy_pandas_python.txt
Q: Importing the numpy C-extensions failed Azure function When running one of my functions which includes pandas, I get the following error message: Result: Failure Exception: ImportError: Unable to import required dependencies: numpy: IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! Importing the numpy C-extensions failed. This error can happen for many reasons, often due to issues with your setup or how NumPy was installed. We have compiled some common reasons and troubleshooting tips at: https://numpy.org/devdocs/user/troubleshooting-importerror.html Please note and check the following: * The Python version is: Python3.9 from "/usr/local/bin/python" * The NumPy version is: "1.23.4" and make sure that they are the versions you expect. Please carefully study the documentation linked above for further help. Original error was: libopenblas64_p-r0-742d56dc.3.20.so: cannot open shared object file: No such file or directory . Please check the requirements.txt file for the missing module. For more info, please refer the troubleshooting guide: https://aka.ms/functions-modulenotfound Stack: File "/azure-functions-host/workers/python/3.9/LINUX/X64/azure_functions_worker/dispatcher.py", line 365, in _handle__function_load_request func = loader.load_function( File "/azure-functions-host/workers/python/3.9/LINUX/X64/azure_functions_worker/utils/wrappers.py", line 48, in call raise extend_exception_message(e, message) File "/azure-functions-host/workers/python/3.9/LINUX/X64/azure_functions_worker/utils/wrappers.py", line 44, in call return func(*args, **kwargs) File "/azure-functions-host/workers/python/3.9/LINUX/X64/azure_functions_worker/loader.py", line 134, in load_function mod = importlib.import_module(fullmodname) File "/usr/local/lib/python3.9/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 1030, in _gcd_import File "<frozen importlib._bootstrap>", line 1007, in _find_and_load File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 680, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 850, in exec_module File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed File "/home/site/wwwroot/PROD_Avg_Tot_Debits_And_Account_Balance/__init__.py", line 6, in <module> import pandas as pd File "/home/site/wwwroot/.python_packages/lib/site-packages/pandas/__init__.py", line 16, in <module> raise ImportError( I have reinstalled pandas and numpy, I have force updated them both. I have tried reverting to an older version of numpy (1.21.0). What is my next step in debugging this issue? A: I have created the Azure Functions Python Version 3.9.13 with the below packages Pandas and NumPy as 1.5.1 and 1.23.5: Written the sample code that uses pandas and NumPy packages and it is working as expected: import logging import pandas as pd import numpy as np import azure.functions as func def main(req: func.HttpRequest) -> func.HttpResponse: logging.info('Python HTTP trigger function processed a request.') S = pd.Series([11, 28, 72, 3, 5, 8]) arr = np.array( [[ 1, 2, 3], [ 4, 2, 5]] ) print(S.values) print("Size of numpy array: ", arr.size) return func.HttpResponse( "Hello Krishna, This HTTP triggered function executed successfully.", status_code=200 )
Importing the numpy C-extensions failed Azure function
When running one of my functions which includes pandas, I get the following error message: Result: Failure Exception: ImportError: Unable to import required dependencies: numpy: IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! Importing the numpy C-extensions failed. This error can happen for many reasons, often due to issues with your setup or how NumPy was installed. We have compiled some common reasons and troubleshooting tips at: https://numpy.org/devdocs/user/troubleshooting-importerror.html Please note and check the following: * The Python version is: Python3.9 from "/usr/local/bin/python" * The NumPy version is: "1.23.4" and make sure that they are the versions you expect. Please carefully study the documentation linked above for further help. Original error was: libopenblas64_p-r0-742d56dc.3.20.so: cannot open shared object file: No such file or directory . Please check the requirements.txt file for the missing module. For more info, please refer the troubleshooting guide: https://aka.ms/functions-modulenotfound Stack: File "/azure-functions-host/workers/python/3.9/LINUX/X64/azure_functions_worker/dispatcher.py", line 365, in _handle__function_load_request func = loader.load_function( File "/azure-functions-host/workers/python/3.9/LINUX/X64/azure_functions_worker/utils/wrappers.py", line 48, in call raise extend_exception_message(e, message) File "/azure-functions-host/workers/python/3.9/LINUX/X64/azure_functions_worker/utils/wrappers.py", line 44, in call return func(*args, **kwargs) File "/azure-functions-host/workers/python/3.9/LINUX/X64/azure_functions_worker/loader.py", line 134, in load_function mod = importlib.import_module(fullmodname) File "/usr/local/lib/python3.9/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 1030, in _gcd_import File "<frozen importlib._bootstrap>", line 1007, in _find_and_load File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 680, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 850, in exec_module File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed File "/home/site/wwwroot/PROD_Avg_Tot_Debits_And_Account_Balance/__init__.py", line 6, in <module> import pandas as pd File "/home/site/wwwroot/.python_packages/lib/site-packages/pandas/__init__.py", line 16, in <module> raise ImportError( I have reinstalled pandas and numpy, I have force updated them both. I have tried reverting to an older version of numpy (1.21.0). What is my next step in debugging this issue?
[ "I have created the Azure Functions Python Version 3.9.13 with the below packages Pandas and NumPy as 1.5.1 and 1.23.5:\n\nWritten the sample code that uses pandas and NumPy packages and it is working as expected:\nimport logging\nimport pandas as pd\nimport numpy as np \nimport azure.functions as func\n\ndef main(req: func.HttpRequest) -> func.HttpResponse:\nlogging.info('Python HTTP trigger function processed a request.')\n\nS = pd.Series([11, 28, 72, 3, 5, 8])\narr = np.array( [[ 1, 2, 3],\n[ 4, 2, 5]] )\n\nprint(S.values)\nprint(\"Size of numpy array: \", arr.size)\n\nreturn func.HttpResponse(\n\"Hello Krishna, This HTTP triggered function executed successfully.\",\nstatus_code=200\n)\n\n\n" ]
[ 0 ]
[]
[]
[ "azure_functions", "numpy", "python" ]
stackoverflow_0074493372_azure_functions_numpy_python.txt
Q: ShapelyDeprecationWarnings and the use of "geoms" Some lines to look up geographical information by given pair of coordinates, referenced from https://gis.stackexchange.com/questions/254869/projecting-google-maps-coordinate-to-lookup-country-in-shapefile. import geopandas as gpd from shapely.geometry import Point pt = Point(8.7333333, 53.1333333) # countries shapefile from # http://thematicmapping.org/downloads/world_borders.php folder = 'C:\\My Documents\\' data = gpd.read_file(folder + 'TM_WORLD_BORDERS-0.3.shp') for index, row in data.iterrows(): poly = row['geometry'] if poly.contains(pt): print (row) # ---------- Print out as ----------------------------------- FIPS GM ISO2 DE ISO3 DEU UN 276 NAME Germany AREA 34895 POP2005 82652369 REGION 150 SUBREGION 155 LON 9.851 LAT 51.11 geometry (POLYGON ((8.710255000000018 47.69680799999997... Name: 71, dtype: object It works but prints out paragraphs of ShapelyDeprecationWarnings: C:\Python38\lib\site-packages\pandas\core\dtypes\inference.py:384: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry. iter(obj) # Can iterate over it. C:\Python38\lib\site-packages\pandas\core\dtypes\inference.py:385: ShapelyDeprecationWarning: __len__ for multi-part geometries is deprecated and will be removed in Shapely 2.0. Check the length of the `geoms` property instead to get the number of parts of a multi-part geometry. len(obj) # Has a length associated with it. C:\Python38\lib\site-packages\pandas\io\formats\printing.py:120: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry. s = iter(seq) C:\Python38\lib\site-packages\pandas\io\formats\printing.py:124: ShapelyDeprecationWarning: __len__ for multi-part geometries is deprecated and will be removed in Shapely 2.0. Check the length of the `geoms` property instead to get the number of parts of a multi-part geometry. for i in range(min(nitems, len(seq))) C:\Python38\lib\site-packages\pandas\io\formats\printing.py:128: ShapelyDeprecationWarning: __len__ for multi-part geometries is deprecated and will be removed in Shapely 2.0. Check the length of the `geoms` property instead to get the number of parts of a multi-part geometry. if nitems < len(seq): To update the above code to avoid ShapelyDeprecationWarnings, I've tried to replace "for index, row in data.iterrows()" to "for index, row in data.geoms", and "poly = row['geometry']" to "poly = row.geoms". Neither worked. What's the right way to update the code to avoid ShapelyDeprecationWarnings? A: Let's start by examining the geometry column with data.geometry. This reveals that the geometry contains normal polygons and multipolygons. 0 MULTIPOLYGON (((-61.68667 17.02444, -61.88722 ... 1 POLYGON ((2.96361 36.80222, 4.78583 36.89472, ... ... New answer The error is only caused by Geopandas doing some operations on the row when we are trying to print it. A row is a pandas.Series object. Simple solution is to temporarily drop the geometry column from the dataframe, before we retrieve a single row for printing. for index, row in data.iterrows(): poly = row['geometry'] if poly.contains(pt): print(data.drop(columns='geometry') .iloc[[row.name]] .iloc[0]) Old answer The geoms property only exists on MULTIPOLYGONS. 
Thus you have to adjust your code to deal with this accordingly: for index, row in data.iterrows(): results = [] if hasattr(row.geometry, "geoms"): for part in row.geometry.geoms: if part.contains(pt): results.append(row) elif row.geometry.contains(pt): results.append(row) else: pass for row in results: print(data.drop(columns='geometry').iloc[[row.name]].iloc[0])
ShapelyDeprecationWarnings and the use of "geoms"
Some lines to look up geographical information by given pair of coordinates, referenced from https://gis.stackexchange.com/questions/254869/projecting-google-maps-coordinate-to-lookup-country-in-shapefile. import geopandas as gpd from shapely.geometry import Point pt = Point(8.7333333, 53.1333333) # countries shapefile from # http://thematicmapping.org/downloads/world_borders.php folder = 'C:\\My Documents\\' data = gpd.read_file(folder + 'TM_WORLD_BORDERS-0.3.shp') for index, row in data.iterrows(): poly = row['geometry'] if poly.contains(pt): print (row) # ---------- Print out as ----------------------------------- FIPS GM ISO2 DE ISO3 DEU UN 276 NAME Germany AREA 34895 POP2005 82652369 REGION 150 SUBREGION 155 LON 9.851 LAT 51.11 geometry (POLYGON ((8.710255000000018 47.69680799999997... Name: 71, dtype: object It works but prints out paragraphs of ShapelyDeprecationWarnings: C:\Python38\lib\site-packages\pandas\core\dtypes\inference.py:384: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry. iter(obj) # Can iterate over it. C:\Python38\lib\site-packages\pandas\core\dtypes\inference.py:385: ShapelyDeprecationWarning: __len__ for multi-part geometries is deprecated and will be removed in Shapely 2.0. Check the length of the `geoms` property instead to get the number of parts of a multi-part geometry. len(obj) # Has a length associated with it. C:\Python38\lib\site-packages\pandas\io\formats\printing.py:120: ShapelyDeprecationWarning: Iteration over multi-part geometries is deprecated and will be removed in Shapely 2.0. Use the `geoms` property to access the constituent parts of a multi-part geometry. s = iter(seq) C:\Python38\lib\site-packages\pandas\io\formats\printing.py:124: ShapelyDeprecationWarning: __len__ for multi-part geometries is deprecated and will be removed in Shapely 2.0. Check the length of the `geoms` property instead to get the number of parts of a multi-part geometry. for i in range(min(nitems, len(seq))) C:\Python38\lib\site-packages\pandas\io\formats\printing.py:128: ShapelyDeprecationWarning: __len__ for multi-part geometries is deprecated and will be removed in Shapely 2.0. Check the length of the `geoms` property instead to get the number of parts of a multi-part geometry. if nitems < len(seq): To update the above code to avoid ShapelyDeprecationWarnings, I've tried to replace "for index, row in data.iterrows()" to "for index, row in data.geoms", and "poly = row['geometry']" to "poly = row.geoms". Neither worked. What's the right way to update the code to avoid ShapelyDeprecationWarnings?
[ "Let's start by examining the geometry column with data.geometry. This reveals that the geometry contains normal polygons and multipolygons.\n0 MULTIPOLYGON (((-61.68667 17.02444, -61.88722 ...\n1 POLYGON ((2.96361 36.80222, 4.78583 36.89472, ...\n...\n\nNew answer\nThe error is only caused by Geopandas doing some operations on the row when we are trying to print it. A row is a pandas.Series object.\nSimple solution is to temporarily drop the geometry column from the dataframe, before we retrieve a single row for printing.\nfor index, row in data.iterrows():\n poly = row['geometry']\n if poly.contains(pt):\n print(data.drop(columns='geometry')\n .iloc[[row.name]]\n .iloc[0])\n\n\nOld answer\nThe geoms property only exists on MULTIPOLYGONS.\nThus you have to adjust your code to deal with this accordingly:\nfor index, row in data.iterrows():\n\n results = [] \n\n if hasattr(row.geometry, \"geoms\"):\n for part in row.geometry.geoms:\n if part.contains(pt):\n results.append(row)\n \n elif row.geometry.contains(pt):\n results.append(row)\n \n else:\n pass\n\n for row in results:\n print(data.drop(columns='geometry').iloc[[row.name]].iloc[0])\n\n" ]
[ 1 ]
[]
[]
[ "geopandas", "python", "shapely" ]
stackoverflow_0074529728_geopandas_python_shapely.txt
Q: How to get the pytest-cov report only if total coverage is lower than 90% As I understood, pytest-cov has an option to fail if total coverage is lower than some %. But can I output the whole coverage table only when total coverage is lower than 90%, and have it show nothing when coverage is higher? Example of command line code A: pytest-cov doesn't have this option, but you don't have to use pytest-cov to produce the report. Use the plugin to run coverage, then in a separate command, produce the report: coverage report. You can conditionally run that separate command based on whatever condition you want.
How to get the pytest-cov report only if total coverage is lower than 90%
As I understood, pytest-cov has an option to fail if total coverage is lower than some %. But can I output the whole coverage table only when total coverage is lower than 90%, and have it show nothing when coverage is higher? Example of command line code
[ "pytest-cov doesn't have this option, but you don't have to use pytest-cov to produce the report. Use the plugin to run coverage, then in a separate command, produce the report: coverage report. You can conditionally run that separate command based on whatever condition you want.\n" ]
[ 0 ]
[]
[]
[ "code_coverage", "pytest", "pytest_cov", "python", "test_coverage" ]
stackoverflow_0074522777_code_coverage_pytest_pytest_cov_python_test_coverage.txt
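The answer above describes the conditional step only in prose. A minimal sketch of that step, assuming the tests were already run with pytest --cov so that a .coverage data file exists (the 90% threshold and the use of coverage's Python API instead of the CLI are illustrative choices, not from the original post):
import io
import coverage

THRESHOLD = 90.0  # print the table only when total coverage is below this

cov = coverage.Coverage()
cov.load()  # read the .coverage data file written by the pytest --cov run

buffer = io.StringIO()
total = cov.report(file=buffer)  # report() returns the total coverage percentage

if total < THRESHOLD:
    # show the full coverage table only when coverage is too low
    print(buffer.getvalue())
The same idea works with the coverage report shell command; the Python variant is shown here only because it makes the "below 90%" condition explicit.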
Q: Slice pandas dataframe column into multiple columns using substring dataframe 'df' has the following data - Column A Column B Item_ID1 Information - information for item that has ID as 1\nPrice - $7.99\nPlace - Albany, NY Item_ID2 Information - item's information with ID as 2\nPrice - $5.99\nPlace - Ottawa, ON How to segregate the values from column B using 'Information', 'Price', and 'Place' into different columns like - Column A Information Price Place Item_ID1 information for item that has ID as 1 $7.99 Albany, NY Item_ID2 item's information with ID as 2 $5.99 Ottawa, ON I tried splitting the column B based on string values like 'Information - ', 'Price - ', 'Place - ' but that is becoming more complicated and the very first slice has information on Price and Place which is not required in it. A: You can approach this by using pandas.Series.split : df[["Information", "Price", "Place"]]= df.pop("Column B").str.split(r"\\n", expand=True) df= df.astype(str).apply(lambda x: x.replace(x.name, "", regex=True).str.strip(" - ")) # Output : print(df.to_string()) Column A Information Price Place 0 Item_ID1 information for item that has ID as 1 $7.99 Albany, NY 1 Item_ID2 item's information with ID as 2 $5.99 Ottawa, ON A: For a generic method in which you do not need to know the future columns in advance, you can use str.extractall and a pivot: out = df.drop(columns='Column B').join( df['Column B'] .str.extractall(r'([^-]+) - ([^\n]+)\n?') .droplevel('match') .pivot(columns=0, values=1) ) NB. I am assuming that you have real newlines, if you have instead the two characters \ and n, you can convert with df['Column B'] = df['Column B'].str.replace(r'\\n', '\n') Output: Column A Information Place Price 0 Item_ID1 information for item that has ID as 1 Albany, NY $7.99 1 Item_ID2 item's information with ID as 2 Ottawa, ON $5.99 A: Another possible solution, based on the following ideas: Split Column B by \s-\s|\\n, using pandas.Series.str.split. Reshape the result, using numpy.reshape. Apply pandas.pivot_table. (pd.concat([df['Column A'], pd.DataFrame( df['Column B'].str.split(r'\s-\s|\\n', expand=True, regex=True).values .reshape((-1,2))) .pivot_table(columns=0, values=1, aggfunc=list) .pipe(lambda d: d.explode(d.columns.tolist(), ignore_index=True))], axis=1)) Output: Column A Information Place Price 0 Item_ID1 information for item that has ID as 1 Albany, NY $7.99 1 Item_ID2 item's information with ID as 2 Ottawa, ON $5.99
Slice pandas dataframe column into multiple columns using substring
dataframe 'df' has the following data - Column A Column B Item_ID1 Information - information for item that has ID as 1\nPrice - $7.99\nPlace - Albany, NY Item_ID2 Information - item's information with ID as 2\nPrice - $5.99\nPlace - Ottawa, ON How to segregate the values from column B using 'Information', 'Price', and 'Place' into different columns like - Column A Information Price Place Item_ID1 information for item that has ID as 1 $7.99 Albany, NY Item_ID2 item's information with ID as 2 $5.99 Ottawa, ON I tried splitting the column B based on string values like 'Information - ', 'Price - ', 'Place - ' but that is becoming more complicated and the very first slice has information on Price and Place which is not required in it.
[ "You can approach this by using pandas.Series.split :\ndf[[\"Information\", \"Price\", \"Place\"]]= df.pop(\"Column B\").str.split(r\"\\\\n\", expand=True)\n\ndf= df.astype(str).apply(lambda x: x.replace(x.name, \"\", regex=True).str.strip(\" - \"))\n\n# Output :\nprint(df.to_string())\n\n Column A Information Price Place\n0 Item_ID1 information for item that has ID as 1 $7.99 Albany, NY\n1 Item_ID2 item's information with ID as 2 $5.99 Ottawa, ON\n\n", "For a generic method in which you do not need to know the future columns in advance, you can use str.extractall and a pivot:\nout = df.drop(columns='Column B').join(\n df['Column B']\n .str.extractall(r'([^-]+) - ([^\\n]+)\\n?')\n .droplevel('match')\n .pivot(columns=0, values=1)\n)\n\nNB. I am assuming that you have real newlines, if you have instead the two characters \\ and n, you can convert with df['Column B'] = df['Column B'].str.replace(r'\\\\n', '\\n')\nOutput:\n Column A Information Place Price\n0 Item_ID1 information for item that has ID as 1 Albany, NY $7.99\n1 Item_ID2 item's information with ID as 2 Ottawa, ON $5.99\n\n", "Another possible solution, based on the following ideas:\n\nSplit Column B by \\s-\\s|\\\\n, using pandas.Series.str.split.\n\nReshape the result, using numpy.reshape.\n\nApply pandas.pivot_table.\n\n\n(pd.concat([df['Column A'], pd.DataFrame(\n df['Column B'].str.split(r'\\s-\\s|\\\\n', expand=True, regex=True).values\n .reshape((-1,2)))\n .pivot_table(columns=0, values=1, aggfunc=list)\n .pipe(lambda d: d.explode(d.columns.tolist(), ignore_index=True))], axis=1))\n\nOutput:\n Column A Information Place Price\n0 Item_ID1 information for item that has ID as 1 Albany, NY $7.99\n1 Item_ID2 item's information with ID as 2 Ottawa, ON $5.99\n\n" ]
[ 2, 2, 1 ]
[]
[]
[ "dataframe", "pandas", "python" ]
stackoverflow_0074529438_dataframe_pandas_python.txt
Q: Python -- Function with a Pandas dataframe as an argument I have to create a function that takes a Pandas dataframe as an argument and returns a copy of the dataframe after replacing the null values in each column with the most frequent value in the column. Cannot use FOR or WHILE loops. A: Well, to create a copy you can simply use df.copy(deep = True) (note that deep = True creates a new dataframe-object, otherwise you get a reference to the copied dataframe). To replace the null values with the most frequent values, you can use the mode method for Series and DataFrames (https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.mode.html). An example would be: df = df.fillna(df.mode().iloc[0])
Python -- Function with a Pandas dataframe as an argument
I have to create a function that takes a Pandas dataframe as an argument and returns a copy of the dataframe after replacing the null values in each column with the most frequent value in the column. Cannot use FOR or WHILE loops.
[ "Well, to create a copy you can simply use df.copy(deep = True) (note that deep = True creates a new dataframe-object, otherwise you get a reference to the copied dataframe).\nTo replace the null values with the most frequent values, you can use the mode method for Series and DataFrames (https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.mode.html).\nAn example would be:\ndf = df.fillna(df.mode().iloc[0])\n" ]
[ 1 ]
[]
[]
[ "anaconda", "dataframe", "function", "pandas", "python" ]
stackoverflow_0074531219_anaconda_dataframe_function_pandas_python.txt
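The answer above gives the key one-liner; wrapping it into the function the question asks for might look like the following sketch (the function name fill_nulls_with_mode and the small example frame are made up for illustration):
import pandas as pd

def fill_nulls_with_mode(df: pd.DataFrame) -> pd.DataFrame:
    # Return a copy of df with nulls replaced by each column's most frequent value.
    out = df.copy(deep=True)
    # mode() puts one most-frequent value per column in row 0
    return out.fillna(out.mode().iloc[0])

# usage
example = pd.DataFrame({"a": [1, 1, None], "b": ["x", None, "x"]})
print(fill_nulls_with_mode(example))
No explicit for/while loop is needed, which satisfies the constraint in the question.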
Q: Can't change Column to array - int64 I have a CSV dataset with 2 columns that looks like the following: Date Open 25/2/21 7541.85 26/2/21 7562.32 27/2/21 7521.65 28/2/21 7509.14 Data columns (total 2 columns): # Column Non-Null Count Dtype 0 Open 1280 non-null object 1 Date 1280 non-null datetime64[ns] dtypes: datetime64ns, object(1) When trying to pass this through a timeseries model I get the following error: ftse_open = TimeSeries.from_dataframe(ftse_open, time_col='Date', value_cols='Open') ValueError: could not convert string to float: '7,541.85' Then I try a different route using the following code: ftse_open["Open"] = ftse_open["Open"].astype('Int64') Yielding: TypeError: object cannot be converted to an IntegerDtype I have tried more code to resolve but I'm not sure why there seems to be no solution that I can find. (There are no NAs in the dataset - I have checked). Any help is appreciated, thank you. A: Based on comments, you can try: df["Open"] = df["Open"].str.replace(",", "").astype(float) print(df) Prints: Date Open 0 25/2/21 7541.85 1 26/2/21 7562.32 2 27/2/21 7521.65 3 28/2/21 7509.14 df used: Date Open 0 25/2/21 7,541.85 1 26/2/21 7,562.32 2 27/2/21 7,521.65 3 28/2/21 7,509.14
Can't change Column to array - int64
I have a CSV dataset with 2 columns that looks like the following: Date Open 25/2/21 7541.85 26/2/21 7562.32 27/2/21 7521.65 28/2/21 7509.14 Data columns (total 2 columns): # Column Non-Null Count Dtype 0 Open 1280 non-null object 1 Date 1280 non-null datetime64[ns] dtypes: datetime64ns, object(1) When trying to pass this through a timeseries model I get the following error: ftse_open = TimeSeries.from_dataframe(ftse_open, time_col='Date', value_cols='Open') ValueError: could not convert string to float: '7,541.85' Then I try a different route using the following code: ftse_open["Open"] = ftse_open["Open"].astype('Int64') Yielding: TypeError: object cannot be converted to an IntegerDtype I have tried more code to resolve but I'm not sure why there seems to be no solution that I can find. (There are no NAs in the dataset - I have checked). Any help is appreciated, thank you.
[ "Based on comments, you can try:\ndf[\"Open\"] = df[\"Open\"].str.replace(\",\", \"\").astype(float)\nprint(df)\n\nPrints:\n Date Open\n0 25/2/21 7541.85\n1 26/2/21 7562.32\n2 27/2/21 7521.65\n3 28/2/21 7509.14\n\n\ndf used:\n Date Open\n0 25/2/21 7,541.85\n1 26/2/21 7,562.32\n2 27/2/21 7,521.65\n3 28/2/21 7,509.14\n\n" ]
[ 1 ]
[]
[]
[ "arrays", "python" ]
stackoverflow_0074531095_arrays_python.txt
Q: gevent not valid running Docker Registry I am attempting to run a Docker registry on Ubuntu 14 using the following command: sudo gunicorn --access-logfile - --debug -k gevent -b 0.0.0.0:5000 -w 1 docker_registry.wsgi:application Unfortunately, when I attempt this I get the following failure message: Error: class uri 'gevent' invalid or not found: [Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/gunicorn/util.py", line 139, in load_class mod = import_module('.'.join(components)) File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module __import__(name) File "/usr/local/lib/python2.7/dist-packages/gunicorn/workers/ggevent.py", line 24, in <module> from gevent.server import StreamServer File "/usr/local/lib/python2.7/dist-packages/gevent/server.py", line 6, in <module> from gevent.socket import EWOULDBLOCK, socket File "/usr/local/lib/python2.7/dist-packages/gevent/socket.py", line 659, in <module> from gevent.ssl import sslwrap_simple as ssl, SSLError as sslerror, SSLSocket as SSLType File "/usr/local/lib/python2.7/dist-packages/gevent/ssl.py", line 386, in <module> def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None): NameError: name 'PROTOCOL_SSLv3' is not defined I did some searching and discovered something that told me to install python-gevent in order to get the gevent: sudo apt-get install python-gevent I did this and a subsequent invocation of dpkg: dpkg -l|grep python shows that python-gevent is installed. Unfortunately, the install has not resolved the problem. I am still getting the same failure message saying that gevent isn't valid or found. Does anyone know how to resolve this problem??? Please advise... A: re-Install python-gevent apt-get install python-gevent pip install --upgrade gevent A: try it: 1.find your python gevent package folder: $ cd /usr/local/lib/python2.7/dist-packages/gevent 2.update ssl.py from: def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None): to: def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None): 3.try again: $sudo gunicorn --access-logfile - --debug -k gevent -b 0.0.0.0:5000 -w 1 docker_registry.wsgi:application A: If you're getting this in late 2022 or later, know that gevent versions 20.9.0 and earlier depend on greenlet, without specifying an upper bound on that version. greenlet has just released a breaking change in v2.x.x, and gevent doesn't work with it. Pinning greenlet=1.1.3 fixed it for me. More details about this issue here: https://github.com/python-greenlet/greenlet/issues/178
gevent not valid running Docker Registry
I am attempting to run a Docker registry on Ubuntu 14 using the following command: sudo gunicorn --access-logfile - --debug -k gevent -b 0.0.0.0:5000 -w 1 docker_registry.wsgi:application Unfortunately, when I attempt this I get the following failure message: Error: class uri 'gevent' invalid or not found: [Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/gunicorn/util.py", line 139, in load_class mod = import_module('.'.join(components)) File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module __import__(name) File "/usr/local/lib/python2.7/dist-packages/gunicorn/workers/ggevent.py", line 24, in <module> from gevent.server import StreamServer File "/usr/local/lib/python2.7/dist-packages/gevent/server.py", line 6, in <module> from gevent.socket import EWOULDBLOCK, socket File "/usr/local/lib/python2.7/dist-packages/gevent/socket.py", line 659, in <module> from gevent.ssl import sslwrap_simple as ssl, SSLError as sslerror, SSLSocket as SSLType File "/usr/local/lib/python2.7/dist-packages/gevent/ssl.py", line 386, in <module> def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None): NameError: name 'PROTOCOL_SSLv3' is not defined I did some searching and discovered something that told me to install python-gevent in order to get the gevent: sudo apt-get install python-gevent I did this and a subsequent invocation of dpkg: dpkg -l|grep python shows that python-gevent is installed. Unfortunately, the install has not resolved the problem. I am still getting the same failure message saying that gevent isn't valid or found. Does anyone know how to resolve this problem??? Please advise...
[ "re-Install python-gevent\napt-get install python-gevent\npip install --upgrade gevent\n", "try it:\n1.find your python gevent package folder:\n$ cd /usr/local/lib/python2.7/dist-packages/gevent\n\n2.update ssl.py\nfrom:\ndef get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):\n\nto:\ndef get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):\n\n3.try again:\n$sudo gunicorn --access-logfile - --debug -k gevent -b 0.0.0.0:5000 -w 1 docker_registry.wsgi:application\n\n", "If you're getting this in late 2022 or later, know that gevent versions 20.9.0 and earlier depend on greenlet, without specifying an upper bound on that version. greenlet has just released a breaking change in v2.x.x, and gevent doesn't work with it.\nPinning greenlet=1.1.3 fixed it for me. More details about this issue here: https://github.com/python-greenlet/greenlet/issues/178\n" ]
[ 4, 1, 0 ]
[]
[]
[ "docker", "python", "ubuntu_14.04" ]
stackoverflow_0034314222_docker_python_ubuntu_14.04.txt
Q: Dash duplicate paths issue I'm working on a dash application and I got this error. I haven't found anything on stackoverflow related to this issue. Exception: modules ['pages..ipynb_checkpoints.app_Km-checkpoint', 'pages.app_Km'] have duplicate paths A: Do you use the dash multipage plugin? Could you provide your code and project structure? I have encountered a similar error myself. I am using the DASH multipage plugin, dash version 2.6.2 My problem was that I was running my application from a different PYTHONPATH than where is my app.py - because my application structure looks like that for deployment purposes. My app structure is (simplified): . └── dash_app ├── app.py ├── callback_extensions │ └── login_callbacks.py └── pages ├── afterlogin.py ├── login.py └── page1.py The PYTHONPATH (path, where Python looks for modules) is set to the root directory of the project (one directory up to the /dash_app directory where app.py - DASH object is instantiated) On dash_app/pages/afterlogin.py file, I was registering a new page: dash.register_page(__name__) Then, I was importing one variable from the afterlogin.py to a different script, the dash_app/callback_extensions/login_callbacks.py. I got the following exception and the app didn't load: Exception: modules ['dash_app.pages.afterlogin', 'pages.afterlogin'] have duplicate paths When removing the import in the afterlogin.py file, the problems were resolved. My guess is there exists a reference to the page app_Km.py within the .ipynb_checkpoints with a different PATH (like in my case, the module is the same but it has different referencing) that creates a 'conflict' and DASH evaluates this as a different module, when in fact it is just a reference to the same one. Suggestions: Did you try deleting the .ipynb_checkpoints altogether (are you using Jupyter notebook to run your project?) I hope this helps.
Dash duplicate paths issue
I'm working on a dash application and I got this error. I haven't found anything on stackoverflow related to this issue. Exception: modules ['pages..ipynb_checkpoints.app_Km-checkpoint', 'pages.app_Km'] have duplicate paths
[ "Do you use the dash multipage plugin? Could you provide your code and project structure?\n\nI have encountered a similar error myself. I am using the DASH\nmultipage plugin, dash version 2.6.2\n\nMy problem was that I was running my application from a different\nPYTHONPATH than where is my app.py - because my application structure\nlooks like that for deployment purposes.\n\nMy app structure is (simplified):\n\n\n.\n└── dash_app\n ├── app.py\n ├── callback_extensions\n │ └── login_callbacks.py\n └── pages\n ├── afterlogin.py\n ├── login.py\n └── page1.py\n\n\nThe PYTHONPATH (path, where Python looks for modules) is set to the root directory of the project (one directory up to the /dash_app directory where app.py - DASH object is instantiated)\n\nOn dash_app/pages/afterlogin.py file, I was registering a new page:\ndash.register_page(__name__)\n\nThen, I was importing one variable from the afterlogin.py to a different script, the dash_app/callback_extensions/login_callbacks.py.\n\nI got the following exception and the app didn't load:\n\n\nException: modules ['dash_app.pages.afterlogin', 'pages.afterlogin'] have duplicate paths\nWhen removing the import in the afterlogin.py file, the problems were resolved. My guess is there exists a reference to the page app_Km.py within the .ipynb_checkpoints with a different PATH (like in my case, the module is the same but it has different referencing) that creates a 'conflict' and DASH evaluates this as a different module, when in fact it is just a reference to the same one.\nSuggestions:\n\nDid you try deleting the .ipynb_checkpoints altogether (are you using Jupyter notebook to run your project?)\n\nI hope this helps.\n" ]
[ 0 ]
[]
[]
[ "plotly_dash", "python" ]
stackoverflow_0073964756_plotly_dash_python.txt
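The suggestion above to delete .ipynb_checkpoints can be done by hand, but a small sketch of the same cleanup is shown below; the pages folder name follows the Dash pages convention used in the question, while the exact path is an assumption about the project layout:
import shutil
from pathlib import Path

pages_dir = Path("pages")  # adjust if your Dash pages folder lives elsewhere

for checkpoint_dir in pages_dir.rglob(".ipynb_checkpoints"):
    # remove Jupyter's checkpoint copies so Dash stops registering them as duplicate pages
    shutil.rmtree(checkpoint_dir)
    print(f"removed {checkpoint_dir}")
Running the app from a plain .py file instead of a notebook also avoids the checkpoints being recreated.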
Q: Filter model without using distinct method I have a model with a list of products. Each product has an ID, price, brand, etc. I want return all the objects of the model where brand name is distinct. I am currently using django's built-in SQLite, so it does not support something like products = Product.objects.all().distinct('brand') Is there another way of returning all the objects where the brand name is distinct? A: As SQLight doesn't support .distinct('field') you need to do this directly in python. For example: products = list({p.brand: p for p in Product.objects.all()}.values()) A: Well you can do it with 2 different methods: def MethodName(self): query = """ SELECT DISTINCT brand FROM Product; """ self.execute(query) products = Product.objects.raw(""" SELECT DISTINCT brand FROM Product; """) Please reply to this message If you have any difficulties fetching. A: Ok, so try distinctBrands = Product.objects.values('brand').annotate(count=Count('brand')).filter(count=1) products = Products.objects.filter( brand__in=distinctBrands ).all()
Filter model without using distinct method
I have a model with a list of products. Each product has an ID, price, brand, etc. I want to return all the objects of the model where the brand name is distinct. I am currently using Django's built-in SQLite, so it does not support something like products = Product.objects.all().distinct('brand') Is there another way of returning all the objects where the brand name is distinct?
[ "As SQLight doesn't support .distinct('field') you need to do this directly in python. For example:\nproducts = list({p.brand: p for p in Product.objects.all()}.values())\n\n", "Well you can do it with 2 different methods:\n\ndef MethodName(self):\nquery = \"\"\"\n SELECT DISTINCT brand FROM Product;\n \"\"\"\nself.execute(query)\n\n\nproducts = Product.objects.raw(\"\"\"\nSELECT DISTINCT brand FROM Product;\n\"\"\")\n\n\n\nPlease reply to this message If you have any difficulties fetching.\n", "Ok, so try\ndistinctBrands = Product.objects.values('brand').annotate(count=Count('brand')).filter(count=1)\n\nproducts = Products.objects.filter(\n brand__in=distinctBrands \n).all()\n\n" ]
[ 2, 0, 0 ]
[ "try this\nproducts = set(Product.objects.values_list('brand'))\n\n" ]
[ -1 ]
[ "django", "python" ]
stackoverflow_0074531014_django_python.txt
Q: Why do I keep encountering this error when I try to migrate a Django app return Database.Cursor.execute(self, query, params) django.db.utils.OperationalError: no such table: theblog_categories I expected to migrate successfully A: This is because migrations in Django are a little complex, and with small changes they often don't reflect the changes in the DB. Please delete the migration files from the Django app and then migrate again. You can also refer to this link if the problem persists: Django migrate --fake and --fake-initial explained If the problem still persists, please reply to this comment.
Why do I keep encountering this error when I try to migrate a Django app
return Database.Cursor.execute(self, query, params) django.db.utils.OperationalError: no such table: theblog_categories I expected to migrate successfully
[ "This is because the migrations in Django are little complex and often with little changes it doesn't reflect the changes in the DB.\nPlease delete Migrations file from the Django app and then migrate it again.\nYou can also refer this link if the problem persist:\nDjango migrate --fake and --fake-initial explained\nIf the problem still persist please reply to this comment.\n" ]
[ 0 ]
[]
[]
[ "django", "django_models", "python" ]
stackoverflow_0074529777_django_django_models_python.txt
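The answer above describes the fix only in prose. A rough sketch of the same steps, run from python manage.py shell, is below; the app name theblog is inferred from the missing table theblog_categories and may differ in your project, and depending on what the database already contains you may additionally need the --fake options discussed in the linked answer:
from pathlib import Path
from django.core.management import call_command

migrations_dir = Path("theblog") / "migrations"
for migration_file in migrations_dir.glob("[0-9]*.py"):
    migration_file.unlink()  # delete generated migration modules, keep __init__.py

call_command("makemigrations", "theblog")
call_command("migrate")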
Q: Why is model._meta.get_fields() returning unexpected relationship column names, and can this be prevented? Imagine I have some models as below: class User(AbstractUser): pass class Medium(models.Model): researcher = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True, related_name="medium_researcher") old_medium_name = models.CharField(max_length=20, null=True, blank=True) class Uptake(models.Model): material_quality = models.CharField(max_length=20, null=True, blank=True) medium = models.ForeignKey(Medium, on_delete=models.CASCADE, blank=True, null=True, related_name="uptake_medium") Now I have a function to return all column names to generate some overview in my HTML as such: from database.models import Medium MODEL_HEADERS=[f.name for f in Medium._meta.get_fields()] MODEL_HEADERS ['uptake_medium', 'id', 'researcher', 'old_medium_name'] Why does this return uptake_medium? As this is a ForeignKey relation set within the Uptake model, it should only be present within the Uptake model right? When I review the admin models this column does not show up, neither in the db.sqlite3 model when checking Uptake, so it seems to be sort of hidden, and only show up when requested with _meta. The relationship seems to be correct... This is causing a lot of problems with my code, and it would be great if only the 'non-meta' columns only could be returned. How should I approach? A: Why does this return uptake_medium? As this is a ForeignKey relation set within the Uptake model, it should only be present within the Uptake model right? You can access the relation in reverse, for example: my_medium.uptake_medium.all() to obtain all Updates related to the Medium instance named medium. You can also filter on that field, for example: Medium.objects.filter(uptake_medium__material_quantity=42) hence it is accessible just like any field. You can filter with: from django.db.models.fields.reverse_related import ManyToOneRel [f.name for f in Medium._meta.get_fields() if not isinstance(f, ManyToOneRel)]
Why is model._meta.get_fields() returning unexpected relationship column names, and can this be prevented?
Imagine I have some models as below: class User(AbstractUser): pass class Medium(models.Model): researcher = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True, related_name="medium_researcher") old_medium_name = models.CharField(max_length=20, null=True, blank=True) class Uptake(models.Model): material_quality = models.CharField(max_length=20, null=True, blank=True) medium = models.ForeignKey(Medium, on_delete=models.CASCADE, blank=True, null=True, related_name="uptake_medium") Now I have a function to return all column names to generate some overview in my HTML as such: from database.models import Medium MODEL_HEADERS=[f.name for f in Medium._meta.get_fields()] MODEL_HEADERS ['uptake_medium', 'id', 'researcher', 'old_medium_name'] Why does this return uptake_medium? As this is a ForeignKey relation set within the Uptake model, it should only be present within the Uptake model right? When I review the admin models this column does not show up, neither in the db.sqlite3 model when checking Uptake, so it seems to be sort of hidden, and only show up when requested with _meta. The relationship seems to be correct... This is causing a lot of problems with my code, and it would be great if only the 'non-meta' columns only could be returned. How should I approach?
[ "\nWhy does this return uptake_medium? As this is a ForeignKey relation set within the Uptake model, it should only be present within the Uptake model right?\n\nYou can access the relation in reverse, for example:\nmy_medium.uptake_medium.all()\n\nto obtain all Updates related to the Medium instance named medium.\nYou can also filter on that field, for example:\nMedium.objects.filter(uptake_medium__material_quantity=42)\n\nhence it is accessible just like any field.\nYou can filter with:\nfrom django.db.models.fields.reverse_related import ManyToOneRel\n\n[f.name for f in Medium._meta.get_fields() if not isinstance(f, ManyToOneRel)]\n\n" ]
[ 1 ]
[]
[]
[ "django", "django_models", "foreign_keys", "metadata", "python" ]
stackoverflow_0074530943_django_django_models_foreign_keys_metadata_python.txt
Q: Im getting the error "{"error":{"code":null,"message":"The URI is malformed."}}", when "&" is passed in the api below def get_dw_dim_channel_by_channel(self, channel): url = 'db-warehouse-dw/dim_channel?$filter=channel_name eq \'{}\''.format(channel) print ("debug: url = {}{}".format(self.host, url)) return self.get(url, headers=self.headers, auth=self.auth) Here the value for channel_name is "WDC Kitchen & Bath Center". The error after executing the above code: {"error":{"code":null,"message":"The URI is malformed."}} What must I do to fix it? A: The ampersand needs to be percent-encoded. Try to use: channel = channel.replace("&", "%26")
I'm getting the error "{"error":{"code":null,"message":"The URI is malformed."}}" when "&" is passed in the API below
def get_dw_dim_channel_by_channel(self, channel): url = 'db-warehouse-dw/dim_channel?$filter=channel_name eq \'{}\''.format(channel) print ("debug: url = {}{}".format(self.host, url)) return self.get(url, headers=self.headers, auth=self.auth) Here the value for channel_name is "WDC Kitchen & Bath Center". The error after executing the above code: {"error":{"code":null,"message":"The URI is malformed."}} What must I do to fix it?
[ "The ampersand needs to be percent-encoded.\nTry to use:\nchannel = channel.replace(\"&\", \"%26\")\n\n" ]
[ 0 ]
[]
[]
[ "api", "coda", "python", "python_behave" ]
stackoverflow_0074531327_api_coda_python_python_behave.txt
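The single replace in the answer above fixes the reported case; a slightly more general sketch percent-encodes the whole channel value with urllib.parse.quote, so spaces and other reserved characters are handled as well (only the channel value is encoded here, the rest of the method is kept as in the original question):
from urllib.parse import quote

def get_dw_dim_channel_by_channel(self, channel):
    encoded = quote(channel, safe="")  # "&" becomes %26, spaces become %20, etc.
    url = "db-warehouse-dw/dim_channel?$filter=channel_name eq '{}'".format(encoded)
    print("debug: url = {}{}".format(self.host, url))
    return self.get(url, headers=self.headers, auth=self.auth)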
Q: Compare two lists and update the properties in Python I have two lists in Python something similar. list1 = [ {"name": "sample1", "place": "sampleplace1", "value": "", "time": "sampletime" }, {"name": "sample2", "place": "sampleplace2", "value": "", "time": "sampletime2" } ] list2 = [ {"name": "sample1", "value": "10" }, {"name": "sample2", "value": "20" } ] I need to compare both the lists and whereever the name is matching, I need to update the value property in list1. I did by running a for loop on list1, get the matching list object from list2 for each list1 object and update the value. I'm just wondering, is there a way to do this without running a for loop (something like Linq in C#)? A: Sadly, Python does not have the same abilities as LINQ. If you don't want to explicitly use a function there is map, but it uses a loop under the hood, as LINQ does. You need for loops, like in : list1 = [ {"name": "sample1", "place": "sampleplace1", "value": "", "time": "sampletime" }, {"name": "sample2", "place": "sampleplace2", "value": "", "time": "sampletime2" } ] list2 = [ {"name": "sample1", "value": "10" }, {"name": "sample2", "value": "20" } ] for elem1 in list1: for elem2 in list2: if elem1["name"] == elem2["name"]: # match ! we replace the value elem1["value"] = elem2["value"] break # and stop searching else: print(f"no match in list2 for {elem1['name']=}") # just for displaying the result import json print(json.dumps(list1, indent=2)) [ { "name": "sample1", "place": "sampleplace1", "value": "10", "time": "sampletime" }, { "name": "sample2", "place": "sampleplace2", "value": "20", "time": "sampletime2" } ]
Compare two lists and update the properties in Python
I have two lists in Python something similar. list1 = [ {"name": "sample1", "place": "sampleplace1", "value": "", "time": "sampletime" }, {"name": "sample2", "place": "sampleplace2", "value": "", "time": "sampletime2" } ] list2 = [ {"name": "sample1", "value": "10" }, {"name": "sample2", "value": "20" } ] I need to compare both the lists and whereever the name is matching, I need to update the value property in list1. I did by running a for loop on list1, get the matching list object from list2 for each list1 object and update the value. I'm just wondering, is there a way to do this without running a for loop (something like Linq in C#)?
[ "Sadly, Python does not have the same abilities as LINQ.\nIf you don't want to explicitly use a function there is map, but it uses a loop under the hood, as LINQ does.\nYou need for loops, like in :\nlist1 = [\n {\"name\": \"sample1\",\n \"place\": \"sampleplace1\",\n \"value\": \"\",\n \"time\": \"sampletime\"\n },\n {\"name\": \"sample2\",\n \"place\": \"sampleplace2\",\n \"value\": \"\",\n \"time\": \"sampletime2\"\n }\n]\n\nlist2 = [\n {\"name\": \"sample1\",\n \"value\": \"10\"\n\n },\n {\"name\": \"sample2\",\n \"value\": \"20\"\n }\n]\n\nfor elem1 in list1:\n for elem2 in list2:\n if elem1[\"name\"] == elem2[\"name\"]:\n # match ! we replace the value\n elem1[\"value\"] = elem2[\"value\"]\n break # and stop searching\n else:\n print(f\"no match in list2 for {elem1['name']=}\")\n\n# just for displaying the result\nimport json\nprint(json.dumps(list1, indent=2))\n\n[\n {\n \"name\": \"sample1\",\n \"place\": \"sampleplace1\",\n \"value\": \"10\",\n \"time\": \"sampletime\"\n },\n {\n \"name\": \"sample2\",\n \"place\": \"sampleplace2\",\n \"value\": \"20\",\n \"time\": \"sampletime2\"\n }\n]\n\n" ]
[ 0 ]
[]
[]
[ "list", "python", "python_3.x" ]
stackoverflow_0074522485_list_python_python_3.x.txt
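As the answer above notes, some loop is unavoidable, but the nested loop can be flattened by indexing list2 once; this sketch reuses the sample data from the question (the values_by_name dict is an illustrative helper, not from the original post):
list1 = [
    {"name": "sample1", "place": "sampleplace1", "value": "", "time": "sampletime"},
    {"name": "sample2", "place": "sampleplace2", "value": "", "time": "sampletime2"},
]
list2 = [
    {"name": "sample1", "value": "10"},
    {"name": "sample2", "value": "20"},
]

# index list2 by name once, then update list1 in a single pass
values_by_name = {item["name"]: item["value"] for item in list2}

for entry in list1:
    if entry["name"] in values_by_name:
        entry["value"] = values_by_name[entry["name"]]

print(list1)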
Q: Django - How to Get ID before ensuring form is valid I am facing an issue where I have multiple forms on one page. What I am trying to do is update an item using an update form. The issue is I am unable to display the most current data that is in DB in the template. When I request the pk, it comes from a different model that is not related to this one. I need to get the ID before ensuring that the form is valid so I can filter and get the correct item based on the ID in order to display the data that I need. models.py class DevIssues(models.Model): ISSUE_CODE = [ ('BUG', 'Bug'), ('BACKLOG', 'Backlog'), ('REQUEST', 'Request'), ('TODO', 'To-Do'), ] ISSUE_STATUS = [ ('NEW', 'New'), ('WIP', 'In Progress'), ('Complete', 'Complete'), ] project = models.ForeignKey(DevProjects, on_delete=models.CASCADE, related_name='issue') issue = models.CharField(max_length=100) issue_desc = models.CharField(max_length=500) issue_code = models.CharField(max_length=9, choices=ISSUE_CODE, null=True, blank=True) issue_status = models.CharField(max_length=15, choices=ISSUE_STATUS, default='New') created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateField(auto_now=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE) forms.py class AddProjectIssues(forms.ModelForm): class Meta: model = DevIssues fields= ["issue", "issue_desc", "issue_code"] labels = { 'issue': 'Issue', 'issue_desc': 'Issue Description', 'issue_code': 'Issue Code', } class UpdateProjectIssues(forms.ModelForm): class Meta: model = DevIssues fields= ["issue_status"] views.py issue_project = get_object_or_404(DevProjects, pk=pk) issues = DevIssues.objects.filter(project=issue_project).order_by('-created_at') if request.method == 'POST' and 'addissue' in request.POST: issue_form = AddProjectIssues(request.POST or None) if issue_form.is_valid(): content = request.POST.get('issue') content2 = request.POST.get('issue_desc') content3 = request.POST.get('issue_code') issues = DevIssues.objects.create(issue=content, issue_desc=content2, issue_code=content3, project=project, created_by=request.user) issues.save() return redirect("/projects") else: issue_form = AddProjectIssues() content1 = DevIssues.objects.filter(id= **here**).first() #here I need to get ID before so I can filter and get the correct data to be displayed. if request.method == 'POST' and 'updateissue' in request.POST: update_issue_form = UpdateProjectIssues(request.POST, instance=content1) if update_issue_form.is_valid(): content = request.POST.get('issue_status') content2 = request.POST.get('updateissue') notes = DevIssues.objects.filter(id=content2).update(issue_status=content) return redirect('/projects') else: update_issue_form = UpdateProjectIssues(instance=content1) Again, the PK that I am getting through the request for DevProjects as this Issue form is based on a DevProject. How can I get the ID of an item, without requesting the PK via URL in order to filter and get the correct data to display? Any and all help is appreciated. A: You will need to fetch the Urls via Ajax and not by Django URL dispatcher. As per my understanding you have multiple forms and after submitting the 1st form you want to get its pk and by which you want to fill the 2nd form and store the primary key in the another model table. Please use axios to call the Urls. You can reply to this message to convey anything.
Django - How to Get ID before ensuring form is valid
I am facing an issue where I have multiple forms on one page. What I am trying to do is update an item using an update form. The issue is I am unable to display the most current data that is in DB in the template. When I request the pk, it comes from a different model that is not related to this one. I need to get the ID before ensuring that the form is valid so I can filter and get the correct item based on the ID in order to display the data that I need. models.py class DevIssues(models.Model): ISSUE_CODE = [ ('BUG', 'Bug'), ('BACKLOG', 'Backlog'), ('REQUEST', 'Request'), ('TODO', 'To-Do'), ] ISSUE_STATUS = [ ('NEW', 'New'), ('WIP', 'In Progress'), ('Complete', 'Complete'), ] project = models.ForeignKey(DevProjects, on_delete=models.CASCADE, related_name='issue') issue = models.CharField(max_length=100) issue_desc = models.CharField(max_length=500) issue_code = models.CharField(max_length=9, choices=ISSUE_CODE, null=True, blank=True) issue_status = models.CharField(max_length=15, choices=ISSUE_STATUS, default='New') created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateField(auto_now=True) created_by = models.ForeignKey(User, on_delete=models.CASCADE) forms.py class AddProjectIssues(forms.ModelForm): class Meta: model = DevIssues fields= ["issue", "issue_desc", "issue_code"] labels = { 'issue': 'Issue', 'issue_desc': 'Issue Description', 'issue_code': 'Issue Code', } class UpdateProjectIssues(forms.ModelForm): class Meta: model = DevIssues fields= ["issue_status"] views.py issue_project = get_object_or_404(DevProjects, pk=pk) issues = DevIssues.objects.filter(project=issue_project).order_by('-created_at') if request.method == 'POST' and 'addissue' in request.POST: issue_form = AddProjectIssues(request.POST or None) if issue_form.is_valid(): content = request.POST.get('issue') content2 = request.POST.get('issue_desc') content3 = request.POST.get('issue_code') issues = DevIssues.objects.create(issue=content, issue_desc=content2, issue_code=content3, project=project, created_by=request.user) issues.save() return redirect("/projects") else: issue_form = AddProjectIssues() content1 = DevIssues.objects.filter(id= **here**).first() #here I need to get ID before so I can filter and get the correct data to be displayed. if request.method == 'POST' and 'updateissue' in request.POST: update_issue_form = UpdateProjectIssues(request.POST, instance=content1) if update_issue_form.is_valid(): content = request.POST.get('issue_status') content2 = request.POST.get('updateissue') notes = DevIssues.objects.filter(id=content2).update(issue_status=content) return redirect('/projects') else: update_issue_form = UpdateProjectIssues(instance=content1) Again, the PK that I am getting through the request for DevProjects as this Issue form is based on a DevProject. How can I get the ID of an item, without requesting the PK via URL in order to filter and get the correct data to display? Any and all help is appreciated.
[ "You will need to fetch the Urls via Ajax and not by Django URL dispatcher. As per my understanding you have multiple forms and after submitting the 1st form you want to get its pk and by which you want to fill the 2nd form and store the primary key in the another model table. Please use axios to call the Urls.\nYou can reply to this message to convey anything.\n" ]
[ 0 ]
[]
[]
[ "django", "python" ]
stackoverflow_0074526516_django_python.txt
Q: How to solve, file not found error from script, but works in notebook? I have a code to load a json file. with open("data/movie_data.json", "r") as j: word_map = json.load(j) The data folder is in current directory. However, this code works in the jupyter notebook, but while running from a script, it says, file not found error. Both the script and notebook are in same folder, that contains 'data' folder. What am I missing? A: Try "./data/movie_data.json" instead of "data/movie_data.json"
How to solve, file not found error from script, but works in notebook?
I have a code to load a json file. with open("data/movie_data.json", "r") as j: word_map = json.load(j) The data folder is in current directory. However, this code works in the jupyter notebook, but while running from a script, it says, file not found error. Both the script and notebook are in same folder, that contains 'data' folder. What am I missing?
[ "Try\n\"./data/movie_data.json\"\n\ninstead of\n\"data/movie_data.json\"\n\n" ]
[ 0 ]
[]
[]
[ "filenotfoundexception", "json", "python" ]
stackoverflow_0074531421_filenotfoundexception_json_python.txt
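The relative-path tweak in the answer above only helps if the script is launched from the folder that contains data/; the usual cause of the error is a different working directory. A sketch that anchors the path to the script file itself (this relies on __file__, so it applies to the script, not the notebook):
import json
from pathlib import Path

# resolve data/movie_data.json relative to this script's location,
# independent of the current working directory
data_path = Path(__file__).resolve().parent / "data" / "movie_data.json"

with open(data_path, "r") as j:
    word_map = json.load(j)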
Q: Perform tasks when the user has responded to a recation, otherwise do nothing I have a problem. I want that when the user writes a message, my bot should send a message. And once the user has responded to that message, the user should send further instructions. I have the problem that when the user sends a message, the bot sends the message with the reactions, but as soon as the user sends a second message. The bot sends a message again. The bot should therefore only send another message once the user has reacted. How can this be done? I tried this with the responsed variable. import asyncio import discord from discord.ext import commands from datetime import datetime class command(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_message(self, message): if str(message.channel.type) == "private": if message.author == self.bot.user: return else: # check if channel exists if (channel is None): responsed = True if(responsed): responsed = False ... await message.author.send("Hello") # add reactions reaction, user = await self.bot.wait_for('reaction_add', check=check) # check reactions if(reaction.emoji == '✅'): responsed = True else: pass # do nothing else: await message.author.send("How are you?") async def setup(bot): await bot.add_cog(command(bot)) User sends a Message 1.1. check if user reacted 1.2 if a message is sent and not reacted then pass Bot replies with Hello -> Wait for reaction 2.1 bot adds reaction 2.2 if user sends a second message 2 will started again. That should be forbidden. User Reacts Bot send How are you A: The problem with what you're trying to do, is that discord.py, and the discord API in general, is designed to run a bot on multiple servers, on multiple channels, etc. This means that, if you want to forbid a user sending a second message, you have to specify what you mean with that. Not twice on the same channel? Not twice for the same user? Should there be some form of a timeout? I see you're currently working only in private channels (which you can also check with if isinstance(message.channel, discord.abc.PrivateChannel) if you want to do that properly) In that case, you might want to restrict it like "not if in the last x messages in the private channel." You can do this with another guard clause at start, here shown with the entire list you're using: if not isinstance(message.channel, discord.abc.PrivateChannel): return if message.author == self.bot.user: return previous_messages = await message.channel.history(limit=10).flatten() if any([(message.content == "Hello" and message.author == self.bot.user) for message in previous_messages]): return ... # Rest of the function I personally don't think it's the cleanest, but it will at least work. (if I didn't make a typo somewhere...) If I can give you one do-something-else bit of advice: use buttons and views instead. Those have this type of callback functionality build-in. :-) I hope that helps! :-)
Perform tasks when the user has responded to a reaction, otherwise do nothing
I have a problem. I want that when the user writes a message, my bot should send a message. And once the user has responded to that message, the user should send further instructions. I have the problem that when the user sends a message, the bot sends the message with the reactions, but as soon as the user sends a second message. The bot sends a message again. The bot should therefore only send another message once the user has reacted. How can this be done? I tried this with the responsed variable. import asyncio import discord from discord.ext import commands from datetime import datetime class command(commands.Cog): def __init__(self, bot): self.bot = bot @commands.Cog.listener() async def on_message(self, message): if str(message.channel.type) == "private": if message.author == self.bot.user: return else: # check if channel exists if (channel is None): responsed = True if(responsed): responsed = False ... await message.author.send("Hello") # add reactions reaction, user = await self.bot.wait_for('reaction_add', check=check) # check reactions if(reaction.emoji == '✅'): responsed = True else: pass # do nothing else: await message.author.send("How are you?") async def setup(bot): await bot.add_cog(command(bot)) User sends a Message 1.1. check if user reacted 1.2 if a message is sent and not reacted then pass Bot replies with Hello -> Wait for reaction 2.1 bot adds reaction 2.2 if user sends a second message 2 will started again. That should be forbidden. User Reacts Bot send How are you
[ "The problem with what you're trying to do, is that discord.py, and the discord API in general, is designed to run a bot on multiple servers, on multiple channels, etc.\nThis means that, if you want to forbid a user sending a second message, you have to specify what you mean with that. Not twice on the same channel? Not twice for the same user? Should there be some form of a timeout?\nI see you're currently working only in private channels (which you can also check with if isinstance(message.channel, discord.abc.PrivateChannel) if you want to do that properly) In that case, you might want to restrict it like \"not if in the last x messages in the private channel.\" You can do this with another guard clause at start, here shown with the entire list you're using:\nif not isinstance(message.channel, discord.abc.PrivateChannel):\n return\n\nif message.author == self.bot.user:\n return\n\nprevious_messages = await message.channel.history(limit=10).flatten()\nif any([(message.content == \"Hello\" and message.author == self.bot.user) for message in previous_messages]):\n return\n\n... # Rest of the function\n\nI personally don't think it's the cleanest, but it will at least work. (if I didn't make a typo somewhere...)\nIf I can give you one do-something-else bit of advice: use buttons and views instead. Those have this type of callback functionality build-in. :-)\nI hope that helps! :-)\n" ]
[ 1 ]
[]
[]
[ "discord", "discord.py", "python" ]
stackoverflow_0074529815_discord_discord.py_python.txt
Q: Is there a better way to use numpy in that case? My output looks like that, but isn't my code bad practice? Is there a way to replace the for with numpy functions? [[ 1. 1.5 2. 2.5 3. ] [ 3.5 4. 4.5 5. 5.5] [ 6. 6.5 7. 7.5 8. ] [ 8.5 9. 9.5 10. 10.5] [11. 11.5 12. 12.5 13. ] [13.5 14. 14.5 15. 15.5] [16. 16.5 17. 17.5 18. ] [18.5 19. 19.5 20. 20.5]] import numpy as np list = [] x = 0.5 for i in range(8): temp = [] list.append(temp) for j in range(5): x += 0.5 temp.append(x) array = np.array(list) A: Not necessarily bad practice (except for calling your variable list) but it can be improved significanty by using np.arange as follows: arr = np.arange(1,21,0.5).reshape((8,5)) A: You would not use a loop with numpy, but rather vectorial code. You seem to want numpy.arange combined with reshape: n, m = 8, 5 start = 0.5 step = 0.5 out = np.arange(start+step, start+step*(n*m+1), step).reshape(n, m) Output: array([[ 1. , 1.5, 2. , 2.5, 3. ], [ 3.5, 4. , 4.5, 5. , 5.5], [ 6. , 6.5, 7. , 7.5, 8. ], [ 8.5, 9. , 9.5, 10. , 10.5], [11. , 11.5, 12. , 12.5, 13. ], [13.5, 14. , 14.5, 15. , 15.5], [16. , 16.5, 17. , 17.5, 18. ], [18.5, 19. , 19.5, 20. , 20.5]]) A: First you initiallize the array with np.zeros with the final size. Then you select each position to assign x. import numpy as np x = 0.5 array = np.zeros((8,5)) for i in range(8): for j in range(5): x += 0.5 array[i,j] = x A: You should use np.arange like other answers have pointed out. But you can also use normal python range. np.array([*range(10, 41*5+1, 5)]).reshape(8,5) / 10
Is there a better way to use numpy in that case?
My output looks like that, but isn't my code bad practice? Is there a way to replace the for with numpy functions? [[ 1. 1.5 2. 2.5 3. ] [ 3.5 4. 4.5 5. 5.5] [ 6. 6.5 7. 7.5 8. ] [ 8.5 9. 9.5 10. 10.5] [11. 11.5 12. 12.5 13. ] [13.5 14. 14.5 15. 15.5] [16. 16.5 17. 17.5 18. ] [18.5 19. 19.5 20. 20.5]] import numpy as np list = [] x = 0.5 for i in range(8): temp = [] list.append(temp) for j in range(5): x += 0.5 temp.append(x) array = np.array(list)
[ "Not necessarily bad practice (except for calling your variable list) but it can be improved significanty by using np.arange as follows:\narr = np.arange(1,21,0.5).reshape((8,5))\n\n", "You would not use a loop with numpy, but rather vectorial code.\nYou seem to want numpy.arange combined with reshape:\nn, m = 8, 5\nstart = 0.5\nstep = 0.5\n\nout = np.arange(start+step, start+step*(n*m+1), step).reshape(n, m)\n\nOutput:\narray([[ 1. , 1.5, 2. , 2.5, 3. ],\n [ 3.5, 4. , 4.5, 5. , 5.5],\n [ 6. , 6.5, 7. , 7.5, 8. ],\n [ 8.5, 9. , 9.5, 10. , 10.5],\n [11. , 11.5, 12. , 12.5, 13. ],\n [13.5, 14. , 14.5, 15. , 15.5],\n [16. , 16.5, 17. , 17.5, 18. ],\n [18.5, 19. , 19.5, 20. , 20.5]])\n\n", "First you initiallize the array with np.zeros with the final size. Then you select each position to assign x.\nimport numpy as np\nx = 0.5\n\narray = np.zeros((8,5))\nfor i in range(8):\n for j in range(5):\n x += 0.5\n array[i,j] = x\n\n", "You should use np.arange like other answers have pointed out. But you can also use normal python range.\nnp.array([*range(10, 41*5+1, 5)]).reshape(8,5) / 10\n\n" ]
[ 2, 2, 0, 0 ]
[]
[]
[ "numpy", "python" ]
stackoverflow_0074531286_numpy_python.txt
Q: Setting the Same Icon as Application Icon in Task bar for pyqt5 application I am finding trouble with attaching the same icon in the task bar manager for pyqt5 application as I did for the icon of pyqt5 application. I have attached below code for icon display in pyqt5, just need a bit help that how to code for displaying of same icon of Application to the task bar. import sys from SplashScreen import * from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtGui import QColor, qGray, QImage, QPainter, QPalette,QIcon from PyQt5.QtCore import Qt, QUrl from PyQt5.QtWidgets import QApplication, QWidget if __name__ == "__main__": app = QApplication(sys.argv) app.setWindowIcon(QIcon('./cricket.png')) w = QWidget() w.resize(300,300) w.setWindowTitle('Quick Cricket') w.show() sys.exit(app.exec_()) Thanks in advance A: Guess What I found the Answer. I used three lines of Code at the start of my application and then run the code and windows show me same icon as it was my logo. import ctypes myappid = 'mycompany.myproduct.subproduct.version' # arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) What these lines do? So in short these lines will tell the window that this is my own registered application, so I will decide the icon of it. I will give all credit to @DamonJW Stack Overflow Developer. Thanks @DamonJW. Here is the link of solution < https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105> A: The solution is in this line import ctypes myappid = 'mycompany.myproduct.subproduct.version' # arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
Setting the same icon as the application icon in the taskbar for a PyQt5 application
I am finding trouble with attaching the same icon in the task bar manager for pyqt5 application as I did for the icon of pyqt5 application. I have attached below code for icon display in pyqt5, just need a bit help that how to code for displaying of same icon of Application to the task bar. import sys from SplashScreen import * from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtGui import QColor, qGray, QImage, QPainter, QPalette,QIcon from PyQt5.QtCore import Qt, QUrl from PyQt5.QtWidgets import QApplication, QWidget if __name__ == "__main__": app = QApplication(sys.argv) app.setWindowIcon(QIcon('./cricket.png')) w = QWidget() w.resize(300,300) w.setWindowTitle('Quick Cricket') w.show() sys.exit(app.exec_()) Thanks in advance
[ "Guess What I found the Answer.\nI used three lines of Code at the start of my application and then run the code and windows show me same icon as it was my logo.\nimport ctypes\nmyappid = 'mycompany.myproduct.subproduct.version' # arbitrary string\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\nWhat these lines do? So in short these lines will tell the window that this is my own registered application, so I will decide the icon of it.\nI will give all credit to @DamonJW Stack Overflow Developer. Thanks @DamonJW.\nHere is the link of solution\n<\nhttps://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105>\n", "\nThe solution is in this line\nimport ctypes\nmyappid = 'mycompany.myproduct.subproduct.version' # arbitrary string\nctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\n" ]
[ 4, 0 ]
[]
[]
[ "pyqt5", "python", "user_interface" ]
stackoverflow_0067599432_pyqt5_python_user_interface.txt
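Putting the accepted answer together with the code from the question, the ctypes call goes before the QApplication is created; the AppUserModelID string is arbitrary (the one below is made up), and the call is Windows-only:
import sys
import ctypes
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QApplication, QWidget

# Windows-only: register an explicit AppUserModelID so the taskbar
# shows the application's own icon instead of the Python launcher's.
myappid = "mycompany.quickcricket.app.1"
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)

if __name__ == "__main__":
    app = QApplication(sys.argv)
    app.setWindowIcon(QIcon("./cricket.png"))
    w = QWidget()
    w.resize(300, 300)
    w.setWindowTitle("Quick Cricket")
    w.show()
    sys.exit(app.exec_())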
Q: Dollar Universe - 'nonetype' object has no attribute 'isatty gcloud When I run a "gcloud functions call.." I don't encouter any error. When I run my cmd with Dollar Universe I have this error: ERROR: gcloud crashed (AttributeError): 'NoneType' object has no attribute 'isatty A: Thanks to ErnestoC for helping to resolve the issue. For more information, a simple update with gcloud component update didn't work for me because the Google Cloud CLI manager is disabled for my installation (see the screenshot) but my cloud console suggested me to do this: sudo yum makecache && sudo yum update kubectl google-cloud-sdk-firestore-emulator google-cloud-sdk-app-engine-go google-cloud-sdk-skaffold google-cloud-sdk-cloud-build-local google-cloud-sdk-anthos-auth google-cloud-sdk-local-extract google-cloud-sdk-app-engine-python-extras google-cloud-sdk-terraform-validator google-cloud-sdk-config-connector google-cloud-sdk-bundled-python3 google-cloud-sdk-datastore-emulator google-cloud-sdk-kpt google-cloud-sdk-bigtable-emulator google-cloud-sdk-app-engine-java google-cloud-sdk-gke-gcloud-auth-plugin google-cloud-sdk-datalab google-cloud-sdk-minikube google-cloud-sdk-pubsub-emulator google-cloud-sdk google-cloud-sdk-kubectl-oidc google-cloud-sdk-app-engine-grpc google-cloud-sdk-app-engine-python google-cloud-sdk-spanner-emulator google-cloud-sdk-cbt
Dollar Universe - 'NoneType' object has no attribute 'isatty' gcloud
When I run a "gcloud functions call.." I don't encouter any error. When I run my cmd with Dollar Universe I have this error: ERROR: gcloud crashed (AttributeError): 'NoneType' object has no attribute 'isatty
[ "Thanks to ErnestoC for helping to resolve the issue.\nFor more information, a simple update with gcloud component update didn't work for me because the Google Cloud CLI manager is disabled for my installation (see the screenshot) but my cloud console suggested me to do this:\nsudo yum makecache && sudo yum update kubectl google-cloud-sdk-firestore-emulator google-cloud-sdk-app-engine-go google-cloud-sdk-skaffold google-cloud-sdk-cloud-build-local google-cloud-sdk-anthos-auth google-cloud-sdk-local-extract google-cloud-sdk-app-engine-python-extras google-cloud-sdk-terraform-validator google-cloud-sdk-config-connector google-cloud-sdk-bundled-python3 google-cloud-sdk-datastore-emulator google-cloud-sdk-kpt google-cloud-sdk-bigtable-emulator google-cloud-sdk-app-engine-java google-cloud-sdk-gke-gcloud-auth-plugin google-cloud-sdk-datalab google-cloud-sdk-minikube google-cloud-sdk-pubsub-emulator google-cloud-sdk google-cloud-sdk-kubectl-oidc google-cloud-sdk-app-engine-grpc google-cloud-sdk-app-engine-python google-cloud-sdk-spanner-emulator google-cloud-sdk-cbt\n\n\n" ]
[ 0 ]
[]
[]
[ "gcloud", "python" ]
stackoverflow_0072703063_gcloud_python.txt
Q: Django| Reverse for 'user-posts' with arguments '('',)' not found. 1 pattern(s) tried: ['user/(?P[^/]+)$'] I am following django tutorial by @CoreyMSchafer. I got error while practicing i can't find solution to it. According to my understanding its problem with reversing of url. but can't find out what is wrong Error: NoReverseMatch at / Reverse for 'user-posts' with arguments '('',)' not found. 1 pattern(s) tried: ['user/(?P[^/]+)$'] For some reason error is in head of base.html where I'm linking bootstrap. I also tried removing that link then its giving same error but at line 0 of base.html views.py: class UserPostListView(ListView): model = Post context_object_name = 'posts' template_name = 'blog/user_posts.html' paginate_by = 5 def get_queryset(self): user = get_object_or_404(User, username=self.kwargs.get('username')) return Post.objects.all().filter(author= user).order_by('-date_posted') urls.py file: from django.urls import path, include from .views import PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeletelView, UserPostListView from . import views urlpatterns = [ path('', PostListView.as_view(), name='blog-home'), path('user/<str:username>', UserPostListView.as_view(), name='user-posts'), path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'), path('post/new/', PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', PostDeletelView.as_view(), name='post-delete'), path('about/', views.about, name='blog-about'), ] user_posts.html: {% if is_paginated %} {% if page_obj.has_previous %} <a class="btn btn-outline-info mb-4" href="?page=1">First</a> <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.previous_page_number }}">Previous</a> {% endif %} {% for num in page_obj.paginator.page_range %} {% if page_obj.number == num %} <a class="btn btn-info mb-4" href="?page={{ num }}">{{ num }}</a> {% elif num > page_obj.number|add:'-3' and num < page_obj.number|add:'3' %} <a class="btn btn-outline-info mb-4" href="?page={{ num }}">{{ num }}</a> {% endif %} {% endfor %} {% if page_obj.has_next %} <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.next_page_number }}">Next</a> <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.paginator.num_pages }}">Last</a> {% endif %} {% endif %} {% endblock content %} home.html {% if is_paginated %} {% if page_obj.has_previous %} <a class="btn btn-outline-info mb-4" href="?page=1">First</a> <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.previous_page_number }}">Previous</a> {% endif %} {% for num in page_obj.paginator.page_range %} {% if page_obj.number == num %} <a class="btn btn-info mb-4" href="?page={{ num }}">{{ num }}</a> {% elif num > page_obj.number|add:'-3' and num < page_obj.number|add:'3' %} <a class="btn btn-outline-info mb-4" href="?page={{ num }}">{{ num }}</a> {% endif %} {% endfor %} {% if page_obj.has_next %} <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.next_page_number }}">Next</a> <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.paginator.num_pages }}">Last</a> {% endif %} {% endif %} post_detail.html {% extends "blog/base.html" %} {% block content %} <article class="media content-section"> <img class="rounded-circle article-img" src="{{ object.author.profile.image.url }}" alt=""> <div class="media-body"> <div class="article-metadata"> <a class="mr-2" href="{% url 'user-posts' object.author.username %}">{{ object.author }}</a> <small 
class="text-muted">{{ object.date_posted|date:"M d, Y"}}</small> {% if object.author == user %} <div> <a class="btn btn-secondary btn-sm mb-1" href="{% url 'post-update' object.id %}">Update</a> <a class="btn btn-danger btn-sm mb-1" href="{% url 'post-delete' object.id %}">Delete</a> </div> {% endif %} </div> <h2 class="article-title">{{ object.title }}</h2> <p class="article-content">{{ object.content }}</p> </div> </article> {% endblock content %} base.html {% extends "blog/base.html" %} {% block content %} <article class="media content-section"> <img class="rounded-circle article-img" src="{{ object.author.profile.image.url }}" alt=""> <div class="media-body"> <div class="article-metadata"> <a class="mr-2" href="{% url 'user-posts' object.author.username %}">{{ object.author }}</a> <small class="text-muted">{{ object.date_posted|date:"M d, Y"}}</small> {% if object.author == user %} <div> <a class="btn btn-secondary btn-sm mb-1" href="{% url 'post-update' object.id %}">Update</a> <a class="btn btn-danger btn-sm mb-1" href="{% url 'post-delete' object.id %}">Delete</a> </div> {% endif %} </div> <h2 class="article-title">{{ object.title }}</h2> <p class="article-content">{{ object.content }}</p> </div> </article> {% endblock content %} A: url(r'^user/(?P<username>\w{0,50})/$', UserPostListView.as_view(), name='user-posts'), just add it in your url not this path('user/<str:username>/', UserPostListView.as_view(),name='user-posts'), A: I had the same question before. In your user_posts.html and base.html, change all the name of the 'object' stuff to 'post'. Example: "object.author" -> "post.author", "object.title" -> "post.title", "object.author.username" -> "post.author.username" that's a fix for me. PS: Actually, u didn't post out the above mentioned part of the code. XD A: In the urls.py file - path('user/<str:username>/', UserPostListView.as_view(),name='user-posts'), You forget to add backslash after <str:username> A: i was watching the same course and i had the same problem. following two steps made this work for me: first make sure that you are referring to a correct HTML and then add a forward slash after your url like this: path('user/<str:username>/', UserPostListView.as_view(), name='user-posts'), if it didnt work for you use url instead of path like this: url(r'^user/(?P<username>\w{0,50})/$', UserPostListView.as_view(), name='user-posts'), A: I had the same error . My mistake was I misspelled 'object' in this line posts_detail.html <a class="mr-2" href="{%url 'user-posts' object.author.username %}">{{ object.author }}</a> this might not be the reason for your error, but anyone else stuck with this error check for typos in your HTML files. A: Replace post.author.username with post.author. It helped me. A: I ran into the same issue as well. I found that replacing the following line in blog/urls.py: urlpatterns = [ path('user/<str:username>/', UserPostListView.as_view(), name='user-posts'), ] with from django.urls import path, re_path # <- don't forget this import urlpatterns = [ re_path(r'^user/(?P<username>\w{0,50})/$', UserPostListView.as_view(), name='user-posts'), } solved the issue.
Django| Reverse for 'user-posts' with arguments '('',)' not found. 1 pattern(s) tried: ['user/(?P<username>[^/]+)$']
I am following django tutorial by @CoreyMSchafer. I got error while practicing i can't find solution to it. According to my understanding its problem with reversing of url. but can't find out what is wrong Error: NoReverseMatch at / Reverse for 'user-posts' with arguments '('',)' not found. 1 pattern(s) tried: ['user/(?P[^/]+)$'] For some reason error is in head of base.html where I'm linking bootstrap. I also tried removing that link then its giving same error but at line 0 of base.html views.py: class UserPostListView(ListView): model = Post context_object_name = 'posts' template_name = 'blog/user_posts.html' paginate_by = 5 def get_queryset(self): user = get_object_or_404(User, username=self.kwargs.get('username')) return Post.objects.all().filter(author= user).order_by('-date_posted') urls.py file: from django.urls import path, include from .views import PostListView, PostDetailView, PostCreateView, PostUpdateView, PostDeletelView, UserPostListView from . import views urlpatterns = [ path('', PostListView.as_view(), name='blog-home'), path('user/<str:username>', UserPostListView.as_view(), name='user-posts'), path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'), path('post/new/', PostCreateView.as_view(), name='post-create'), path('post/<int:pk>/update/', PostUpdateView.as_view(), name='post-update'), path('post/<int:pk>/delete/', PostDeletelView.as_view(), name='post-delete'), path('about/', views.about, name='blog-about'), ] user_posts.html: {% if is_paginated %} {% if page_obj.has_previous %} <a class="btn btn-outline-info mb-4" href="?page=1">First</a> <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.previous_page_number }}">Previous</a> {% endif %} {% for num in page_obj.paginator.page_range %} {% if page_obj.number == num %} <a class="btn btn-info mb-4" href="?page={{ num }}">{{ num }}</a> {% elif num > page_obj.number|add:'-3' and num < page_obj.number|add:'3' %} <a class="btn btn-outline-info mb-4" href="?page={{ num }}">{{ num }}</a> {% endif %} {% endfor %} {% if page_obj.has_next %} <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.next_page_number }}">Next</a> <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.paginator.num_pages }}">Last</a> {% endif %} {% endif %} {% endblock content %} home.html {% if is_paginated %} {% if page_obj.has_previous %} <a class="btn btn-outline-info mb-4" href="?page=1">First</a> <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.previous_page_number }}">Previous</a> {% endif %} {% for num in page_obj.paginator.page_range %} {% if page_obj.number == num %} <a class="btn btn-info mb-4" href="?page={{ num }}">{{ num }}</a> {% elif num > page_obj.number|add:'-3' and num < page_obj.number|add:'3' %} <a class="btn btn-outline-info mb-4" href="?page={{ num }}">{{ num }}</a> {% endif %} {% endfor %} {% if page_obj.has_next %} <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.next_page_number }}">Next</a> <a class="btn btn-outline-info mb-4" href="?page={{ page_obj.paginator.num_pages }}">Last</a> {% endif %} {% endif %} post_detail.html {% extends "blog/base.html" %} {% block content %} <article class="media content-section"> <img class="rounded-circle article-img" src="{{ object.author.profile.image.url }}" alt=""> <div class="media-body"> <div class="article-metadata"> <a class="mr-2" href="{% url 'user-posts' object.author.username %}">{{ object.author }}</a> <small class="text-muted">{{ object.date_posted|date:"M d, Y"}}</small> {% if object.author == user %} <div> <a 
class="btn btn-secondary btn-sm mb-1" href="{% url 'post-update' object.id %}">Update</a> <a class="btn btn-danger btn-sm mb-1" href="{% url 'post-delete' object.id %}">Delete</a> </div> {% endif %} </div> <h2 class="article-title">{{ object.title }}</h2> <p class="article-content">{{ object.content }}</p> </div> </article> {% endblock content %} base.html {% extends "blog/base.html" %} {% block content %} <article class="media content-section"> <img class="rounded-circle article-img" src="{{ object.author.profile.image.url }}" alt=""> <div class="media-body"> <div class="article-metadata"> <a class="mr-2" href="{% url 'user-posts' object.author.username %}">{{ object.author }}</a> <small class="text-muted">{{ object.date_posted|date:"M d, Y"}}</small> {% if object.author == user %} <div> <a class="btn btn-secondary btn-sm mb-1" href="{% url 'post-update' object.id %}">Update</a> <a class="btn btn-danger btn-sm mb-1" href="{% url 'post-delete' object.id %}">Delete</a> </div> {% endif %} </div> <h2 class="article-title">{{ object.title }}</h2> <p class="article-content">{{ object.content }}</p> </div> </article> {% endblock content %}
[ "url(r'^user/(?P<username>\\w{0,50})/$', UserPostListView.as_view(), name='user-posts'),\n\njust add it in your url\nnot this\npath('user/<str:username>/', UserPostListView.as_view(),name='user-posts'),\n\n", "I had the same question before.\nIn your user_posts.html and base.html,\nchange all the name of the 'object' stuff to 'post'.\nExample:\n\"object.author\" -> \"post.author\", \n\"object.title\" -> \"post.title\", \n\"object.author.username\" -> \"post.author.username\"\n\nthat's a fix for me.\nPS: Actually, u didn't post out the above mentioned part of the code. XD\n", "In the urls.py file -\npath('user/<str:username>/', UserPostListView.as_view(),name='user-posts'),\n\nYou forget to add backslash after <str:username>\n", "i was watching the same course and i had the same problem.\nfollowing two steps made this work for me:\nfirst make sure that you are referring to a correct HTML and then add a forward slash after your url like this:\npath('user/<str:username>/', UserPostListView.as_view(), name='user-posts'),\n\nif it didnt work for you use url instead of path like this:\nurl(r'^user/(?P<username>\\w{0,50})/$', UserPostListView.as_view(), name='user-posts'),\n\n", "I had the same error . My mistake was I misspelled 'object' in this line posts_detail.html\n <a class=\"mr-2\" href=\"{%url 'user-posts' object.author.username %}\">{{ object.author }}</a>\n\nthis might not be the reason for your error, but anyone else stuck with this error check for typos in your HTML files.\n", "Replace post.author.username with post.author.\nIt helped me.\n", "I ran into the same issue as well.\nI found that replacing the following line in blog/urls.py:\nurlpatterns = [ \n\n path('user/<str:username>/', UserPostListView.as_view(), name='user-posts'),\n]\n\nwith\nfrom django.urls import path, re_path # <- don't forget this import\n\n\nurlpatterns = [\n\n re_path(r'^user/(?P<username>\\w{0,50})/$', UserPostListView.as_view(), name='user-posts'),\n}\n\n\nsolved the issue.\n" ]
[ 3, 2, 1, 1, 0, 0, 0 ]
[]
[]
[ "django", "django_pagination", "django_templates", "python" ]
stackoverflow_0060789353_django_django_pagination_django_templates_python.txt
Q: How to append two StringIO objects? X = ABC (example data) print(type(x)) ---> <class '_io.StringIO'> Y = ABC (example data) print(type(x)) ---> <class '_io.StringIO'> Z=X+Y Is it possible to append of these types data= Z.getvalue() How to achieve this with or without converting to other data types? Do we have any other ways rather than this? A: Since StringIO has a file-like interface - which means you can merge them in the same way as you would when copying files between file-like objects: from io import StringIO from shutil import copyfileobj a = StringIO('foo') b = StringIO('bar') c = StringIO() copyfileobj(a, c) copyfileobj(b, c) print(c.getvalue()) # foobar Since file-like objects also support iteration directly you can use chain from itertools to either iterate over them in sequence or create a string or a new StringIO object from them: from itertools import chain from io import StringIO a = StringIO('foo') b = StringIO('bar') c = StringIO(''.join(chain(a, b))) print(c.getvalue()) # foobar .. but in that case you can just call getvalue() and concatenate the values: from io import StringIO a = StringIO('foo') b = StringIO('bar') c = StringIO(a.getvalue() + b.getvalue()) print(c.getvalue()) # foobar .. so it kind of depends on what you want. Using itertools.chain means that you can just iterate over the contents of both StringIO buffers without creating a third object StringIO object.
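One more option, since io.StringIO is seekable and writable: append in place instead of building a third buffer. A small sketch (the sample values are placeholders):

from io import StringIO

x = StringIO('ABC')
y = StringIO('DEF')

x.seek(0, 2)              # 2 == io.SEEK_END: jump to the end of x
x.write(y.getvalue())     # append y's contents onto x

print(x.getvalue())       # ABCDEF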
How to append two StringIO objects?
X = ABC (example data) print(type(X)) ---> <class '_io.StringIO'> Y = ABC (example data) print(type(Y)) ---> <class '_io.StringIO'> Z = X + Y Is it possible to append the two objects like this and then read the result with data = Z.getvalue()? How can this be achieved, with or without converting to other data types? Are there any other ways to do it?
[ "Since StringIO has a file-like interface - which means you can merge them in the same way as you would when copying files between file-like objects:\nfrom io import StringIO\nfrom shutil import copyfileobj\n\na = StringIO('foo')\nb = StringIO('bar')\nc = StringIO()\ncopyfileobj(a, c)\ncopyfileobj(b, c)\nprint(c.getvalue()) # foobar\n\nSince file-like objects also support iteration directly you can use chain from itertools to either iterate over them in sequence or create a string or a new StringIO object from them:\nfrom itertools import chain\nfrom io import StringIO\n\na = StringIO('foo')\nb = StringIO('bar')\nc = StringIO(''.join(chain(a, b)))\n\nprint(c.getvalue()) # foobar\n\n.. but in that case you can just call getvalue() and concatenate the values:\nfrom io import StringIO\n\na = StringIO('foo')\nb = StringIO('bar')\nc = StringIO(a.getvalue() + b.getvalue())\nprint(c.getvalue()) # foobar\n\n.. so it kind of depends on what you want. Using itertools.chain means that you can just iterate over the contents of both StringIO buffers without creating a third object StringIO object.\n" ]
[ 1 ]
[]
[]
[ "python", "stringio" ]
stackoverflow_0074529822_python_stringio.txt
Q: Discord.py | How to remove all reactions from a message added by a specific user at once Right now my bot sends a message and reacts with a list of emojis to its own message, multiple users react using the emojis the bot reacted with. After some time the bot needs to remove all reactions except the ones the bot created. Lets say if a bot send a message "react text" and reacts with emojis "yes emoji"(reacted only by bot) and "no emoji"(also reacted only by bot) and multiple users react to "yes emoji"(then reacted by bot and multiple users) and "no emoji"(then also reacted by bot and multiple users) after some time all "yes emoji"(reacted by bot and multiple users) and "no emoji"(reacted by bot and multiple users) reactions need to be removed, but bot reactions have to say, so the final result has to be bot reactions "yes emoji"(reacted only by bot) and "no emoji"(also reacted only by bot). I used: await reaction.remove(user) but it removes 1 reaction at a time and takes a while, can all reactions made by a specific user from a specific message be removed at once, how could that be done if possible? Thank you A: Using message.reactions you get a list of reactions to that message, which you should iterate over. Then on that reaction, iterate over the users which reacted with it, and if the user is not the bot, remove the reaction for this user: for reaction in message.reactions: for user in await reaction.users().flatten(): if user != client.user: #check if the user is the bot, might be slightly different for you await reaction.remove(user) A: As far as I know, there is no API call for this, meaning you'll always have to await all of them individually, if you really do not want to clear all messages. However, that does not mean you cannot speed the previous answer up. asyncio allows for awaiting multiple callbacks at once, and I would suggest using that functionality here: coroutines = [] for reaction in message.reactions: users = await reaction.users().flatten(): if not any([user == client.user for user in users]): await reaction.clear() else: coroutines.extend([reaction.remove(user) for user in users]) await asyncio.gather(*coroutines) Doing this might get you rate-limited, but it will probably still be faster. :-) Also, it does still await the reaction.users() call for every reaction. If that is a problem, you can probably structure this better, but I currently don't see a good way to do this. I hope that helps. :-)
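If the end state is simply "only the bot's reactions remain", another route (a sketch, assuming the bot has the Manage Messages permission and knows which emojis it originally added) is to clear everything with one API call and re-add the bot's own reactions instead of removing users one by one:

BOT_EMOJIS = ["✅", "❌"]   # placeholder for the emojis the bot reacted with

async def reset_reactions(message):
    await message.clear_reactions()        # removes every reaction in one call
    for emoji in BOT_EMOJIS:
        await message.add_reaction(emoji)  # restore the bot's own reactions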
Discord.py | How to remove all reactions from a message added by a specific user at once
Right now my bot sends a message and reacts to its own message with a list of emojis, and multiple users then react using those same emojis. After some time the bot needs to remove all reactions except the ones the bot itself added. Say the bot sends a message "react text" and reacts with a "yes" emoji (reacted only by the bot) and a "no" emoji (also reacted only by the bot), and multiple users then react with both emojis, so each one ends up reacted by the bot and several users. After some time all the user reactions on both emojis need to be removed, but the bot's own reactions have to stay, so the final result is the "yes" and "no" emojis each reacted only by the bot. I used: await reaction.remove(user) but it removes one reaction at a time and takes a while. Can all reactions made by a specific user on a specific message be removed at once, and if so, how? Thank you
[ "Using message.reactions you get a list of reactions to that message, which you should iterate over. Then on that reaction, iterate over the users which reacted with it, and if the user is not the bot, remove the reaction for this user:\nfor reaction in message.reactions:\n for user in await reaction.users().flatten():\n if user != client.user: #check if the user is the bot, might be slightly different for you\n await reaction.remove(user)\n\n", "As far as I know, there is no API call for this, meaning you'll always have to await all of them individually, if you really do not want to clear all messages.\nHowever, that does not mean you cannot speed the previous answer up.\nasyncio allows for awaiting multiple callbacks at once, and I would suggest using that functionality here:\ncoroutines = []\nfor reaction in message.reactions:\n users = await reaction.users().flatten():\n if not any([user == client.user for user in users]):\n await reaction.clear()\n else:\n coroutines.extend([reaction.remove(user) for user in users])\n\nawait asyncio.gather(*coroutines)\n\nDoing this might get you rate-limited, but it will probably still be faster. :-)\nAlso, it does still await the reaction.users() call for every reaction. If that is a problem, you can probably structure this better, but I currently don't see a good way to do this.\nI hope that helps. :-)\n" ]
[ 0, 0 ]
[]
[]
[ "discord", "discord.py", "python", "python_3.x" ]
stackoverflow_0068813945_discord_discord.py_python_python_3.x.txt
Q: password protect pdf files created using pisa I'm converting a html file into pdf using python pisa module. I need to password protect it. I searched everywhere in pisa module and couldn't find a solution for it. Is there anyway to password protect it using python? The constraint is I want keep my file in html format. On demand basis, I want to convert it into pdf file and password protect it. I don't want to use reportlab module. A: You can with pyPdf which is optional for pisa but has an encryption method: A Pure-Python library built as a PDF toolkit. It is capable of: extracting document information (title, author, ...), splitting documents page by page, merging documents page by page, cropping pages, merging multiple pages into a single page, encrypting and decrypting PDF files. A: you could make use of encrypt argument of pisa.CreatePDF pdf = pisa.CreatePDF(html, encrypt='password', dest=response)
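To make the first answer concrete, here is a hedged sketch of the post-processing step using PyPDF2 (the maintained successor of pyPdf; method names differ in very old versions, and the file names below are placeholders):

from PyPDF2 import PdfReader, PdfWriter

reader = PdfReader("report.pdf")          # the PDF that pisa generated
writer = PdfWriter()
for page in reader.pages:
    writer.add_page(page)

writer.encrypt("s3cret")                  # password needed to open the file
with open("report_protected.pdf", "wb") as f:
    writer.write(f)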
password protect pdf files created using pisa
I'm converting an HTML file into a PDF using the Python pisa module. I need to password protect it. I searched everywhere in the pisa module and couldn't find a solution for it. Is there any way to password protect it using Python? The constraint is that I want to keep my file in HTML format; on demand, I want to convert it into a PDF file and password protect it. I don't want to use the reportlab module.
[ "You can with pyPdf which is optional for pisa but has an encryption method:\n\nA Pure-Python library built as a PDF toolkit. It is capable of:\nextracting document information (title, author, ...), splitting\n documents page by page, merging documents page by page, cropping\n pages, merging multiple pages into a single page, encrypting and\n decrypting PDF files.\n\n", "you could make use of encrypt argument of pisa.CreatePDF\npdf = pisa.CreatePDF(html, encrypt='password', dest=response)\n\n" ]
[ 1, 0 ]
[]
[]
[ "pdf_generation", "pisa", "python", "xhtml2pdf" ]
stackoverflow_0012497983_pdf_generation_pisa_python_xhtml2pdf.txt
Q: Solve integral symbolically by isolating integrand in sympy I was wondering why sympy won't solve the following problem: from sympy import * ss = symbols('s', real = True) a = symbols('a', real = True) f = Function('f') g = Function('g') eq = Integral(a*g(ss) + f(ss),(ss,0,oo)) solve(eq, a) The return is an empty solution list. I want to tell sympy enough stuff so that I get as a solution: -1*Integral(f(ss),(ss,0,oo))/Integral(g(ss),(ss,0,oo)) That is, its safe to assume integrals converge, are real-valued and non-zero. Is there any other assumption/function I can use to get the desired output? Thanks A: Your assumption about the expected result is still inaccurate. For the equation to have a solution, Integral(g(ss),(ss,0,oo)) must be guaranteed to be real and non-zero, which is in no way implied by your equations, so no result is returned. Further, it appears that if you want to solve equations involving an Integral, you need to use doit. Take a look below from sympy import * x = symbols('x', real = True) a = symbols('a', real = True) f = Function('f') eq = a+Integral(f(x), (x, 0, oo)) print('Eq.1', solve(eq, a)) eq2 = Integral(a+f(x), (x, 0, oo)) print('Eq.2', solve(eq2.doit(), a)) eq3 = Integral(a+f(x), (x, 0, 1)) print('Eq.3', solve(eq3.doit(), a)) eq4 = Integral(a+2, (x, 0, 3)) print('Eq.4', solve(eq4, a)) print('Eq.4', solve(eq4.doit(), a)) Output: Eq.1 [-Integral(f(x), (x, 0, oo))] Eq.2 [] Eq.3 [] Eq.4 [] Eq.4 [-2] Note that eq.1 is solvable, in the sense that you can move a on one side of the equation since it is not inside a limit (integrals with infinite bounds are shorthand for the limit of an integral with the respective bound approaching infinity). However, eq.2 and eq.3 are not solvable, because the limit of a sum is equal to the sum of the limits only if they converge to a real number (and, in your case, there is no guarantee that they do). Finally, eq.4 is trivially solvable, but you have to use doit. In eq.1 you can get away without it. That said, you can "overcome" the formalism, using expand. Take a look below. from sympy import * x = symbols('x', real = True) a = symbols('a', real = True) f = Function('f') g = Function('g') eq5 = a+Integral(a+f(x), (x, 0, 1)) print('Eq.5', solve(eq5.expand().doit(), a)) eq6 = Integral(a+f(x), (x, 0, 1)) print('Eq.6', solve(eq6.expand().doit(), a)) eq7 = Integral(a*g(x)+f(x), (x, 0, oo)) print('Eq.7', solve(eq7.expand().doit(), a)) Output: Eq.5 [-Integral(f(x), (x, 0, 1))/2] Eq.6 [-Integral(f(x), (x, 0, 1))] Eq.7 [-Integral(f(x), (x, 0, oo))/Integral(g(x), (x, 0, oo))] This works because it allows certain operations, by playing fast and loose with the details. But, it still doesn't work, when the results are plain-wrong (try to use oo as the upper bound in eq.6 or eq.7). A: This is your equation: In [9]: eq Out[9]: ∞ ⌠ ⎮ (a⋅g(s) + f(s)) ds ⌡ 0 You would like to solve for a to make this expression equal to zero. 
We can rearrange this expression to extract a so that solve understands how to isolate a: In [10]: eq.expand() Out[10]: ∞ ⌠ ⎮ (a⋅g(s) + f(s)) ds ⌡ 0 In [11]: eq.expand(force=True) Out[11]: ∞ ∞ ⌠ ⌠ ⎮ a⋅g(s) ds + ⎮ f(s) ds ⌡ ⌡ 0 0 In [12]: factor_terms(eq.expand(force=True)) Out[12]: ∞ ∞ ⌠ ⌠ a⋅⎮ g(s) ds + ⎮ f(s) ds ⌡ ⌡ 0 0 In [13]: solve(factor_terms(eq.expand(force=True)), a) Out[13]: ⎡ ∞ ⎤ ⎢ ⌠ ⎥ ⎢-⎮ f(s) ds ⎥ ⎢ ⌡ ⎥ ⎢ 0 ⎥ ⎢───────────⎥ ⎢ ∞ ⎥ ⎢ ⌠ ⎥ ⎢ ⎮ g(s) ds ⎥ ⎢ ⌡ ⎥ ⎣ 0 ⎦ We have to use force=True because expand will not presume to know that an integral with an upper limit of oo converges and splitting the integral into two integrals might turn a converging integral into a sum of non-converging integrals.
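Collected into one runnable script, the second answer's steps look like this (it simply reproduces the session above):

from sympy import symbols, Function, Integral, oo, factor_terms, solve

s, a = symbols('s a', real=True)
f, g = Function('f'), Function('g')

eq = Integral(a*g(s) + f(s), (s, 0, oo))

# force=True splits the integral without a convergence proof;
# factor_terms then pulls `a` out in front of Integral(g(s), (s, 0, oo)).
expr = factor_terms(eq.expand(force=True))
print(solve(expr, a))
# [-Integral(f(s), (s, 0, oo))/Integral(g(s), (s, 0, oo))]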
Solve integral symbolically by isolating integrand in sympy
I was wondering why sympy won't solve the following problem: from sympy import * ss = symbols('s', real = True) a = symbols('a', real = True) f = Function('f') g = Function('g') eq = Integral(a*g(ss) + f(ss),(ss,0,oo)) solve(eq, a) The return is an empty solution list. I want to tell sympy enough stuff so that I get as a solution: -1*Integral(f(ss),(ss,0,oo))/Integral(g(ss),(ss,0,oo)) That is, its safe to assume integrals converge, are real-valued and non-zero. Is there any other assumption/function I can use to get the desired output? Thanks
[ "Your assumption about the expected result is still inaccurate. For the equation to have a solution, Integral(g(ss),(ss,0,oo)) must be guaranteed to be real and non-zero, which is in no way implied by your equations, so no result is returned.\nFurther, it appears that if you want to solve equations involving an Integral, you need to use doit. Take a look below\nfrom sympy import *\n\nx = symbols('x', real = True)\na = symbols('a', real = True)\nf = Function('f')\n\neq = a+Integral(f(x), (x, 0, oo))\nprint('Eq.1', solve(eq, a))\n\neq2 = Integral(a+f(x), (x, 0, oo))\nprint('Eq.2', solve(eq2.doit(), a))\n\neq3 = Integral(a+f(x), (x, 0, 1))\nprint('Eq.3', solve(eq3.doit(), a))\n\neq4 = Integral(a+2, (x, 0, 3))\nprint('Eq.4', solve(eq4, a))\nprint('Eq.4', solve(eq4.doit(), a))\n\nOutput:\nEq.1 [-Integral(f(x), (x, 0, oo))]\nEq.2 []\nEq.3 []\nEq.4 []\nEq.4 [-2]\n\nNote that eq.1 is solvable, in the sense that you can move a on one side of the equation since it is not inside a limit (integrals with infinite bounds are shorthand for the limit of an integral with the respective bound approaching infinity). However, eq.2 and eq.3 are not solvable, because the limit of a sum is equal to the sum of the limits only if they converge to a real number (and, in your case, there is no guarantee that they do).\nFinally, eq.4 is trivially solvable, but you have to use doit. In eq.1 you can get away without it.\n\nThat said, you can \"overcome\" the formalism, using expand. Take a look below.\nfrom sympy import *\n\nx = symbols('x', real = True)\na = symbols('a', real = True)\nf = Function('f')\ng = Function('g')\n\neq5 = a+Integral(a+f(x), (x, 0, 1))\nprint('Eq.5', solve(eq5.expand().doit(), a))\n\neq6 = Integral(a+f(x), (x, 0, 1))\nprint('Eq.6', solve(eq6.expand().doit(), a))\n\neq7 = Integral(a*g(x)+f(x), (x, 0, oo))\nprint('Eq.7', solve(eq7.expand().doit(), a))\n\nOutput:\nEq.5 [-Integral(f(x), (x, 0, 1))/2]\nEq.6 [-Integral(f(x), (x, 0, 1))]\nEq.7 [-Integral(f(x), (x, 0, oo))/Integral(g(x), (x, 0, oo))]\n\nThis works because it allows certain operations, by playing fast and loose with the details. But, it still doesn't work, when the results are plain-wrong (try to use oo as the upper bound in eq.6 or eq.7).\n", "This is your equation:\nIn [9]: eq\nOut[9]: \n∞ \n⌠ \n⎮ (a⋅g(s) + f(s)) ds\n⌡ \n0 \n\nYou would like to solve for a to make this expression equal to zero. We can rearrange this expression to extract a so that solve understands how to isolate a:\nIn [10]: eq.expand()\nOut[10]: \n∞ \n⌠ \n⎮ (a⋅g(s) + f(s)) ds\n⌡ \n0 \n\nIn [11]: eq.expand(force=True)\nOut[11]: \n∞ ∞ \n⌠ ⌠ \n⎮ a⋅g(s) ds + ⎮ f(s) ds\n⌡ ⌡ \n0 0 \n\nIn [12]: factor_terms(eq.expand(force=True))\nOut[12]: \n ∞ ∞ \n ⌠ ⌠ \na⋅⎮ g(s) ds + ⎮ f(s) ds\n ⌡ ⌡ \n 0 0 \n\nIn [13]: solve(factor_terms(eq.expand(force=True)), a)\nOut[13]: \n⎡ ∞ ⎤\n⎢ ⌠ ⎥\n⎢-⎮ f(s) ds ⎥\n⎢ ⌡ ⎥\n⎢ 0 ⎥\n⎢───────────⎥\n⎢ ∞ ⎥\n⎢ ⌠ ⎥\n⎢ ⎮ g(s) ds ⎥\n⎢ ⌡ ⎥\n⎣ 0 ⎦\n\nWe have to use force=True because expand will not presume to know that an integral with an upper limit of oo converges and splitting the integral into two integrals might turn a converging integral into a sum of non-converging integrals.\n" ]
[ 1, 0 ]
[]
[]
[ "python", "sympy" ]
stackoverflow_0074526345_python_sympy.txt
Q: Calling Oracle sqlldr using Python I am trying to load sqlldr using python so and i am using subprocess.call for that. cmd = 'sqlldr USERID={user}/{password}@Databse_name control={controlfile} data={datafile}' subprocess.call(cmd, shell=True) the output shows: sqlldr USERID={user}/{password}@Databse_name control={controlfile} no such directory of file Can someone help me with this and what do we add after the @ in this? A: Provide the full path, not only file names. A: Please try with shell. Make sure u have downloaded instant_client for oracle as well sqlldr. import os import subprocess BASE_DIR = Path(__file__).resolve().parent control_file = os.path.join(BASE_DIR, 'SAMPLE_ITEM_LOAD.ctrl') data_file = os.path.join(BASE_DIR, 'sample_item_load.csv') my_env = os.environ.copy() my_env["PATH"] = f"{my_env['ORACLE_HOME']}:{my_env['PATH']}" sql_con = f'{username}/{password}@{host}:{port}/{db_sid}' sql_ld_command = f'sqlldr {sql_con} CONTROL={control_file} DATA={data_file}' sql_ldr_proc = subprocess.Popen(sql_ld_command,shell=True, env=my_env, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = sql_ldr_proc.communicate() retn_code = sql_ldr_proc.wait()
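A slightly more compact variant of the second answer, using subprocess.run with full paths and an exit-status check (credentials, TNS alias and paths are placeholders — substitute your own, and make sure sqlldr and the Instant Client are on PATH):

import subprocess

cmd = (
    "sqlldr USERID=scott/tiger@ORCLPDB1 "
    "control=/full/path/to/load.ctl "
    "data=/full/path/to/load.csv "
    "log=/full/path/to/load.log"
)
result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
if result.returncode != 0:
    # SQL*Loader signals warnings and errors with nonzero exit codes;
    # the details end up in the .log file named above.
    print(result.stdout)
    print(result.stderr)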
Calling Oracle sqlldr using Python
I am trying to run sqlldr from Python, and I am using subprocess.call for that. cmd = 'sqlldr USERID={user}/{password}@Database_name control={controlfile} data={datafile}' subprocess.call(cmd, shell=True) The output shows: sqlldr USERID={user}/{password}@Database_name control={controlfile} no such directory of file Can someone help me with this, and what should go after the @ in this?
[ "Provide the full path, not only file names.\n", "Please try with shell. Make sure u have downloaded instant_client for oracle as well sqlldr.\nimport os\nimport subprocess\n\nBASE_DIR = Path(__file__).resolve().parent\ncontrol_file = os.path.join(BASE_DIR, 'SAMPLE_ITEM_LOAD.ctrl')\ndata_file = os.path.join(BASE_DIR, 'sample_item_load.csv')\n\nmy_env = os.environ.copy()\nmy_env[\"PATH\"] = f\"{my_env['ORACLE_HOME']}:{my_env['PATH']}\"\n\nsql_con = f'{username}/{password}@{host}:{port}/{db_sid}'\n\nsql_ld_command = f'sqlldr {sql_con} CONTROL={control_file} DATA={data_file}'\n sql_ldr_proc = subprocess.Popen(sql_ld_command,shell=True,\n env=my_env,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n \n out, err = sql_ldr_proc.communicate()\n retn_code = sql_ldr_proc.wait()\n\n" ]
[ 0, 0 ]
[]
[]
[ "oracle", "python", "shell", "sql_loader" ]
stackoverflow_0071171725_oracle_python_shell_sql_loader.txt
Q: How can I make python change the characters in a batch file? I'm making a script that changes your dns and then pings a website to test latency and I've created a list with all the DNS and I want to use an external batch script to change the dns. However, I'm reasonably new to python and I don't know how to make python take data from the list and replace it in the batch file. This would help me very much, thank you! **Python script ** from tcp_latency import measure_latency host = input("Enter host: ") def pinger(): latency = sum(measure_latency(host, port=80, runs=10, timeout=2.5)) latency = latency/10 print("Your average latency is",latency) dns = ["1.1.1.1","1.0.0.1","8.8.8.8","8.8.4.4","9.9.9.9","149.112.112.112","208.67.222.222","208.67.220.220","8.26.56.26","8.20.247.20","185.228.168.9","185.228.169.9"] Batch script @echo off cls for /F "skip=3 tokens=1,2,3* delims= " %%G in ('netsh interface show interface') DO ( IF "%%H"=="Disconnected" netsh interface set interface "%%J" enabled IF "%%H"=="Connected" netsh interface set interface "%%J" enabled echo %%J netsh interface ip set dns %%J static 1.1.1.1 ) I haven't tried any approaches just yet A: Simple string replacement should work nicely dns = ["1.1.1.1","1.0.0.1","8.8.8.8","8.8.4.4","9.9.9.9","149.112.112.112","208.67.222.222","208.67.220.220","8.26.56.26","8.20.247.20","185.228.168.9","185.228.169.9"] # Assumes .bat and .py scripts are in the same directory bat_file = "tester.bat" # Read original .bat file with open(bat_file, "r") as fs: bat_str = fs.read() base_name = bat_file.split(".")[0] for dns_ip in dns: new_bat_str = bat_str.replace("1.1.1.1", dns_ip) # Parse new name for .bat file new_bat_file = f"{base_name}_dns_{dns_ip.replace('.', '')}.bat" with open(new_bat_file, "w") as fs: fs.write(new_bat_str)
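As a usage sketch, one of the generated files can then be run from the same Python script before re-testing latency (the file name follows the answer's naming scheme for the first DNS entry and is therefore an assumption; netsh needs an elevated/administrator prompt):

import subprocess

subprocess.run(["cmd", "/c", "tester_dns_1111.bat"], check=True)
pinger()   # re-measure latency with the new DNS applied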
How can I make python change the characters in a batch file?
I'm making a script that changes your dns and then pings a website to test latency and I've created a list with all the DNS and I want to use an external batch script to change the dns. However, I'm reasonably new to python and I don't know how to make python take data from the list and replace it in the batch file. This would help me very much, thank you! **Python script ** from tcp_latency import measure_latency host = input("Enter host: ") def pinger(): latency = sum(measure_latency(host, port=80, runs=10, timeout=2.5)) latency = latency/10 print("Your average latency is",latency) dns = ["1.1.1.1","1.0.0.1","8.8.8.8","8.8.4.4","9.9.9.9","149.112.112.112","208.67.222.222","208.67.220.220","8.26.56.26","8.20.247.20","185.228.168.9","185.228.169.9"] Batch script @echo off cls for /F "skip=3 tokens=1,2,3* delims= " %%G in ('netsh interface show interface') DO ( IF "%%H"=="Disconnected" netsh interface set interface "%%J" enabled IF "%%H"=="Connected" netsh interface set interface "%%J" enabled echo %%J netsh interface ip set dns %%J static 1.1.1.1 ) I haven't tried any approaches just yet
[ "Simple string replacement should work nicely\ndns = [\"1.1.1.1\",\"1.0.0.1\",\"8.8.8.8\",\"8.8.4.4\",\"9.9.9.9\",\"149.112.112.112\",\"208.67.222.222\",\"208.67.220.220\",\"8.26.56.26\",\"8.20.247.20\",\"185.228.168.9\",\"185.228.169.9\"]\n\n# Assumes .bat and .py scripts are in the same directory\nbat_file = \"tester.bat\"\n\n# Read original .bat file\nwith open(bat_file, \"r\") as fs:\n bat_str = fs.read()\n\nbase_name = bat_file.split(\".\")[0]\nfor dns_ip in dns:\n new_bat_str = bat_str.replace(\"1.1.1.1\", dns_ip)\n # Parse new name for .bat file\n new_bat_file = f\"{base_name}_dns_{dns_ip.replace('.', '')}.bat\"\n with open(new_bat_file, \"w\") as fs:\n fs.write(new_bat_str)\n\n" ]
[ 0 ]
[]
[]
[ "networking", "python", "python_3.x" ]
stackoverflow_0074531403_networking_python_python_3.x.txt
Q: How do I use a datepicker on a simple Django form? Before you mark this as a duplicate to the most famous django datepicker question on SO, hear me out. I have gone through all the questions in the first ten pages of the search results, but no one seems to be explaining anything from the beginning. What I am looking for is the most simple way to have a datepicker on my form, I don't know if the most simple way is importing it from Admin or using an existing jQuery thing, but whatever it is, can someone please explain step by step like you would do to a baby? This, I believe will help any new programmer like me out there who's looking to learn. This is what I have so far. My Form: class SampleForm(forms.Form): date_of_birth = forms.DateField(label='Enter Date') My View: def dlp_test(request): form = SampleForm() return render(request, 'dlp_test.html', {'form': form}) My Template: <form action="/your-name/" method="post"> {% csrf_token %} {{ form }} <input type="submit" value="Submit" /> </form> This is the most simple setup anyone can start from, how do I take it from here? When someone clicks on the datefield in the HTML, I want a calendar to pop up so that they can select a date. If achieving this requires me to have locally stored JS or jQuery files, I'd prefer the URL be embedded in the HTML, rather than downloading and then mentioning the source, because my paths are messed up right now. You can assume that I don't have anything else downloaded or installed other than Django and Python. A: This is probably somewhat hacky, but when I want to use the jQueryUI datepicker for a specific form field I do this: Add the stylesheet in the <head> of my template: <link rel="stylesheet" href="https://code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css" /> Add the javascript file at the end of my template: <script src="https://code.jquery.com/ui/1.12.1/jquery-ui.min.js" integrity="sha256-VazP97ZCwtekAsvgPBSUwPFKdrwD3unUfSGVYrahUqU=" crossorigin="anonymous"></script> The field of your form with which you want to use the datepicker will have a specific ID. In your case it will probably be id_date_of_birth. So you can select the date of birth textbox by ID and apply the datepicker to it (this assumes you are also using jQuery): <script> $(document).ready(function() { $('#id_date_of_birth').datepicker({firstDay: 1, dateFormat: "dd/mm/yy", defaultDate: "16/06/2017", minDate: "16/06/2017", maxDate: "25/06/2017"}); }); </script> Note that this snippet has to come AFTER you include the javascript file. Also, I am setting some defaults you may not need - the simplest way to make it work would be: <script> $(document).ready(function() { $('#id_date_of_birth').datepicker(); }); </script> Hopefully that helps you out! A: This is what I added to my template and it is working now. To someone in the future looking for an answer, here it is. Although, I must tell you that this might not scale well on large projects, you might have to use this function everywhere or something like that, but for now, this works for me. <!doctype html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <title>jQuery UI Datepicker - Default functionality</title> <link rel="stylesheet" href="//code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css"> <link rel="stylesheet" href="/resources/demos/style.css"> <script src="https://code.jquery.com/jquery-1.12.4.js"></script> <script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script> </head> <body> <form action="." 
method="post"> {% csrf_token %} {{form.as_p}} <p>Date: <input type="text" id="datepicker"></p> <input type="submit" value="Submit" /> </form> <script> $( function() { $( "#id_date_of_birth" ).datepicker(); $( "#datepicker" ).datepicker(); } ); </script> </body> </html> A: I recently needed to add a date field with a datepicker to a form. I did this quick so please forgive a typo or 3 :) The Jquery is referencing an id "#id_date_of_birth", but it would be better practice to make this a class like "datechooser" so you can use it on any form instead of just the "date_of_birth" form field. Models.py from django.db import models class Sample(models.Model): date_of_birth = models.DateTimeField(help_text='date_of_birth', null=True) Forms.py from django.forms import ModelForm, widgets, DateTimeField, DateField, DateInput class SampleForm(ModelForm): date_of_birth = DateTimeField(widget = DateInput(format='%Y-%m-%d'), input_formats=('%Y-%m-%d',), required=False) class Meta: model = Sample fields = ["date_of_birth",] Views.py from django.views import generic from sample.models import Sample from sample.forms import SampleForm def dlp_test(request): form = SampleForm() form = SampleForm(initial={'date_of_birth': timezone.now().date()}) # Set an initial value for today return render(request, 'dlp_test.html', {'form': form}) dlp_test.html {{ form.date_of_birth }} {{ form.date_of_birth.errors }} Datepicker via Jquery for a form field Header.html <script src="https://code.jquery.com/jquery-3.3.1.slim.min.js"></script> <link rel="stylesheet" href="//code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css"> <script src="https://code.jquery.com/jquery-1.12.4.js"></script> <script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script> $( function() { $( "#id_date_of_birth" ).datepicker({ dateFormat: 'yy-mm-dd', changeMonth: true, changeYear: true }); }); A: This is what i do to get datepicker in django forms. install bootstrap_datepicker_plus by pip command. pip install django-bootstrap_datepicker_plus forms.py from .models import Hello from django import forms from bootstrap_datepicker_plus import DatePickerInput class CreateForm(forms.ModelForm): class Meta: model = Hello fields =[ "Date", ] widgets = { 'Date': DatePickerInput(), } settings.py INSTALLED_APPS = [ 'bootstrap_datepicker_plus', ] A: I searched and struggled a lot to get the problem fixed I recommend this source. In forms.py: # Create custom widget in your forms.py file. class DateInput(forms.DateInput): input_type = 'date' In the same forms.py: # Form class in forms.py class LastActiveForm(forms.Form): """ Last Active Date Form """ last_active = forms.DateField(widget=DateInput) This works perfectly with formset too. In the template file: { form.as_p } # Only without any external libraries or add-ons
How do I use a datepicker on a simple Django form?
Before you mark this as a duplicate to the most famous django datepicker question on SO, hear me out. I have gone through all the questions in the first ten pages of the search results, but no one seems to be explaining anything from the beginning. What I am looking for is the most simple way to have a datepicker on my form, I don't know if the most simple way is importing it from Admin or using an existing jQuery thing, but whatever it is, can someone please explain step by step like you would do to a baby? This, I believe will help any new programmer like me out there who's looking to learn. This is what I have so far. My Form: class SampleForm(forms.Form): date_of_birth = forms.DateField(label='Enter Date') My View: def dlp_test(request): form = SampleForm() return render(request, 'dlp_test.html', {'form': form}) My Template: <form action="/your-name/" method="post"> {% csrf_token %} {{ form }} <input type="submit" value="Submit" /> </form> This is the most simple setup anyone can start from, how do I take it from here? When someone clicks on the datefield in the HTML, I want a calendar to pop up so that they can select a date. If achieving this requires me to have locally stored JS or jQuery files, I'd prefer the URL be embedded in the HTML, rather than downloading and then mentioning the source, because my paths are messed up right now. You can assume that I don't have anything else downloaded or installed other than Django and Python.
[ "This is probably somewhat hacky, but when I want to use the jQueryUI datepicker for a specific form field I do this:\nAdd the stylesheet in the <head> of my template:\n<link rel=\"stylesheet\" href=\"https://code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css\" />\nAdd the javascript file at the end of my template:\n<script src=\"https://code.jquery.com/ui/1.12.1/jquery-ui.min.js\"\n integrity=\"sha256-VazP97ZCwtekAsvgPBSUwPFKdrwD3unUfSGVYrahUqU=\"\n crossorigin=\"anonymous\"></script>\nThe field of your form with which you want to use the datepicker will have a specific ID. In your case it will probably be id_date_of_birth. So you can select the date of birth textbox by ID and apply the datepicker to it (this assumes you are also using jQuery):\n<script>\n $(document).ready(function() {\n $('#id_date_of_birth').datepicker({firstDay: 1,\n dateFormat: \"dd/mm/yy\",\n defaultDate: \"16/06/2017\",\n minDate: \"16/06/2017\",\n maxDate: \"25/06/2017\"});\n });\n</script>\n\nNote that this snippet has to come AFTER you include the javascript file. Also, I am setting some defaults you may not need - the simplest way to make it work would be:\n<script>\n $(document).ready(function() {\n $('#id_date_of_birth').datepicker();\n });\n</script>\n\nHopefully that helps you out!\n", "This is what I added to my template and it is working now. To someone in the future looking for an answer, here it is. Although, I must tell you that this might not scale well on large projects, you might have to use this function everywhere or something like that, but for now, this works for me.\n<!doctype html>\n<html lang=\"en\">\n<head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <title>jQuery UI Datepicker - Default functionality</title>\n <link rel=\"stylesheet\" href=\"//code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css\">\n <link rel=\"stylesheet\" href=\"/resources/demos/style.css\">\n <script src=\"https://code.jquery.com/jquery-1.12.4.js\"></script>\n <script src=\"https://code.jquery.com/ui/1.12.1/jquery-ui.js\"></script>\n </head>\n\n<body>\n<form action=\".\" method=\"post\">\n {% csrf_token %}\n {{form.as_p}}\n <p>Date: <input type=\"text\" id=\"datepicker\"></p>\n <input type=\"submit\" value=\"Submit\" />\n</form>\n\n\n<script>\n\n $( function()\n {\n $( \"#id_date_of_birth\" ).datepicker();\n $( \"#datepicker\" ).datepicker();\n } );\n</script>\n\n\n</body>\n</html>\n\n", "I recently needed to add a date field with a datepicker to a form. 
I did this quick so please forgive a typo or 3 :)\nThe Jquery is referencing an id \"#id_date_of_birth\", but it would be better practice to make this a class like \"datechooser\" so you can use it on any form instead of just the \"date_of_birth\" form field.\nModels.py\nfrom django.db import models\n\nclass Sample(models.Model):\n date_of_birth = models.DateTimeField(help_text='date_of_birth', null=True)\n\nForms.py\nfrom django.forms import ModelForm, widgets, DateTimeField, DateField, DateInput\n\nclass SampleForm(ModelForm):\n date_of_birth = DateTimeField(widget = DateInput(format='%Y-%m-%d'),\n input_formats=('%Y-%m-%d',),\n required=False)\n class Meta:\n model = Sample\n fields = [\"date_of_birth\",]\n\nViews.py\nfrom django.views import generic\nfrom sample.models import Sample\nfrom sample.forms import SampleForm\n\ndef dlp_test(request):\n form = SampleForm()\n form = SampleForm(initial={'date_of_birth': timezone.now().date()}) # Set an initial value for today\n\n return render(request, 'dlp_test.html', {'form': form})\n\ndlp_test.html\n{{ form.date_of_birth }}\n{{ form.date_of_birth.errors }}\n\nDatepicker via Jquery for a form field\nHeader.html\n<script src=\"https://code.jquery.com/jquery-3.3.1.slim.min.js\"></script>\n<link rel=\"stylesheet\" href=\"//code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css\">\n <script src=\"https://code.jquery.com/jquery-1.12.4.js\"></script>\n <script src=\"https://code.jquery.com/ui/1.12.1/jquery-ui.js\"></script>\n\n $( function() {\n $( \"#id_date_of_birth\" ).datepicker({\n dateFormat: 'yy-mm-dd',\n changeMonth: true,\n changeYear: true\n });\n });\n\n", "This is what i do to get datepicker in django forms.\ninstall bootstrap_datepicker_plus by pip command.\npip install django-bootstrap_datepicker_plus\nforms.py\nfrom .models import Hello\nfrom django import forms\nfrom bootstrap_datepicker_plus import DatePickerInput\n\nclass CreateForm(forms.ModelForm):\n class Meta:\n model = Hello\n fields =[ \n \"Date\",\n ]\n\n widgets = {\n 'Date': DatePickerInput(),\n }\n\nsettings.py\nINSTALLED_APPS = [\n 'bootstrap_datepicker_plus',\n]\n\n", "I searched and struggled a lot to get the problem fixed\nI recommend\nthis source.\nIn forms.py:\n# Create custom widget in your forms.py file.\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\nIn the same forms.py:\n# Form class in forms.py\nclass LastActiveForm(forms.Form):\n \"\"\"\n Last Active Date Form\n \"\"\"\n last_active = forms.DateField(widget=DateInput)\n\nThis works perfectly with formset too.\nIn the template file:\n{ form.as_p }\n\n# Only without any external libraries or add-ons\n\n" ]
[ 2, 0, 0, 0, 0 ]
[]
[]
[ "django", "python" ]
stackoverflow_0042165163_django_python.txt
Q: Google Workspace API, API call to create a user? We're using code similar to this for creating the user. However, we get a 400 Error when we call the API. What is the correct way to call the API? from __future__ import print_function import os.path from google.auth.transport.requests import Request from google.oauth2.credentials import Credentials from google_auth_oauthlib.flow import InstalledAppFlow from googleapiclient.discovery import build # If modifying these scopes, delete the file token.json. SCOPES = ['https://www.googleapis.com/auth/admin.directory.user'] def main():     """Shows basic usage of the Admin SDK Directory API.     Prints the emails and names of the first 10 users in the domain.     """     creds = None     # The file token.json stores the user's access and refresh tokens, and is     # created automatically when the authorization flow completes for the first     # time.     if os.path.exists('token.json'):         creds = Credentials.from_authorized_user_file('token.json', SCOPES)     # If there are no (valid) credentials available, let the user log in.     if not creds or not creds.valid:         if creds and creds.expired and creds.refresh_token:             creds.refresh(Request())         else:             flow = InstalledAppFlow.from_client_secrets_file(                 'credentials.json', SCOPES)             creds = flow.run_local_server(port=0)         # Save the credentials for the next run         with open('token.json', 'w') as token:             token.write(creds.to_json())     service = build('admin', 'directory_v1', credentials=creds)     results = service.users().create(customer='customer_name_here').execute() if __name__ == '__main__':     main() We tried to add other arguments to the list but that also didn't work. We tried to find the docs for the API but couldn't find it. A: You appear to have an issue with how you are creating the user, who you are inserting. from google.auth.transport.requests import Request from google.oauth2.credentials import Credentials from google_auth_oauthlib.flow import InstalledAppFlow from googleapiclient.discovery import build # If modifying these scopes, delete the file token.json. SCOPES = ['https://www.googleapis.com/auth/admin.directory.user'] CREDENTIALS = 'C:\Development\FreeLance\GoogleSamples\Credentials\Workspace-Installed-TestEverything.json' STORED_USER_TOKEN = 'createUserToken.json' def main(): """Shows basic usage of the Admin SDK Directory API. Prints the emails and names of the first 10 users in the domain. """ creds = None # The file token.json stores the user's access and refresh tokens, and is # created automatically when the authorization flow completes for the first # time. if os.path.exists(STORED_USER_TOKEN): creds = Credentials.from_authorized_user_file(STORED_USER_TOKEN, SCOPES) # If there are no (valid) credentials available, let the user log in. 
if not creds or not creds.valid: if creds and creds.expired and creds.refresh_token: creds.refresh(Request()) else: flow = InstalledAppFlow.from_client_secrets_file( CREDENTIALS, SCOPES) creds = flow.run_local_server(port=0) # Save the credentials for the next run with open(STORED_USER_TOKEN, 'w') as token: token.write(creds.to_json()) service = build('admin', 'directory_v1', credentials=creds) new_user = { "name": { "givenName": "Contact", "familyName": "Daimto", }, "primaryEmail": "xxx@daimto.com", "recoveryEmail": "xxxx@daimto.com", "password": "Temp42!!!", "changePasswordAtNextLogin": True } response = service.users().insert(body=new_user ).execute() if __name__ == '__main__': main()
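When the call still returns a 400, wrapping it in error handling makes the API's reason visible — a sketch that reuses the service and new_user objects built in the answer above:

from googleapiclient.errors import HttpError

try:
    response = service.users().insert(body=new_user).execute()
    print('Created', response.get('primaryEmail'))
except HttpError as err:
    # err.content carries the API's JSON error detail, e.g. a malformed body,
    # a too-weak password, or a primaryEmail that already exists.
    print(err.resp.status, err.content)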
Google Workspace API, API call to create a user?
We're using code similar to this for creating the user. However, we get a 400 Error when we call the API. What is the correct way to call the API? from __future__ import print_function import os.path from google.auth.transport.requests import Request from google.oauth2.credentials import Credentials from google_auth_oauthlib.flow import InstalledAppFlow from googleapiclient.discovery import build # If modifying these scopes, delete the file token.json. SCOPES = ['https://www.googleapis.com/auth/admin.directory.user'] def main():     """Shows basic usage of the Admin SDK Directory API.     Prints the emails and names of the first 10 users in the domain.     """     creds = None     # The file token.json stores the user's access and refresh tokens, and is     # created automatically when the authorization flow completes for the first     # time.     if os.path.exists('token.json'):         creds = Credentials.from_authorized_user_file('token.json', SCOPES)     # If there are no (valid) credentials available, let the user log in.     if not creds or not creds.valid:         if creds and creds.expired and creds.refresh_token:             creds.refresh(Request())         else:             flow = InstalledAppFlow.from_client_secrets_file(                 'credentials.json', SCOPES)             creds = flow.run_local_server(port=0)         # Save the credentials for the next run         with open('token.json', 'w') as token:             token.write(creds.to_json())     service = build('admin', 'directory_v1', credentials=creds)     results = service.users().create(customer='customer_name_here').execute() if __name__ == '__main__':     main() We tried to add other arguments to the list but that also didn't work. We tried to find the docs for the API but couldn't find it.
[ "You appear to have an issue with how you are creating the user, who you are inserting.\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\n\n\n# If modifying these scopes, delete the file token.json.\nSCOPES = ['https://www.googleapis.com/auth/admin.directory.user']\n\nCREDENTIALS = 'C:\\Development\\FreeLance\\GoogleSamples\\Credentials\\Workspace-Installed-TestEverything.json'\n\nSTORED_USER_TOKEN = 'createUserToken.json'\n\n\ndef main():\n \"\"\"Shows basic usage of the Admin SDK Directory API.\n Prints the emails and names of the first 10 users in the domain.\n \"\"\"\n creds = None\n # The file token.json stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists(STORED_USER_TOKEN):\n creds = Credentials.from_authorized_user_file(STORED_USER_TOKEN, SCOPES)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n CREDENTIALS, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(STORED_USER_TOKEN, 'w') as token:\n token.write(creds.to_json())\n\n service = build('admin', 'directory_v1', credentials=creds)\n\n\n new_user = {\n \"name\": {\n \"givenName\": \"Contact\",\n \"familyName\": \"Daimto\",\n },\n \"primaryEmail\": \"xxx@daimto.com\",\n \"recoveryEmail\": \"xxxx@daimto.com\",\n \"password\": \"Temp42!!!\",\n \"changePasswordAtNextLogin\": True\n }\n response = service.users().insert(body=new_user ).execute()\n\n\nif __name__ == '__main__':\n main()\n\n" ]
[ 0 ]
[]
[]
[ "google_api", "google_api_python_client", "google_oauth", "google_workspace", "python" ]
stackoverflow_0074531157_google_api_google_api_python_client_google_oauth_google_workspace_python.txt
Q: Tried creating binary decision variable in place of conditional if statement in Gurobi. Getting constraint error I have followed all existing discussion posts and instructions on how to code conditional constraints in Gurobi. I cannot figure out why I am getting this error. GurobiError: Constraint has no bool value (are you trying "lb <= expr <= ub"?) See below code snippet from python script: b = {} gap = {} for k in range(start_yr,end_yr):     for i in multi_df:         reduction[start_yr,i] = 0         reduction[k+1,i] = 0         for j in range(len(multi_df[i]['COMPLIANCE'])):             for u in multi_df[i]['utilities']:               reduction[k+1,i] += td_matrix[i,j,k]*multi_df[i]['COMPLIANCE']['CO2 reduction (Tons CO2e)'].iloc[j]*(multi_df[i]['utilities'][u]['tCO2_per_dict'][k]/multi_df[i]['utilities'][u]['tCO2_per_dict'][start_yr])         multi_df[i]['annual_total2'] = {}         multi_df[i]['annual_total2'][str(k)] = m.addVar(vtype=gp.GRB.CONTINUOUS, name="x")         multi_df[i]['annual_total2'][str(k)] = multi_df[i]['annual_total'][str(k)]-sum(reduction[k,i] for k in range(start_yr,k))         m.addConstr(multi_df[i]['annual_total2'][str(k)], gp.GRB.GREATER_EQUAL,0)                  b[k,i] = m.addVar(vtype=gp.GRB.BINARY, name='b')         gap[k,i] = m.addVar(lb=0,vtype=gp.GRB.CONTINUOUS,name='gap')         M = 15000                  m.addConstr(multi_df[i]['annual_total2'][str(k)] >= multi_df[i]['SQFT']*(em_fac[k]) - M*(1-count_tax[k,i]))         m.addConstr(multi_df[i]['annual_total2'][str(k)] <= multi_df[i]['SQFT']*(em_fac[k]) + M*count_tax[k,i])         gap[k,i] = b[k,i]*(multi_df[i]['annual_total2'][str(k)] - multi_df[i]['SQFT']*(em_fac[k]))*cost     I get the above error for the below lines with my 2 Big M constraints: m.addConstr(multi_df[i]['annual_total2'][str(k)] >= multi_df[i]['SQFT']*(em_fac[k]) - M*(1-count_tax[k,i]))          I initially tried using basic if else statements but ran into gurobi errors saying I had to go this big M route. A: Your code is very hard to read - please reformulate and post an MRE as suggested in the comments. I suspect that multi_df[i][...][...] already contains a linear expression and is not just holding a Gurobi variable. Hence, the warning about multiple <= or >= in one constraint.
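For reference, here is the big-M indicator pattern in isolation, stripped of the dataframe bookkeeping — a minimal gurobipy sketch with toy numbers (the values are placeholders, not the model from the question):

import gurobipy as gp
from gurobipy import GRB

m = gp.Model()
x = m.addVar(lb=0, name="x")
b = m.addVar(vtype=GRB.BINARY, name="b")
M, threshold = 15000, 100

# With b == 1 the first constraint enforces x >= threshold and the second is
# non-binding; with b == 0 the first is non-binding and the second caps x at
# threshold, so any x > threshold forces b to 1.
m.addConstr(x >= threshold - M * (1 - b), name="bigM_lower")
m.addConstr(x <= threshold + M * b, name="bigM_upper")

m.setObjective(x + 10 * b, GRB.MINIMIZE)
m.optimize()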
Tried creating binary decision variable in place of conditional if statement in Gurobi. Getting constraint error
I have followed all existing discussion posts and instructions on how to code conditional constraints in Gurobi. I cannot figure out why I am getting this error. GurobiError: Constraint has no bool value (are you trying "lb <= expr <= ub"?) See below code snippet from python script: b = {} gap = {} for k in range(start_yr,end_yr):     for i in multi_df:         reduction[start_yr,i] = 0         reduction[k+1,i] = 0         for j in range(len(multi_df[i]['COMPLIANCE'])):             for u in multi_df[i]['utilities']:               reduction[k+1,i] += td_matrix[i,j,k]*multi_df[i]['COMPLIANCE']['CO2 reduction (Tons CO2e)'].iloc[j]*(multi_df[i]['utilities'][u]['tCO2_per_dict'][k]/multi_df[i]['utilities'][u]['tCO2_per_dict'][start_yr])         multi_df[i]['annual_total2'] = {}         multi_df[i]['annual_total2'][str(k)] = m.addVar(vtype=gp.GRB.CONTINUOUS, name="x")         multi_df[i]['annual_total2'][str(k)] = multi_df[i]['annual_total'][str(k)]-sum(reduction[k,i] for k in range(start_yr,k))         m.addConstr(multi_df[i]['annual_total2'][str(k)], gp.GRB.GREATER_EQUAL,0)                  b[k,i] = m.addVar(vtype=gp.GRB.BINARY, name='b')         gap[k,i] = m.addVar(lb=0,vtype=gp.GRB.CONTINUOUS,name='gap')         M = 15000                  m.addConstr(multi_df[i]['annual_total2'][str(k)] >= multi_df[i]['SQFT']*(em_fac[k]) - M*(1-count_tax[k,i]))         m.addConstr(multi_df[i]['annual_total2'][str(k)] <= multi_df[i]['SQFT']*(em_fac[k]) + M*count_tax[k,i])         gap[k,i] = b[k,i]*(multi_df[i]['annual_total2'][str(k)] - multi_df[i]['SQFT']*(em_fac[k]))*cost     I get the above error for the below lines with my 2 Big M constraints: m.addConstr(multi_df[i]['annual_total2'][str(k)] >= multi_df[i]['SQFT']*(em_fac[k]) - M*(1-count_tax[k,i]))          I initially tried using basic if else statements but ran into gurobi errors saying I had to go this big M route.
[ "Your code is very hard to read - please reformulate and post an MRE as suggested in the comments.\nI suspect that multi_df[i][...][...] already contains a linear expression and is not just holding a Gurobi variable. Hence, the warning about multiple <= or >= in one constraint.\n" ]
[ 0 ]
[]
[]
[ "gurobi", "optimization", "python" ]
stackoverflow_0074470311_gurobi_optimization_python.txt
Q: Error while using multiprocessing in Pygame i'm making a text-based RPG and am trying to use multiproccessing to run both the pygame check function and the game function at the same time. This is my first time using multiprocessing so i'm not entirely sure what is going on. Here is the (important) code: from csv import reader import pygame from sys import exit import multiprocessing def check(): for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() exit() def startgame(): play(input("Present Command\n")) play_game_process = multiprocessing.Process(target=startgame) play_game_process.start() check_process = multiprocessing.Process(target=check) check_process.start() When i run the code, i am hit with a very large error message: pygame 2.1.2 (SDL 2.0.18, Python 3.10.8) Hello from the pygame community. https://www.pygame.org/contribute.html pygame 2.1.2 (SDL 2.0.18, Python 3.10.8) Hello from the pygame community. https://www.pygame.org/contribute.html pygame 2.1.2 (SDL 2.0.18, Python 3.10.8) Hello from the pygame community. https://www.pygame.org/contribute.html Traceback (most recent call last): File "<string>", line 1, in <module> File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 116, in spawn_main exitcode = _main(fd, parent_sentinel) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 125, in _main prepare(preparation_data) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 236, in prepare _fixup_main_from_path(data['init_main_from_path']) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 287, in _fixup_main_from_path main_content = runpy.run_path(main_path, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 289, in run_path return _run_module_code(code, init_globals, run_name, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 96, in _run_module_code _run_code(code, mod_globals, init_globals, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 86, in _run_code exec(code, run_globals) File "c:\Users\Dougl\Documents\Coding\Python\games\Command\command.py", line 193, in <module> play_game_process.start() File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\process.py", line 121, in start self._popen = self._Popen(self) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\context.py", line 224, in _Popen return _default_context.get_context().Process._Popen(process_obj) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\context.py", line 336, in _Popen return Popen(process_obj) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\popen_spawn_win32.py", line 45, in __init__ prep_data = spawn.get_preparation_data(process_obj._name) File "C:\Program 
Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 154, in get_preparation_data _check_not_importing_main() File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 134, in _check_not_importing_main raise RuntimeError(''' RuntimeError: An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable. Traceback (most recent call last): File "<string>", line 1, in <module> File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 116, in spawn_main exitcode = _main(fd, parent_sentinel) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 125, in _main prepare(preparation_data) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 236, in prepare _fixup_main_from_path(data['init_main_from_path']) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 287, in _fixup_main_from_path main_content = runpy.run_path(main_path, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 289, in run_path return _run_module_code(code, init_globals, run_name, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 96, in _run_module_code _run_code(code, mod_globals, init_globals, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 86, in _run_code exec(code, run_globals) File "c:\Users\Dougl\Documents\Coding\Python\games\Command\command.py", line 193, in <module> play_game_process.start() File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\process.py", line 121, in start self._popen = self._Popen(self) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\context.py", line 224, in _Popen return _default_context.get_context().Process._Popen(process_obj) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\context.py", line 336, in _Popen return Popen(process_obj) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\popen_spawn_win32.py", line 45, in __init__ prep_data = spawn.get_preparation_data(process_obj._name) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 154, in get_preparation_data _check_not_importing_main() File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 134, in _check_not_importing_main raise RuntimeError(''' 
RuntimeError: An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable. I am not sure what is going on here, please help if you can! A: Did you read the end of the error message, and the very clear instructions there? When using multiprocessing, your main code needs to be wrapped in the if __name__ == "__main__": idiom (this is also documented in the multiprocessing docs). I.e. something like import multiprocessing from sys import exit import pygame def check(): for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() exit() def startgame(): play(input("Present Command\n")) def main(): play_game_process = multiprocessing.Process(target=startgame) play_game_process.start() check_process = multiprocessing.Process(target=check) check_process.start() if __name__ == '__main__': main() Note, however, that that probably won't do what you want, since the check and startgame functions will be run in entirely different processes which don't know about each other at all.
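A side note on the answer's closing caveat (my addition, not part of the original answer): pygame's event queue belongs to the process that owns the display, so splitting check() and the game loop into separate processes gives two programs that share no state. A common alternative is to keep pygame in the main process and read typed commands on a background thread, handing them to the game loop through a queue; the command handling below is a hypothetical stand-in for the question's play() function.

import queue
import threading

import pygame


def read_commands(cmd_queue):
    # input() blocks, so it runs on its own thread and never stalls the pygame loop
    while True:
        cmd_queue.put(input("Present Command\n"))


def main():
    pygame.init()
    screen = pygame.display.set_mode((640, 480))
    clock = pygame.time.Clock()
    commands = queue.Queue()
    threading.Thread(target=read_commands, args=(commands,), daemon=True).start()

    running = True
    while running:
        for event in pygame.event.get():      # event handling stays with the display
            if event.type == pygame.QUIT:
                running = False
        try:
            cmd = commands.get_nowait()       # poll for typed commands without blocking
        except queue.Empty:
            cmd = None
        if cmd is not None:
            print(f"Got command: {cmd}")      # stand-in for play(cmd)
        screen.fill((0, 0, 0))
        pygame.display.flip()
        clock.tick(30)

    pygame.quit()


if __name__ == "__main__":
    main()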
Error while using multiprocessing in Pygame
i'm making a text-based RPG and am trying to use multiproccessing to run both the pygame check function and the game function at the same time. This is my first time using multiprocessing so i'm not entirely sure what is going on. Here is the (important) code: from csv import reader import pygame from sys import exit import multiprocessing def check(): for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() exit() def startgame(): play(input("Present Command\n")) play_game_process = multiprocessing.Process(target=startgame) play_game_process.start() check_process = multiprocessing.Process(target=check) check_process.start() When i run the code, i am hit with a very large error message: pygame 2.1.2 (SDL 2.0.18, Python 3.10.8) Hello from the pygame community. https://www.pygame.org/contribute.html pygame 2.1.2 (SDL 2.0.18, Python 3.10.8) Hello from the pygame community. https://www.pygame.org/contribute.html pygame 2.1.2 (SDL 2.0.18, Python 3.10.8) Hello from the pygame community. https://www.pygame.org/contribute.html Traceback (most recent call last): File "<string>", line 1, in <module> File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 116, in spawn_main exitcode = _main(fd, parent_sentinel) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 125, in _main prepare(preparation_data) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 236, in prepare _fixup_main_from_path(data['init_main_from_path']) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 287, in _fixup_main_from_path main_content = runpy.run_path(main_path, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 289, in run_path return _run_module_code(code, init_globals, run_name, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 96, in _run_module_code _run_code(code, mod_globals, init_globals, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 86, in _run_code exec(code, run_globals) File "c:\Users\Dougl\Documents\Coding\Python\games\Command\command.py", line 193, in <module> play_game_process.start() File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\process.py", line 121, in start self._popen = self._Popen(self) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\context.py", line 224, in _Popen return _default_context.get_context().Process._Popen(process_obj) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\context.py", line 336, in _Popen return Popen(process_obj) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\popen_spawn_win32.py", line 45, in __init__ prep_data = spawn.get_preparation_data(process_obj._name) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 154, in 
get_preparation_data _check_not_importing_main() File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 134, in _check_not_importing_main raise RuntimeError(''' RuntimeError: An attempt has been made to start a new process before the current process has finished its bootstrapping phase. This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable. Traceback (most recent call last): File "<string>", line 1, in <module> File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 116, in spawn_main exitcode = _main(fd, parent_sentinel) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 125, in _main prepare(preparation_data) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 236, in prepare _fixup_main_from_path(data['init_main_from_path']) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 287, in _fixup_main_from_path main_content = runpy.run_path(main_path, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 289, in run_path return _run_module_code(code, init_globals, run_name, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 96, in _run_module_code _run_code(code, mod_globals, init_globals, File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 86, in _run_code exec(code, run_globals) File "c:\Users\Dougl\Documents\Coding\Python\games\Command\command.py", line 193, in <module> play_game_process.start() File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\process.py", line 121, in start self._popen = self._Popen(self) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\context.py", line 224, in _Popen return _default_context.get_context().Process._Popen(process_obj) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\context.py", line 336, in _Popen return Popen(process_obj) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\popen_spawn_win32.py", line 45, in __init__ prep_data = spawn.get_preparation_data(process_obj._name) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 154, in get_preparation_data _check_not_importing_main() File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.10_3.10.2288.0_x64__qbz5n2kfra8p0\lib\multiprocessing\spawn.py", line 134, in _check_not_importing_main raise RuntimeError(''' RuntimeError: An attempt has been made to start a new process before the current process has finished its bootstrapping phase. 
This probably means that you are not using fork to start your child processes and you have forgotten to use the proper idiom in the main module: if __name__ == '__main__': freeze_support() ... The "freeze_support()" line can be omitted if the program is not going to be frozen to produce an executable. I am not sure what is going on here, please help if you can!
[ "Did you read the end of the error message, and the very clear instructions there? When using multiprocessing, your main code needs to be wrapped in the if __name__ == \"__main__\": idiom (this is also documented in the multiprocessing docs).\nI.e. something like\nimport multiprocessing\nfrom sys import exit\n\nimport pygame\n\n\ndef check():\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n\ndef startgame():\n play(input(\"Present Command\\n\"))\n\n\ndef main():\n play_game_process = multiprocessing.Process(target=startgame)\n play_game_process.start()\n\n check_process = multiprocessing.Process(target=check)\n check_process.start()\n\n\nif __name__ == '__main__':\n main()\n\nNote, however, that that probably won't do what you want, since the check and startgame functions will be run in entirely different processes which don't know about each other at all.\n" ]
[ 0 ]
[]
[]
[ "multiprocessing", "pygame", "python", "python_multiprocessing", "rpg" ]
stackoverflow_0074531737_multiprocessing_pygame_python_python_multiprocessing_rpg.txt
Q: Trim leading zero's using python pandas without changing the datatype of any columns I have a csv file of around 42000 lines and around 80 columns, from which I need to remove leading Zero's, hence I am using Pandas to_csv and saving it back to text file by which leading Zero's are removed. Any column may contain null values in any row, but those columns are getting converted to Float datatype and getting decimal values as output, I want to avoid that scenario. For example, below is a sample of my original file. 0000055|O|Price Rite Marketplace|361|1600 Memorial Dr|Chicopee|MA|010203933|Chicopee|25|013|USA|05|1|H|C|42.2001|-72.5731|A|250138113012012|||10 0000071|O|Big Es Supermarket|189|11 Union St|Easthampton|MA|010271417|Easthampton|25|015|USA|05|5|A|I|42.2697|-72.6717|A|250158224021037|| 0000084|O|Big Y Supermarket|14|441 N Main St|East Longmeadow|MA|010281804|East Longmeadow|25|013|USA|05|5|G|C|42.0788|-72.5280|A|250138134012011|||15 0000101|O|Stop & Shop|95|440 Russell St|Hadley|MA|010359566|Hadley|25|015|USA|05|5|K|C|42.3644|-72.5382|A|250158214004004|||14 0000139|O|Key Food Marketplace|2508|13 Cabot St|Holyoke|MA|010406055|Holyoke|25|013|USA|05|5|A|C|42.1980|-72.6042|A|250138115002019|||06 0000149|O|Stop & Shop|9|28 Lincoln St|Holyoke|MA|010403472|Holyoke|25|013|USA|05|5|K|C|42.2150|-72.6172|A|250138118005012|||13 I used the below method to convert to import pandas as pd df = pd.read_csv(r"/home/ter/stest/cminxte1.txt", sep="|") df.to_csv(r"/home/ter/stest/cminxte.txt", sep='|', index=False) The output file looks like 55|O|Price Rite Marketplace|361|1600 Memorial Dr|Chicopee|MA|10203933|Chicopee|25|13|USA|5|1|H|C|42.2001|-72.5731|A|250138113012012|||10.0 71|O|Big Es Supermarket|189|11 Union St|Easthampton|MA|10271417|Easthampton|25|15|USA|5|5|A|I|42.2697|-72.6717|A|250158224021037|| 84|O|Big Y Supermarket|14|441 N Main St|East Longmeadow|MA|10281804|East Longmeadow|25|13|USA|5|5|G|C|42.0788|-72.528|A|250138134012011|||15.0 101|O|Stop & Shop|95|440 Russell St|Hadley|MA|10359566|Hadley|25|15|USA|5|5|K|C|42.3644|-72.5382|A|250158214004004|||14.0 139|O|Key Food Marketplace|2508|13 Cabot St|Holyoke|MA|10406055|Holyoke|25|13|USA|5|5|A|C|42.198|-72.6042|A|250138115002019|||6.0 149|O|Stop & Shop|9|28 Lincoln St|Holyoke|MA|10403472|Holyoke|25|13|USA|5|5|K|C|42.215|-72.6172|A|250138118005012|||13.0 It has removed all the leading Zero's in all the columns as expected, however, at the last column, it is converting to float with decimal values as that column has got null values. Any idea on how can this be rectified? A: First convert all values to strings and in next step remove trailing zeros: df = pd.read_csv(r"/home/ter/stest/cminxte1.txt", sep="|", dtype=str) df = df.apply(lambda x: x.str.lstrip('0')) df.to_csv(r"/home/ter/stest/cminxte.txt", sep='|', index=False)
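One caveat on the lstrip('0') approach worth adding (my note, not from the answer): a cell that contains nothing but zeros, such as "0", becomes an empty string after lstrip. If that matters, a regex that always leaves at least one digit can be used instead. The sketch below also passes header=None, which is an assumption based on the sample rows having no header line.

import pandas as pd

df = pd.read_csv("cminxte1.txt", sep="|", dtype=str, header=None)
# strip leading zeros but keep at least one digit: "0007" -> "7", "0" -> "0"
df = df.apply(lambda col: col.str.replace(r"^0+(?=\d)", "", regex=True))
df.to_csv("cminxte.txt", sep="|", index=False, header=False)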
Trim leading zero's using python pandas without changing the datatype of any columns
I have a csv file of around 42000 lines and around 80 columns, from which I need to remove leading Zero's, hence I am using Pandas to_csv and saving it back to text file by which leading Zero's are removed. Any column may contain null values in any row, but those columns are getting converted to Float datatype and getting decimal values as output, I want to avoid that scenario. For example, below is a sample of my original file. 0000055|O|Price Rite Marketplace|361|1600 Memorial Dr|Chicopee|MA|010203933|Chicopee|25|013|USA|05|1|H|C|42.2001|-72.5731|A|250138113012012|||10 0000071|O|Big Es Supermarket|189|11 Union St|Easthampton|MA|010271417|Easthampton|25|015|USA|05|5|A|I|42.2697|-72.6717|A|250158224021037|| 0000084|O|Big Y Supermarket|14|441 N Main St|East Longmeadow|MA|010281804|East Longmeadow|25|013|USA|05|5|G|C|42.0788|-72.5280|A|250138134012011|||15 0000101|O|Stop & Shop|95|440 Russell St|Hadley|MA|010359566|Hadley|25|015|USA|05|5|K|C|42.3644|-72.5382|A|250158214004004|||14 0000139|O|Key Food Marketplace|2508|13 Cabot St|Holyoke|MA|010406055|Holyoke|25|013|USA|05|5|A|C|42.1980|-72.6042|A|250138115002019|||06 0000149|O|Stop & Shop|9|28 Lincoln St|Holyoke|MA|010403472|Holyoke|25|013|USA|05|5|K|C|42.2150|-72.6172|A|250138118005012|||13 I used the below method to convert to import pandas as pd df = pd.read_csv(r"/home/ter/stest/cminxte1.txt", sep="|") df.to_csv(r"/home/ter/stest/cminxte.txt", sep='|', index=False) The output file looks like 55|O|Price Rite Marketplace|361|1600 Memorial Dr|Chicopee|MA|10203933|Chicopee|25|13|USA|5|1|H|C|42.2001|-72.5731|A|250138113012012|||10.0 71|O|Big Es Supermarket|189|11 Union St|Easthampton|MA|10271417|Easthampton|25|15|USA|5|5|A|I|42.2697|-72.6717|A|250158224021037|| 84|O|Big Y Supermarket|14|441 N Main St|East Longmeadow|MA|10281804|East Longmeadow|25|13|USA|5|5|G|C|42.0788|-72.528|A|250138134012011|||15.0 101|O|Stop & Shop|95|440 Russell St|Hadley|MA|10359566|Hadley|25|15|USA|5|5|K|C|42.3644|-72.5382|A|250158214004004|||14.0 139|O|Key Food Marketplace|2508|13 Cabot St|Holyoke|MA|10406055|Holyoke|25|13|USA|5|5|A|C|42.198|-72.6042|A|250138115002019|||6.0 149|O|Stop & Shop|9|28 Lincoln St|Holyoke|MA|10403472|Holyoke|25|13|USA|5|5|K|C|42.215|-72.6172|A|250138118005012|||13.0 It has removed all the leading Zero's in all the columns as expected, however, at the last column, it is converting to float with decimal values as that column has got null values. Any idea on how can this be rectified?
[ "First convert all values to strings and in next step remove trailing zeros:\ndf = pd.read_csv(r\"/home/ter/stest/cminxte1.txt\", sep=\"|\", dtype=str) \ndf = df.apply(lambda x: x.str.lstrip('0'))\ndf.to_csv(r\"/home/ter/stest/cminxte.txt\", sep='|', index=False)\n\n" ]
[ 1 ]
[]
[]
[ "export_to_csv", "pandas", "python" ]
stackoverflow_0074531771_export_to_csv_pandas_python.txt
Q: How do I solve "pythoncom39.dll could not be located error"? It has started appearing ever since I installed Anaconda on my PC. It doesn't affect anything and when I press "Ok" it goes away. But it is quite annoying and I would like to know the reason. It has only appeared when I try to run a development server in Django or try to install python modules using pip. Is there any way to solve this? A: It happens because pythoncom39.dll in the folder anaconda3\Library\bin\ has crashed; you need to replace it with a new file. A: Yes, that dll file might be corrupted. Just replace it, and then try again. You can download the pythoncom39.dll files from the following link https://freeonlinestudies.com/python-dlls/
How do I solve "pythoncom39.dll could not be located error"?
It has started appearing ever since I installed Anaconda on my PC. It doesn't affect anything and when I press "Ok" it goes away. But it is quite annoying and I would like to know the reason. It has only appeared when I try to run a development server in Django or try to install python modules using pip. Is there any way to solve this?
[ "It happens because anacondaa3\\Library\\bin\\ in this folder pythondicom39.dll has crashed you need to replace it with a new file\n", "Yes, that dll file might be corrupted. Just replace, and then try it. You can download the pythoncom39.dll files from the following link\nhttps://freeonlinestudies.com/python-dlls/\n" ]
[ 0, 0 ]
[]
[]
[ "anaconda", "pip", "python" ]
stackoverflow_0070557619_anaconda_pip_python.txt
Q: Remove part of a string from pd.to_datetime() unconverted values I tried to convert a column of dates to datetime using pd.to_datetime(df, format='%Y-%m-%d_%H-%M-%S') but I received the error ValueError: unconverted data remains: .1 I ran: data.loc[pd.to_datetime(data.date, format='%Y-%m-%d_%H-%M-%S', errors='coerce').isnull(), 'date'] to identify the problem. 119/1037808 dates in the date column have an extra ".1" at the end of them. Other than the ".1", the dates are fine. How can I remove the ".1" from the end of those dates only and then convert the column values to datetime? Here is an example dataframe that recreates the issue: import pandas as pd data = pd.DataFrame({"date" : ["2022-01-15_08-11-00.1","2022-01-15_08-11-30","2022-01-15_08-12-00.1", "2022-01-15_08-12-30"], "value" : [1,2,3,4]}) I have tried: data.date = data.date.replace(".1", "") and data = data.replace(".1", "") but these did not remove the ".1". The final result should look like this: data = pd.DataFrame({"date" : ["2022-01-15_08-11-00","2022-01-15_08-11-30","2022-01-15_08-12-00", "2022-01-15_08-12-30"], "value" : [1,2,3,4]}) A: You can use pandas.Series.replace to get rid of the extra dot/number : data["date"]= pd.to_datetime(data["date"].replace(r"\.\d+", "", regex=True), format="%Y-%m-%d_%H-%M-%S") # Output : print(data) print(data.dtypes) date value 0 2022-01-15 08:11:00 1 1 2022-01-15 08:11:30 2 2 2022-01-15 08:12:00 3 3 2022-01-15 08:12:30 4 date datetime64[ns] value int64 dtype: object If you don't want a datetime format, use just data["date"].replace(r"\.\d+", "", regex=True) A: data = data.assign(date=pd.to_datetime(data['date'].str.split('\.').str[0], format="%Y-%m-%d_%H-%M-%S"))
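A short note on why the two replace attempts in the question changed nothing (my explanation, not from the answers): Series.replace and DataFrame.replace match whole cell values by default, so ".1" only hits a cell whose entire content is ".1". Substring removal needs the .str accessor or regex=True, for example:

import pandas as pd

data = pd.DataFrame({"date": ["2022-01-15_08-11-00.1", "2022-01-15_08-11-30"],
                     "value": [1, 2]})
# remove a trailing ".<digits>" at substring level; the dot is escaped on purpose
data["date"] = data["date"].str.replace(r"\.\d+$", "", regex=True)
data["date"] = pd.to_datetime(data["date"], format="%Y-%m-%d_%H-%M-%S")
print(data.dtypes)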
Remove part of a string from pd.to_datetime() unconverted values
I tried to convert a column of dates to datetime using pd.to_datetime(df, format='%Y-%m-%d_%H-%M-%S') but I received the error ValueError: unconverted data remains: .1 I ran: data.loc[pd.to_datetime(data.date, format='%Y-%m-%d_%H-%M-%S', errors='coerce').isnull(), 'date'] to identify the problem. 119/1037808 dates in the date column have an extra ".1" at the end of them. Other than the ".1", the dates are fine. How can I remove the ".1" from the end of those dates only and then convert the column values to datetime? Here is an example dataframe that recreates the issue: import pandas as pd data = pd.DataFrame({"date" : ["2022-01-15_08-11-00.1","2022-01-15_08-11-30","2022-01-15_08-12-00.1", "2022-01-15_08-12-30"], "value" : [1,2,3,4]}) I have tried: data.date = data.date.replace(".1", "") and data = data.replace(".1", "") but these did not remove the ".1". The final result should look like this: data = pd.DataFrame({"date" : ["2022-01-15_08-11-00","2022-01-15_08-11-30","2022-01-15_08-12-00", "2022-01-15_08-12-30"], "value" : [1,2,3,4]})
[ "You can use pandas.Series.replace to get rid of the extra dot/number :\ndata[\"date\"]= pd.to_datetime(data[\"date\"].replace(r\"\\.\\d+\", \"\",\n regex=True),\n format=\"%Y-%m-%d_%H-%M-%S\")\n\n# Output :\nprint(data)\nprint(data.dtypes)\n\n date value\n0 2022-01-15 08:11:00 1\n1 2022-01-15 08:11:30 2\n2 2022-01-15 08:12:00 3\n3 2022-01-15 08:12:30 4\ndate datetime64[ns]\nvalue int64\ndtype: object\n\nIf you don't want a datetime format, use just data[\"date\"].replace(r\"\\.\\d+\", \"\", regex=True)\n", "data = data.assign(date=pd.to_datetime(data['date'].str.split('\\.').str[0], format=\"%Y-%m-%d_%H-%M-%S\"))\n\n" ]
[ 1, 0 ]
[]
[]
[ "datetime", "pandas", "python", "string" ]
stackoverflow_0074531567_datetime_pandas_python_string.txt
Q: Is there a way to give ID to other tables based on an ID with one table? I have two tables as following: ID Name Age 1 aaa 23 2 bbb 21 3 ccc 25 4 ddd 20 ID Name Age Phone aaa 23 0000 bbb 21 1111 ccc 28 2222 ddd 29 3333 The first table name as T1 include ID that I gave them unique ID, however from the second table T2 the ID column is empty. How can I add the same ID to the second table T2? Note: The order of the names is different, and I have about 3000 records. Can anyone help me how to do that by Microsoft Excel or Access or by programming language python? Regards, A: Try with an update query having a subquery: Update T2 Set T2.ID = (Select Top 1 T1.Id From T1 Where T1.Name = T2.Name)
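Because the question also asks whether this can be done in Python, here is a minimal pandas sketch (my addition, with the sample values typed in by hand) that copies each ID from T1 into T2 by matching on Name; it assumes names are unique in T1, just like the SQL answer does.

import pandas as pd

t1 = pd.DataFrame({"ID": [1, 2, 3, 4],
                   "Name": ["aaa", "bbb", "ccc", "ddd"],
                   "Age": [23, 21, 25, 20]})
t2 = pd.DataFrame({"Name": ["ccc", "aaa", "ddd", "bbb"],
                   "Age": [28, 23, 29, 21],
                   "Phone": ["2222", "0000", "3333", "1111"]})

# look up each name's ID in T1 and write it into a new ID column of T2
t2["ID"] = t2["Name"].map(t1.set_index("Name")["ID"])
print(t2)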
Is there a way to give ID to other tables based on an ID with one table?
I have two tables as following: ID Name Age 1 aaa 23 2 bbb 21 3 ccc 25 4 ddd 20 ID Name Age Phone aaa 23 0000 bbb 21 1111 ccc 28 2222 ddd 29 3333 The first table name as T1 include ID that I gave them unique ID, however from the second table T2 the ID column is empty. How can I add the same ID to the second table T2? Note: The order of the names is different, and I have about 3000 records. Can anyone help me how to do that by Microsoft Excel or Access or by programming language python? Regards,
[ "Try with an update query having a subquery:\nUpdate T2\nSet T2.ID = \n (Select Top 1 T1.Id \n From T1\n Where T1.Name = T2.Name)\n\n" ]
[ 0 ]
[]
[]
[ "csv", "excel", "ms_access", "pandas", "python" ]
stackoverflow_0074529743_csv_excel_ms_access_pandas_python.txt
Q: How do i generate different new objects with random attributes every time i run a function? I am making a project where you can manage the passengers on a buss. I have created the class system, and know how to generate the random attributes. But I don't know how to generate multiple new persons, with different attributes, when you choose to pick up a new person. I don't know how to make it so all of them doesn't save in the same variable and overwrite eachother when a new person is picked up, and given random attributes. I've been trying to utilize lists, but I'm not getting that to work either. I also don't know how i would be able to manage every person using their name, since it will only be an attribute. import random as rand nameList = ["Jack", "Erik", "Bob", "Anna", "Leo", "Nikodemus", "Samuel", "David", "Lucas", "Marcus", "Noah", "Simon", "Harley", "Abigale" "Magdalena", "Marie", "Lewis", "John", "Gus", "Robin", "Jakob"] """ Something similar to this to make it random self.namn(rand.choice(nameList)) self.age(rand.randint(1, 120)) """ class Person(): """ Person is a class for representing the persons in the bus. Each object that is created from the class has a name and a age, as well as methods to return alternativly modify resepective attribute. """ def __init__(self, name, age): self.namne = namne self.age = age # Stringrepresentation of object. def __str__(self): return f"This is {self.namn}. He/she is {self.ålder} years old." # Setters def setNaem(self, newName): self.name = newName def setAge(self, newAge): self.age = newAge # Getters def getNaem(self): return self.name def getAge(self): return self.age # Adds a new person to the bus. def pickUp(): amountUp = input("How many passengers do you want to pick up?" "\n-> ") something.append(amountUp) print(f"Picked up {amountUp} persons.") A: Is it what you are trying to do? create several unique Person objects to include in a list: import random class Person: """ Person is a class for representing the persons in the bus. Each object that is created from the class has a name and a age, as well as methods to return alternativly modify resepective attribute.""" def __init__(self, name, age): self.name = name self.age = age # Stringrepresentation of object. def __str__(self): return f"This is {self.name}. He/she is {self.age} years old." # Setters def setName(self, newName): self.name = newName def setAge(self, newAge): self.age = newAge # Getters def getName(self): return self.name def getAge(self): return self.age # Adds a new person to the bus. def pickUp(): nameList = ["Jack", "Erik", "Bob", "Anna", "Leo", "Nikodemus", "Samuel", "David", "Lucas", "Marcus", "Noah", "Simon", "Harley", "Abigale", "Magdalena", "Marie", "Lewis", "John", "Gus", "Robin", "Jakob"] amountUp = int(input("How many passengers do you want to pick up?" "\n-> ")) bus = [] for counter in range(amountUp): name = random.choice(nameList) nameList.remove(name) age = random.randint(1, 120) person = Person(name, age) bus.append(person) print(f"Picked up {amountUp} persons.") pass pickUp() Then you can check what is inside the bus variable.
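On the last part of the question, managing every person through their name, which the answer above does not cover: one simple option is to keep the picked-up Person objects in a dict keyed by name instead of a plain list. A small sketch, assuming names are unique on the bus; the Person class is repeated here only so the snippet runs on its own.

class Person:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __str__(self):
        return f"This is {self.name}. He/she is {self.age} years old."


bus = {}                          # name -> Person
p = Person("Anna", 31)
bus[p.name] = p                   # register the passenger under her name

# later, fetch a passenger by name instead of scanning a list
anna = bus.get("Anna")
if anna is not None:
    print(anna)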
How do i generate different new objects with random attributes every time i run a function?
I am making a project where you can manage the passengers on a buss. I have created the class system, and know how to generate the random attributes. But I don't know how to generate multiple new persons, with different attributes, when you choose to pick up a new person. I don't know how to make it so all of them doesn't save in the same variable and overwrite eachother when a new person is picked up, and given random attributes. I've been trying to utilize lists, but I'm not getting that to work either. I also don't know how i would be able to manage every person using their name, since it will only be an attribute. import random as rand nameList = ["Jack", "Erik", "Bob", "Anna", "Leo", "Nikodemus", "Samuel", "David", "Lucas", "Marcus", "Noah", "Simon", "Harley", "Abigale" "Magdalena", "Marie", "Lewis", "John", "Gus", "Robin", "Jakob"] """ Something similar to this to make it random self.namn(rand.choice(nameList)) self.age(rand.randint(1, 120)) """ class Person(): """ Person is a class for representing the persons in the bus. Each object that is created from the class has a name and a age, as well as methods to return alternativly modify resepective attribute. """ def __init__(self, name, age): self.namne = namne self.age = age # Stringrepresentation of object. def __str__(self): return f"This is {self.namn}. He/she is {self.ålder} years old." # Setters def setNaem(self, newName): self.name = newName def setAge(self, newAge): self.age = newAge # Getters def getNaem(self): return self.name def getAge(self): return self.age # Adds a new person to the bus. def pickUp(): amountUp = input("How many passengers do you want to pick up?" "\n-> ") something.append(amountUp) print(f"Picked up {amountUp} persons.")
[ "Is it what you are trying to do? create several unique Person objects to include in a list:\nimport random\n\n\nclass Person:\n \"\"\" Person is a class for representing the persons in the bus. Each object that is created from the\n class has a name and a age, as well as methods to return alternativly modify resepective attribute.\"\"\"\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n # Stringrepresentation of object.\n def __str__(self):\n return f\"This is {self.name}. He/she is {self.age} years old.\"\n\n # Setters\n def setName(self, newName):\n self.name = newName \n\n def setAge(self, newAge):\n self.age = newAge\n\n # Getters\n def getName(self):\n return self.name\n\n def getAge(self):\n return self.age\n\n\n# Adds a new person to the bus.\ndef pickUp():\n nameList = [\"Jack\", \"Erik\", \"Bob\", \"Anna\", \"Leo\", \"Nikodemus\", \"Samuel\", \"David\", \"Lucas\", \"Marcus\", \"Noah\",\n \"Simon\",\n \"Harley\", \"Abigale\", \"Magdalena\", \"Marie\", \"Lewis\", \"John\", \"Gus\", \"Robin\", \"Jakob\"]\n\n amountUp = int(input(\"How many passengers do you want to pick up?\" \"\\n-> \"))\n bus = []\n for counter in range(amountUp):\n name = random.choice(nameList)\n nameList.remove(name)\n age = random.randint(1, 120)\n person = Person(name, age)\n bus.append(person)\n\n print(f\"Picked up {amountUp} persons.\")\n pass\n\npickUp()\n\nThen you can check what is inside the bus variable.\n" ]
[ 0 ]
[]
[]
[ "list", "oop", "python" ]
stackoverflow_0074528973_list_oop_python.txt
Q: Execute python script inside a python script I have a scenario where i want to dynamically generate a python script - inside my main python script - store it as a string and then when need be, execute this dynamically generated script from my main script. Is this possible, if so how? thanks A: For a script in a file use exec For a script in a string use eval !!! But !!! before you use strings passed in from an external source, sanity check them! Otherwise you expose the ability to execute arbitrary code from within you program, so range check your variables! You do not ever want to be asking the question: "excuse me mam, did you really name your son Robert'); DROP TABLE students; -- "? If you dont understand the reference - see this quick cartoon... http://xkcd.com/327/ but when you EVAL - you are taking full responsibility for the instructions that you are eval'ing. A: Read up on the execfile() function. http://docs.python.org/library/functions.html?highlight=exec#execfile A: May want to look at the statement exec: http://docs.python.org/release/2.5.2/ref/exec.html A: If you want to execute the script within the context of the main script, you might want to check eval [ http://docs.python.org/py3k/library/functions.html#eval ] A: Not sure how wise this is but isn't the exec function what you use if you need to execute Python code? A: There is precedence for what you are trying to do. The collections.namedtuple function builds a template string which is passed to exec in order to build a dynamically defined class. A: As of Python version 3.11, execfile (which was the most convenient function) is not listed as built-in function anymore. @George Lambert answer is the way to go. I'll add just some code for quick reference. Run script within script To run the script "run_me.py" within the script "main.py" do this: run_me.py be like print("hello_world") main.py be like with open("run_me.py", "r") as fl: exec(fl.read()) Run code contained in a string To run the code contained in a string us eval: main.py be like string_w_py_code = "print('hello world')" eval(string_w_py_code)
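To make the exec route from the answers concrete, here is a small sketch (my addition) that builds a script as a string and runs it in its own namespace dict so it cannot overwrite the main script's globals. Note that execfile() only exists on Python 2; on Python 3 the usual replacement for running a script file is exec(open(path).read()). The same security warning as above applies: never exec strings built from untrusted input.

generated = """
def greet(who):
    return f"hello, {who}"

result = greet("world")
"""

namespace = {}                  # isolated globals for the generated code
exec(generated, namespace)
print(namespace["result"])      # -> hello, world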
Execute python script inside a python script
I have a scenario where i want to dynamically generate a python script - inside my main python script - store it as a string and then when need be, execute this dynamically generated script from my main script. Is this possible, if so how? thanks
[ "For a script in a file use exec \nFor a script in a string use eval\n!!! But !!!\nbefore you use strings passed in from an external source, sanity check them!\nOtherwise you expose the ability to execute arbitrary code from \nwithin you program,\nso range check your variables!\nYou do not ever want to be asking the question:\n\"excuse me mam, did you really name your son Robert'); DROP TABLE students; -- \"? \nIf you dont understand the reference - see this quick cartoon... \nhttp://xkcd.com/327/\nbut when you EVAL - you are taking full responsibility for the instructions that you are eval'ing. \n", "Read up on the execfile() function.\nhttp://docs.python.org/library/functions.html?highlight=exec#execfile\n", "May want to look at the statement exec: http://docs.python.org/release/2.5.2/ref/exec.html\n", "If you want to execute the script within the context of the main script, you might want to check eval [ http://docs.python.org/py3k/library/functions.html#eval ]\n", "Not sure how wise this is but isn't the exec function what you use if you need to execute Python code?\n", "There is precedence for what you are trying to do. The collections.namedtuple function builds a template string which is passed to exec in order to build a dynamically defined class. \n", "As of Python version 3.11, execfile (which was the most convenient function) is not listed as built-in function anymore.\n@George Lambert answer is the way to go. I'll add just some code for quick reference.\nRun script within script\nTo run the script \"run_me.py\" within the script \"main.py\" do this:\n\nrun_me.py be like\n\nprint(\"hello_world\")\n\n\nmain.py be like\n\nwith open(\"run_me.py\", \"r\") as fl:\n exec(fl.read())\n\nRun code contained in a string\nTo run the code contained in a string us eval:\n\nmain.py be like\n\nstring_w_py_code = \"print('hello world')\"\neval(string_w_py_code)\n\n" ]
[ 13, 6, 1, 1, 0, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0003418357_python.txt
Q: How to write a simple python program that prints letters in ascending order? For example I would like to have: a . . . z aa ab . . . az bz . . . zz aaa and so on. Currently I'm here but I am lost. So feel free to propose a completely different solution. count = 0 string = '' for i in range(100): count += 1 if i % 26 == 0: count = 0 string += 'a' ch = 'a' x = chr(ord(ch) + count) string = string[:-1] + x print(i + 1, string) and my output is something like this: 1 a 2 b . . . 26 z 27 za 28 zb . . . 52 zz 53 zza 54 zzb . . . A: Maybe try something like the following: range(97,123) simply creates a range of numbers from 97 to 122, which converted to ASCII equates to a...z (done using chr()) So all our FUnction does, is it recieves a base string (starts with empty), prints out the base + range of charachters and calls its self with base + every charachter as the new base and depth decremented by 1 def printcharachters(base, depth): if depth > 0: for a in range(97, 123): print(base + chr(a)) for a in range(97, 123): printcharachters(base + chr(a), depth - 1) printcharachters("", 2) Replace the depth with your desired depth (for 2 the last string would be zz for 4 it would be zzzz). A: Using more of standard library: import itertools import string for i in range(1, 3): # Print items of length 1 and 2 for prod in itertools.product(string.ascii_lowercase, repeat=i): print(''.join(prod)) What you describe is a sorted output of n-th powers of set {'a'...'z'} in terms of cartesian products in string format (cartesian power n of a set X is, from a simple point of view, a set of all possible tuples (x_1, ..., x_n) where all x_i are contained in X). itertools.product implements exactly cartesian product and outputs in order of presence, so just loop over them. string.ascii_lowercase is a simple string containing all letters a..z in natural order. This is fast (uses C implementation of itertools).
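If only the first N labels are needed, as in the question's range(100) loop, the itertools approach from the second answer can be wrapped in a generator and sliced; a small sketch of that idea (my addition):

import itertools
import string


def labels():
    # yields a, b, ..., z, aa, ab, ..., zz, aaa, ... in order, indefinitely
    for length in itertools.count(1):
        for prod in itertools.product(string.ascii_lowercase, repeat=length):
            yield "".join(prod)


for i, label in enumerate(itertools.islice(labels(), 30), start=1):
    print(i, label)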
How to write a simple python program that prints letters in ascending order?
For example I would like to have: a . . . z aa ab . . . az bz . . . zz aaa and so on. Currently I'm here but I am lost. So feel free to propose a completely different solution. count = 0 string = '' for i in range(100): count += 1 if i % 26 == 0: count = 0 string += 'a' ch = 'a' x = chr(ord(ch) + count) string = string[:-1] + x print(i + 1, string) and my output is something like this: 1 a 2 b . . . 26 z 27 za 28 zb . . . 52 zz 53 zza 54 zzb . . .
[ "Maybe try something like the following:\nrange(97,123) simply creates a range of numbers from 97 to 122, which converted to ASCII equates to a...z (done using chr())\nSo all our FUnction does, is it recieves a base string (starts with empty), prints out the base + range of charachters and calls its self with base + every charachter as the new base and depth decremented by 1\ndef printcharachters(base, depth):\n if depth > 0:\n for a in range(97, 123):\n print(base + chr(a))\n\n for a in range(97, 123):\n printcharachters(base + chr(a), depth - 1)\n\n\n\nprintcharachters(\"\", 2)\n\nReplace the depth with your desired depth (for 2 the last string would be zz for 4 it would be zzzz).\n", "Using more of standard library:\nimport itertools\nimport string\n\n\nfor i in range(1, 3): # Print items of length 1 and 2\n for prod in itertools.product(string.ascii_lowercase, repeat=i):\n print(''.join(prod))\n\nWhat you describe is a sorted output of n-th powers of set {'a'...'z'} in terms of cartesian products in string format (cartesian power n of a set X is, from a simple point of view, a set of all possible tuples (x_1, ..., x_n) where all x_i are contained in X). itertools.product implements exactly cartesian product and outputs in order of presence, so just loop over them. string.ascii_lowercase is a simple string containing all letters a..z in natural order. This is fast (uses C implementation of itertools).\n" ]
[ 2, 1 ]
[]
[]
[ "python", "string" ]
stackoverflow_0074531610_python_string.txt
Q: self in Python references to variable rather than the class So, I'm trying to code an observer pattern in Python. Main method: import Subject as Subj import ConcreteStateA as Obs Newsletter = Subj.Subject Paul = Obs Sara = Obs Julien = Obs print(Paul) print(Sara) Newsletter().addObserver(Paul) Newsletter().addObserver(Sara) Newsletter().addObserver(Julien) Newsletter().notifyObservers("Bald ist Weihnachten!") Newsletter().removeObserver(Paul) Newsletter().notifyObservers("Der Nikolaus steht vor der Tür.") class Subject: def __init__(self): self.ObserverList = [] def addObserver(self, Observer): self.ObserverList.append(Observer) def removeObserver(self, Observer): self.ObserverList.remove(Observer) def notifyObservers(message, self): print("test") for obs in self.ObserverList: print("Notified Observer") obs.update(message) import Observer class ConcreteStateA(Observer.Observer): def Handle(msg, self): print(self.__name__ + "has received a message: " +self.msg) print(msg) As far as I understand the self parameter, it references to it's own instance. So if I type it in a Subject object, it would reference to that. Yet I get this error message: AttributeError: 'str' object has no attribute 'ObserverList' I don't understand why it references to the message variable in an Subject method, shouldn't it reference to the Newsletter (Subject class) object? I tried to google my problem and read into the self parameter, yet without any success. Everything I read seemed to support my theory, that the Newsletter object should be referenced, not the string message. As I am fairly new to python there might be some other problems, i would be happy if you have other tips too :) A: def notifyObservers(message, self): print("test") for obs in self.ObserverList: print("Notified Observer") obs.update(message) The mistake is the way you define your parameters here. The fact is the first parameter will always be what you know as self - the Object. So by having a message as your first parameter, message will be used as the parameter for your object. e.g.: you could do message.ObserverList. Make sure to always have self as the first parameter. So.. def notifyObservers(self, message): print("test") for obs in self.ObserverList: print("Notified Observer") obs.update(message) and class ConcreteStateA(Observer.Observer): def Handle(self, msg): print(self.__name__ + "has received a message: " +self.msg) print(msg) should do the trick
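A tiny demonstration (my addition) of why the traceback complains about a str: Python always binds the instance to a method's first parameter, whatever it is named, so with the parameters swapped the instance lands in message and the string argument lands in self.

class Demo:
    def show(message, self):          # parameters deliberately swapped, as in the question
        print(type(message), type(self))


Demo().show("hello")                  # -> <class '__main__.Demo'> <class 'str'>

Separately, and not raised in the answer: Newsletter = Subj.Subject only aliases the class, so every Newsletter() call in the question's main script builds a brand-new Subject with an empty ObserverList. Keeping a single instance, for example newsletter = Subj.Subject() followed by newsletter.addObserver(Paul), is needed for the observers to persist between calls.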
self in Python references to variable rather than the class
So, I'm trying to code an observer pattern in Python. Main method: import Subject as Subj import ConcreteStateA as Obs Newsletter = Subj.Subject Paul = Obs Sara = Obs Julien = Obs print(Paul) print(Sara) Newsletter().addObserver(Paul) Newsletter().addObserver(Sara) Newsletter().addObserver(Julien) Newsletter().notifyObservers("Bald ist Weihnachten!") Newsletter().removeObserver(Paul) Newsletter().notifyObservers("Der Nikolaus steht vor der Tür.") class Subject: def __init__(self): self.ObserverList = [] def addObserver(self, Observer): self.ObserverList.append(Observer) def removeObserver(self, Observer): self.ObserverList.remove(Observer) def notifyObservers(message, self): print("test") for obs in self.ObserverList: print("Notified Observer") obs.update(message) import Observer class ConcreteStateA(Observer.Observer): def Handle(msg, self): print(self.__name__ + "has received a message: " +self.msg) print(msg) As far as I understand the self parameter, it references to it's own instance. So if I type it in a Subject object, it would reference to that. Yet I get this error message: AttributeError: 'str' object has no attribute 'ObserverList' I don't understand why it references to the message variable in an Subject method, shouldn't it reference to the Newsletter (Subject class) object? I tried to google my problem and read into the self parameter, yet without any success. Everything I read seemed to support my theory, that the Newsletter object should be referenced, not the string message. As I am fairly new to python there might be some other problems, i would be happy if you have other tips too :)
[ "def notifyObservers(message, self):\n print(\"test\")\n for obs in self.ObserverList:\n print(\"Notified Observer\")\n obs.update(message)\n\nThe mistake is the way you define your parameters here. The fact is the first parameter will always be what you know as self - the Object. So by having a message as your first parameter, message will be used as the parameter for your object. e.g.: you could do message.ObserverList. Make sure to always have self as the first parameter.\nSo..\ndef notifyObservers(self, message):\n print(\"test\")\n for obs in self.ObserverList:\n print(\"Notified Observer\")\n obs.update(message)\n\nand\nclass ConcreteStateA(Observer.Observer):\n def Handle(self, msg):\n print(self.__name__ + \"has received a message: \" +self.msg)\n print(msg)\n\nshould do the trick\n" ]
[ 2 ]
[]
[]
[ "python", "self" ]
stackoverflow_0074531794_python_self.txt
Q: Snakemake doesn't activate conda environment correctly I have a Python module modulename installed in a conda environment called myenvname. My snakemake file consists of one simple rule: rule checker2: output: "tata.txt" conda: "myenvname" script: "scripts/test2.py" The contents of the test2.py are the following: import modulename with open("tata.txt","w") as _f: _f.write(modulename.__version__) When I run the above snakemake file with the command snakemake -j 1 --use-conda --conda-frontend conda I get ModuleNotFoundError, which would imply that there is no modulename in my specified environment. However, when I do the following : conda activate myenvname python workflow/scripts/test2.py ... everything works perfectly. I have no idea what's going on. The full error is pasted below, with some info omitted for privacy. Traceback (most recent call last): File "/OMITTED/.snakemake/scripts/tmpheaxuqjn.test2.py", line 13, in <module> import cnvpytor as cnv ModuleNotFoundError: No module named 'MODULENAME' [Thu Nov 17 18:27:22 2022] Error in rule checker2: jobid: 0 output: tata.txt conda-env: MYENVNAME RuleException: CalledProcessError in line 12 of /OMITTED/workflow/snakefile: Command 'source /apps/qiime2/miniconda3/bin/activate 'MYENVNAME'; set -euo pipefail; /OMITTED/.conda/envs/snakemake/bin/python3.1 /OMITTED/.snakemake/scripts/tmpheaxuqjn.test2.py' returned non-zero exit status 1. File "/OMITTED/workflow/snakefile", line 12, in __rule_checker2 File "/OMITTED/.conda/envs/snakemake/lib/python3.10/concurrent/futures/thread.py", line 58, in run Shutting down, this might take some time. Exiting because a job execution failed. Look above for error message Complete log: /OMITTED/.snakemake/log/2022-11-17T182715.495739.snakemake.log EDIT: Typo in script fixed, the typo isn't in the script I'm running so it's not the issue here. EDIT2: I've tried two different attempts from comments. All three attempts are run with the same CLI command snakemake -j 1 --use-conda --conda-frontend conda Attempt 1 Rule in the snakemake: rule checker3: output: "tata.txt" conda: "myenvname" shell: """ conda env list >> {output} conda list >> {output} """ In the output file I had the following (I have lots of environs and packages I've cut out): # conda environments: # ... myenvname * /OMITTED/.conda/envs/myenvname ... # packages in environment at /OMITTED/.conda/envs/myenvname: # # Name Version Build Channel ... modulename 1.2 dev_0 <develop> ... This attempt proves that the conda environment is activated and that this environment has modulename. Attempt 2 Same as running the script, but I've modified the script to include import time; time.sleep(30); import modulename So I can snag a runnable script before it's deleted. The script has the following inserted at the start: ######## snakemake preamble start (automatically inserted, do not edit) ######## import sys; sys.path.extend(['/OMITTED/.conda/envs/snakemake/lib/python3.10/site-packages', '/OMITTED/MYWORKINGDIRECTORY/workflow/scripts']); import pickle; snakemake = pickle.loads(####a lot of stuff here###); from snakemake.logging import logger; logger.printshellcmds = False; __real_file__ = __file__; __file__ = '/OMITTED/MYWORKINGDIRECTORY/workflow/scripts/test3.py'; ######## snakemake preamble end ######### I have no idea what to do with this information. Attempt 3 Instead of running script, I've ran a shell command that runs a python script. 
rule checker4: output: "tata.txt" conda: "myenvname" shell: "python workflow/scripts/test3.py" It worked (showed no errors), and when I open "tata.txt" I find "1.2" which is the version of of my module. Conclusions The snakemake actually activates proper environment, but the problem is in script part. I have no idea why this is. There is a similar question here, so this is a duplicate question. A: Question is answered. Snakemake actually activates correct environment, but running a python script with the script conflicts with this directive. I don't know if this is a bug in snakemake (version is 6.14.0) or an intentional thing. I've solved the problem by running the python script via shell command with python workflow/scripts/MyScript.py - it's a bit of a problem because I had to include a CLI wrapper that would normally be solved by a snakemake object.
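For reference, a rough sketch (my own, with hypothetical paths) of the CLI-wrapper workaround described in the conclusions: the rule passes the output path on the command line and the script reads it from sys.argv instead of the snakemake object.

# Snakefile rule
rule checker4:
    output:
        "tata.txt"
    conda:
        "myenvname"
    shell:
        "python workflow/scripts/test3.py {output}"

# workflow/scripts/test3.py
import sys
import modulename                  # importable because the rule's conda env is active

out_path = sys.argv[1]             # stands in for snakemake.output[0]
with open(out_path, "w") as fh:
    fh.write(modulename.__version__)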
Snakemake doesn't activate conda environment correctly
I have a Python module modulename installed in a conda environment called myenvname. My snakemake file consists of one simple rule: rule checker2: output: "tata.txt" conda: "myenvname" script: "scripts/test2.py" The contents of the test2.py are the following: import modulename with open("tata.txt","w") as _f: _f.write(modulename.__version__) When I run the above snakemake file with the command snakemake -j 1 --use-conda --conda-frontend conda I get ModuleNotFoundError, which would imply that there is no modulename in my specified environment. However, when I do the following : conda activate myenvname python workflow/scripts/test2.py ... everything works perfectly. I have no idea what's going on. The full error is pasted below, with some info omitted for privacy. Traceback (most recent call last): File "/OMITTED/.snakemake/scripts/tmpheaxuqjn.test2.py", line 13, in <module> import cnvpytor as cnv ModuleNotFoundError: No module named 'MODULENAME' [Thu Nov 17 18:27:22 2022] Error in rule checker2: jobid: 0 output: tata.txt conda-env: MYENVNAME RuleException: CalledProcessError in line 12 of /OMITTED/workflow/snakefile: Command 'source /apps/qiime2/miniconda3/bin/activate 'MYENVNAME'; set -euo pipefail; /OMITTED/.conda/envs/snakemake/bin/python3.1 /OMITTED/.snakemake/scripts/tmpheaxuqjn.test2.py' returned non-zero exit status 1. File "/OMITTED/workflow/snakefile", line 12, in __rule_checker2 File "/OMITTED/.conda/envs/snakemake/lib/python3.10/concurrent/futures/thread.py", line 58, in run Shutting down, this might take some time. Exiting because a job execution failed. Look above for error message Complete log: /OMITTED/.snakemake/log/2022-11-17T182715.495739.snakemake.log EDIT: Typo in script fixed, the typo isn't in the script I'm running so it's not the issue here. EDIT2: I've tried two different attempts from comments. All three attempts are run with the same CLI command snakemake -j 1 --use-conda --conda-frontend conda Attempt 1 Rule in the snakemake: rule checker3: output: "tata.txt" conda: "myenvname" shell: """ conda env list >> {output} conda list >> {output} """ In the output file I had the following (I have lots of environs and packages I've cut out): # conda environments: # ... myenvname * /OMITTED/.conda/envs/myenvname ... # packages in environment at /OMITTED/.conda/envs/myenvname: # # Name Version Build Channel ... modulename 1.2 dev_0 <develop> ... This attempt proves that the conda environment is activated and that this environment has modulename. Attempt 2 Same as running the script, but I've modified the script to include import time; time.sleep(30); import modulename So I can snag a runnable script before it's deleted. The script has the following inserted at the start: ######## snakemake preamble start (automatically inserted, do not edit) ######## import sys; sys.path.extend(['/OMITTED/.conda/envs/snakemake/lib/python3.10/site-packages', '/OMITTED/MYWORKINGDIRECTORY/workflow/scripts']); import pickle; snakemake = pickle.loads(####a lot of stuff here###); from snakemake.logging import logger; logger.printshellcmds = False; __real_file__ = __file__; __file__ = '/OMITTED/MYWORKINGDIRECTORY/workflow/scripts/test3.py'; ######## snakemake preamble end ######### I have no idea what to do with this information. Attempt 3 Instead of running script, I've ran a shell command that runs a python script. 
rule checker4: output: "tata.txt" conda: "myenvname" shell: "python workflow/scripts/test3.py" It worked (showed no errors), and when I open "tata.txt" I find "1.2", which is the version of my module. Conclusions Snakemake actually activates the proper environment, but the problem is in the script part. I have no idea why this is. There is a similar question here, so this is a duplicate question.
[ "Question is answered. Snakemake actually activates correct environment, but running a python script with the script conflicts with this directive. I don't know if this is a bug in snakemake (version is 6.14.0) or an intentional thing. I've solved the problem by running the python script via shell command with python workflow/scripts/MyScript.py - it's a bit of a problem because I had to include a CLI wrapper that would normally be solved by a snakemake object.\n" ]
[ 0 ]
[]
[]
[ "conda", "python", "snakemake" ]
stackoverflow_0074479965_conda_python_snakemake.txt
Q: Is there a way to auto-adjust Excel column widths with pandas.ExcelWriter? I am being asked to generate some Excel reports. I am currently using pandas quite heavily for my data, so naturally I would like to use the pandas.ExcelWriter method to generate these reports. However the fixed column widths are a problem. The code I have so far is simple enough. Say I have a dataframe called df: writer = pd.ExcelWriter(excel_file_path, engine='openpyxl') df.to_excel(writer, sheet_name="Summary") I was looking over the pandas docs, and I don't really see any options to set column widths. Is there a trick to make it such that the columns auto-adjust to the data? Or is there something I can do after the fact to the xlsx file to adjust the column widths? (I am using the OpenPyXL library, and generating .xlsx files - if that makes any difference.) A: Inspired by user6178746's answer, I have the following: # Given a dict of dataframes, for example: # dfs = {'gadgets': df_gadgets, 'widgets': df_widgets} writer = pd.ExcelWriter(filename, engine='xlsxwriter') for sheetname, df in dfs.items(): # loop through `dict` of dataframes df.to_excel(writer, sheet_name=sheetname) # send df to writer worksheet = writer.sheets[sheetname] # pull worksheet object for idx, col in enumerate(df): # loop through all columns series = df[col] max_len = max(( series.astype(str).map(len).max(), # len of largest item len(str(series.name)) # len of column name/header )) + 1 # adding a little extra space worksheet.set_column(idx, idx, max_len) # set column width writer.save() A: Dynamically adjust all the column lengths writer = pd.ExcelWriter('/path/to/output/file.xlsx') df.to_excel(writer, sheet_name='sheetName', index=False, na_rep='NaN') for column in df: column_length = max(df[column].astype(str).map(len).max(), len(column)) col_idx = df.columns.get_loc(column) writer.sheets['sheetName'].set_column(col_idx, col_idx, column_length) writer.save() Manually adjust a column using Column Name col_idx = df.columns.get_loc('columnName') writer.sheets['sheetName'].set_column(col_idx, col_idx, 15) Manually adjust a column using Column Index writer.sheets['sheetName'].set_column(col_idx, col_idx, 15) In case any of the above is failing with AttributeError: 'Worksheet' object has no attribute 'set_column' make sure to install xlsxwriter: pip install xlsxwriter For a more comprehensive explanation you can read the article How to Auto-Adjust the Width of Excel Columns with Pandas ExcelWriter on TDS. A: There is a nice package that I started to use recently called StyleFrame. it gets DataFrame and lets you to style it very easily... by default the columns width is auto-adjusting. for example: from StyleFrame import StyleFrame import pandas as pd df = pd.DataFrame({'aaaaaaaaaaa': [1, 2, 3], 'bbbbbbbbb': [1, 1, 1], 'ccccccccccc': [2, 3, 4]}) excel_writer = StyleFrame.ExcelWriter('example.xlsx') sf = StyleFrame(df) sf.to_excel(excel_writer=excel_writer, row_to_add_filters=0, columns_and_rows_to_freeze='B2') excel_writer.save() you can also change the columns width: sf.set_column_width(columns=['aaaaaaaaaaa', 'bbbbbbbbb'], width=35.3) UPDATE 1 In version 1.4 best_fit argument was added to StyleFrame.to_excel. See the documentation. 
UPDATE 2 Here's a sample of code that works for StyleFrame 3.x.x from styleframe import StyleFrame import pandas as pd columns = ['aaaaaaaaaaa', 'bbbbbbbbb', 'ccccccccccc', ] df = pd.DataFrame(data={ 'aaaaaaaaaaa': [1, 2, 3, ], 'bbbbbbbbb': [1, 1, 1, ], 'ccccccccccc': [2, 3, 4, ], }, columns=columns, ) excel_writer = StyleFrame.ExcelWriter('example.xlsx') sf = StyleFrame(df) sf.to_excel( excel_writer=excel_writer, best_fit=columns, columns_and_rows_to_freeze='B2', row_to_add_filters=0, ) excel_writer.save() A: I'm posting this because I just ran into the same issue and found that the official documentation for Xlsxwriter and pandas still have this functionality listed as unsupported. I hacked together a solution that solved the issue i was having. I basically just iterate through each column and use worksheet.set_column to set the column width == the max length of the contents of that column. One important note, however. This solution does not fit the column headers, simply the column values. That should be an easy change though if you need to fit the headers instead. Hope this helps someone :) import pandas as pd import sqlalchemy as sa import urllib read_server = 'serverName' read_database = 'databaseName' read_params = urllib.quote_plus("DRIVER={SQL Server};SERVER="+read_server+";DATABASE="+read_database+";TRUSTED_CONNECTION=Yes") read_engine = sa.create_engine("mssql+pyodbc:///?odbc_connect=%s" % read_params) #Output some SQL Server data into a dataframe my_sql_query = """ SELECT * FROM dbo.my_table """ my_dataframe = pd.read_sql_query(my_sql_query,con=read_engine) #Set destination directory to save excel. xlsFilepath = r'H:\my_project' + "\\" + 'my_file_name.xlsx' writer = pd.ExcelWriter(xlsFilepath, engine='xlsxwriter') #Write excel to file using pandas to_excel my_dataframe.to_excel(writer, startrow = 1, sheet_name='Sheet1', index=False) #Indicate workbook and worksheet for formatting workbook = writer.book worksheet = writer.sheets['Sheet1'] #Iterate through each column and set the width == the max length in that column. A padding length of 2 is also added. for i, col in enumerate(my_dataframe.columns): # find length of column i column_len = my_dataframe[col].astype(str).str.len().max() # Setting the length if the column header is larger # than the max column value length column_len = max(column_len, len(col)) + 2 # set the column length worksheet.set_column(i, i, column_len) writer.save() A: There is probably no automatic way to do it right now, but as you use openpyxl, the following line (adapted from another answer by user Bufke on how to do in manually) allows you to specify a sane value (in character widths): writer.sheets['Summary'].column_dimensions['A'].width = 15 A: By using pandas and xlsxwriter you can do your task, below code will perfectly work in Python 3.x. For more details on working with XlsxWriter with pandas this link might be useful https://xlsxwriter.readthedocs.io/working_with_pandas.html import pandas as pd writer = pd.ExcelWriter(excel_file_path, engine='xlsxwriter') df.to_excel(writer, sheet_name="Summary") workbook = writer.book worksheet = writer.sheets["Summary"] #set the column width as per your requirement worksheet.set_column('A:A', 25) writer.save() A: I found that it was more useful to adjust the column with based on the column header rather than column content. Using df.columns.values.tolist() I generate a list of the column headers and use the lengths of these headers to determine the width of the columns. 
See full code below: import pandas as pd import xlsxwriter writer = pd.ExcelWriter(filename, engine='xlsxwriter') df.to_excel(writer, index=False, sheet_name=sheetname) workbook = writer.book # Access the workbook worksheet= writer.sheets[sheetname] # Access the Worksheet header_list = df.columns.values.tolist() # Generate list of headers for i in range(0, len(header_list)): worksheet.set_column(i, i, len(header_list[i])) # Set column widths based on len(header) writer.save() # Save the excel file A: At work, I am always writing the dataframes to excel files. So instead of writing the same code over and over, I have created a modulus. Now I just import it and use it to write and formate the excel files. There is one downside though, it takes a long time if the dataframe is extra large. So here is the code: def result_to_excel(output_name, dataframes_list, sheet_names_list, output_dir): out_path = os.path.join(output_dir, output_name) writerReport = pd.ExcelWriter(out_path, engine='xlsxwriter', datetime_format='yyyymmdd', date_format='yyyymmdd') workbook = writerReport.book # loop through the list of dataframes to save every dataframe into a new sheet in the excel file for i, dataframe in enumerate(dataframes_list): sheet_name = sheet_names_list[i] # choose the sheet name from sheet_names_list dataframe.to_excel(writerReport, sheet_name=sheet_name, index=False, startrow=0) # Add a header format. format = workbook.add_format({ 'bold': True, 'border': 1, 'fg_color': '#0000FF', 'font_color': 'white'}) # Write the column headers with the defined format. worksheet = writerReport.sheets[sheet_name] for col_num, col_name in enumerate(dataframe.columns.values): worksheet.write(0, col_num, col_name, format) worksheet.autofilter(0, 0, 0, len(dataframe.columns) - 1) worksheet.freeze_panes(1, 0) # loop through the columns in the dataframe to get the width of the column for j, col in enumerate(dataframe.columns): max_width = max([len(str(s)) for s in dataframe[col].values] + [len(col) + 2]) # define a max width to not get to wide column if max_width > 50: max_width = 50 worksheet.set_column(j, j, max_width) writerReport.save() return output_dir + output_name A: Combining the other answers and comments and also supporting multi-indices: def autosize_excel_columns(worksheet, df): autosize_excel_columns_df(worksheet, df.index.to_frame()) autosize_excel_columns_df(worksheet, df, offset=df.index.nlevels) def autosize_excel_columns_df(worksheet, df, offset=0): for idx, col in enumerate(df): series = df[col] max_len = max(( series.astype(str).map(len).max(), len(str(series.name)) )) + 1 worksheet.set_column(idx+offset, idx+offset, max_len) sheetname=... df.to_excel(writer, sheet_name=sheetname, freeze_panes=(df.columns.nlevels, df.index.nlevels)) worksheet = writer.sheets[sheetname] autosize_excel_columns(worksheet, df) writer.save() A: you can solve the problem by calling the following function, where df is the dataframe you want to get the sizes and the sheetname is the sheet in excel where you want the modifications to take place def auto_width_columns(df, sheetname): workbook = writer.book worksheet= writer.sheets[sheetname] for i, col in enumerate(df.columns): column_len = max(df[col].astype(str).str.len().max(), len(col) + 2) worksheet.set_column(i, i, column_len) A: import re import openpyxl .. 
for col in _ws.columns: max_lenght = 0 print(col[0]) col_name = re.findall('\w\d', str(col[0])) col_name = col_name[0] col_name = re.findall('\w', str(col_name))[0] print(col_name) for cell in col: try: if len(str(cell.value)) > max_lenght: max_lenght = len(cell.value) except: pass adjusted_width = (max_lenght+2) _ws.column_dimensions[col_name].width = adjusted_width A: Yes, there is there is something you can do subsequently to the xlsx file to adjust the column widths. Use xlwings to autofit columns. It's a pretty simple solution, see the 6 last lines of the example code. The advantage of this procedure is that you don't have to worry about font size, font type or anything else. Requirement: Excel installation. import pandas as pd import xlwings as xw path = r"test.xlsx" # Export your dataframe in question. df = pd._testing.makeDataFrame() df.to_excel(path) # Autofit all columns with xlwings. with xw.App(visible=False) as app: wb = xw.Book(path) for ws in wb.sheets: ws.autofit(axis="columns") wb.save(path) wb.close() A: Easiest solution is to specify width of column in set_column method. for worksheet in writer.sheets.values(): worksheet.set_column(0,last_column_value, required_width_constant) A: This function works for me, also fixes the index width def write_to_excel(writer, X, sheet_name, sep_only=False): #writer=writer object #X=dataframe #sheet_name=name of sheet #sep_only=True:write only as separate excel file, False: write as sheet to the writer object if sheet_name=="": print("specify sheet_name!") else: X.to_excel(f"{output_folder}{prefix_excel_save}_{sheet_name}.xlsx") if not sep_only: X.to_excel(writer, sheet_name=sheet_name) #fix column widths worksheet = writer.sheets[sheet_name] # pull worksheet object for idx, col in enumerate(X.columns): # loop through all columns series = X[col] max_len = max(( series.astype(str).map(len).max(), # len of largest item len(str(series.name)) # len of column name/header )) + 1 # adding a little extra space worksheet.set_column(idx+1, idx+1, max_len) # set column width (=1 because index = 1) #fix index width max_len=pd.Series(X.index.values).astype(str).map(len).max()+1 worksheet.set_column(0, 0, max_len) if sep_only: print(f'{sheet_name} is written as seperate file') else: print(f'{sheet_name} is written as seperate file') print(f'{sheet_name} is written as sheet') return writer call example: writer = write_to_excel(writer, dataframe, "Statistical_Analysis") A: I may be a bit late to the party but this code works when using 'openpyxl' as your engine, sometimes pip install xlsxwriter wont solve the issue. This code below works like a charm. Edit any part as you wish. def text_length(text): """ Get the effective text length in characters, taking into account newlines """ if not text: return 0 lines = text.split("\n") return max(len(line) for line in lines) def _to_str_for_length(v, decimals=3): """ Like str() but rounds decimals to predefined length """ if isinstance(v, float): # Round to [decimal] places return str(Decimal(v).quantize(Decimal('1.' 
+ '0' * decimals)).normalize()) else: return str(v) def auto_adjust_xlsx_column_width(df, writer, sheet_name, margin=3, length_factor=1.0, decimals=3, index=False): sheet = writer.sheets[sheet_name] _to_str = functools.partial(_to_str_for_length, decimals=decimals) # Compute & set column width for each column for column_name in df.columns: # Convert the value of the columns to string and select the column_length = max(df[column_name].apply(_to_str).map(text_length).max(), text_length(column_name)) + 5 # Get index of column in XLSX # Column index is +1 if we also export the index column col_idx = df.columns.get_loc(column_name) if index: col_idx += 1 # Set width of column to (column_length + margin) sheet.column_dimensions[openpyxl.utils.cell.get_column_letter(col_idx + 1)].width = column_length * length_factor + margin # Compute column width of index column (if enabled) if index: # If the index column is being exported index_length = max(df.index.map(_to_str).map(text_length).max(), text_length(df.index.name)) sheet.column_dimensions["A"].width = index_length * length_factor + margin A: An openpyxl version based on @alichaudry's code. The code 1) loads an excel file, 2) adjusts column widths and 3) saves it. def auto_adjust_column_widths(excel_file : "Excel File Path", extra_space = 1) -> None: """ Adjusts column widths of the excel file and replaces it with the adjusted one. Adjusting columns is based on the lengths of columns values (including column names). Parameters ---------- excel_file : excel_file to adjust column widths. extra_space : extra column width in addition to the value-based-widths """ from openpyxl import load_workbook from openpyxl.utils import get_column_letter wb = load_workbook(excel_file) for ws in wb: df = pd.DataFrame(ws.values,) for i,r in (df.astype(str).applymap(len).max(axis=0) + extra_space).iteritems(): ws.column_dimensions[get_column_letter(i+1)].width = r wb.save(excel_file)
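As a minimal usage sketch of that last helper, assuming a hypothetical file name (note the helper relies on Series.iteritems, so it may need a pre-2.0 pandas): write the workbook with pandas first, then run the function over it.

import pandas as pd

df = pd.DataFrame({'a_rather_long_column_name': [1, 2], 'b': ['x', 'y']})
df.to_excel('report.xlsx', index=False)          # create the file to be adjusted
auto_adjust_column_widths('report.xlsx', extra_space=2)  # widths now follow the longest cell per column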
Is there a way to auto-adjust Excel column widths with pandas.ExcelWriter?
I am being asked to generate some Excel reports. I am currently using pandas quite heavily for my data, so naturally I would like to use the pandas.ExcelWriter method to generate these reports. However the fixed column widths are a problem. The code I have so far is simple enough. Say I have a dataframe called df: writer = pd.ExcelWriter(excel_file_path, engine='openpyxl') df.to_excel(writer, sheet_name="Summary") I was looking over the pandas docs, and I don't really see any options to set column widths. Is there a trick to make it such that the columns auto-adjust to the data? Or is there something I can do after the fact to the xlsx file to adjust the column widths? (I am using the OpenPyXL library, and generating .xlsx files - if that makes any difference.)
[ "Inspired by user6178746's answer, I have the following:\n# Given a dict of dataframes, for example:\n# dfs = {'gadgets': df_gadgets, 'widgets': df_widgets}\n\nwriter = pd.ExcelWriter(filename, engine='xlsxwriter')\nfor sheetname, df in dfs.items(): # loop through `dict` of dataframes\n df.to_excel(writer, sheet_name=sheetname) # send df to writer\n worksheet = writer.sheets[sheetname] # pull worksheet object\n for idx, col in enumerate(df): # loop through all columns\n series = df[col]\n max_len = max((\n series.astype(str).map(len).max(), # len of largest item\n len(str(series.name)) # len of column name/header\n )) + 1 # adding a little extra space\n worksheet.set_column(idx, idx, max_len) # set column width\nwriter.save()\n\n", "Dynamically adjust all the column lengths\nwriter = pd.ExcelWriter('/path/to/output/file.xlsx') \ndf.to_excel(writer, sheet_name='sheetName', index=False, na_rep='NaN')\n\nfor column in df:\n column_length = max(df[column].astype(str).map(len).max(), len(column))\n col_idx = df.columns.get_loc(column)\n writer.sheets['sheetName'].set_column(col_idx, col_idx, column_length)\n\nwriter.save()\n\n\nManually adjust a column using Column Name\ncol_idx = df.columns.get_loc('columnName')\nwriter.sheets['sheetName'].set_column(col_idx, col_idx, 15)\n\n\nManually adjust a column using Column Index\nwriter.sheets['sheetName'].set_column(col_idx, col_idx, 15)\n\n\nIn case any of the above is failing with\nAttributeError: 'Worksheet' object has no attribute 'set_column'\n\nmake sure to install xlsxwriter:\npip install xlsxwriter\n\n\nFor a more comprehensive explanation you can read the article How to Auto-Adjust the Width of Excel Columns with Pandas ExcelWriter on TDS.\n", "There is a nice package that I started to use recently called StyleFrame.\nit gets DataFrame and lets you to style it very easily...\nby default the columns width is auto-adjusting.\nfor example:\nfrom StyleFrame import StyleFrame\nimport pandas as pd\n\ndf = pd.DataFrame({'aaaaaaaaaaa': [1, 2, 3], \n 'bbbbbbbbb': [1, 1, 1],\n 'ccccccccccc': [2, 3, 4]})\nexcel_writer = StyleFrame.ExcelWriter('example.xlsx')\nsf = StyleFrame(df)\nsf.to_excel(excel_writer=excel_writer, row_to_add_filters=0,\n columns_and_rows_to_freeze='B2')\nexcel_writer.save()\n\nyou can also change the columns width:\nsf.set_column_width(columns=['aaaaaaaaaaa', 'bbbbbbbbb'],\n width=35.3)\n\n\nUPDATE 1\nIn version 1.4 best_fit argument was added to StyleFrame.to_excel.\nSee the documentation.\nUPDATE 2\nHere's a sample of code that works for StyleFrame 3.x.x\nfrom styleframe import StyleFrame\nimport pandas as pd\n\ncolumns = ['aaaaaaaaaaa', 'bbbbbbbbb', 'ccccccccccc', ]\ndf = pd.DataFrame(data={\n 'aaaaaaaaaaa': [1, 2, 3, ],\n 'bbbbbbbbb': [1, 1, 1, ],\n 'ccccccccccc': [2, 3, 4, ],\n }, columns=columns,\n)\nexcel_writer = StyleFrame.ExcelWriter('example.xlsx')\nsf = StyleFrame(df)\nsf.to_excel(\n excel_writer=excel_writer, \n best_fit=columns,\n columns_and_rows_to_freeze='B2', \n row_to_add_filters=0,\n)\nexcel_writer.save()\n\n", "I'm posting this because I just ran into the same issue and found that the official documentation for Xlsxwriter and pandas still have this functionality listed as unsupported. I hacked together a solution that solved the issue i was having. I basically just iterate through each column and use worksheet.set_column to set the column width == the max length of the contents of that column. \nOne important note, however. This solution does not fit the column headers, simply the column values. 
That should be an easy change though if you need to fit the headers instead. Hope this helps someone :)\nimport pandas as pd\nimport sqlalchemy as sa\nimport urllib\n\n\nread_server = 'serverName'\nread_database = 'databaseName'\n\nread_params = urllib.quote_plus(\"DRIVER={SQL Server};SERVER=\"+read_server+\";DATABASE=\"+read_database+\";TRUSTED_CONNECTION=Yes\")\nread_engine = sa.create_engine(\"mssql+pyodbc:///?odbc_connect=%s\" % read_params)\n\n#Output some SQL Server data into a dataframe\nmy_sql_query = \"\"\" SELECT * FROM dbo.my_table \"\"\"\nmy_dataframe = pd.read_sql_query(my_sql_query,con=read_engine)\n\n#Set destination directory to save excel.\nxlsFilepath = r'H:\\my_project' + \"\\\\\" + 'my_file_name.xlsx'\nwriter = pd.ExcelWriter(xlsFilepath, engine='xlsxwriter')\n\n#Write excel to file using pandas to_excel\nmy_dataframe.to_excel(writer, startrow = 1, sheet_name='Sheet1', index=False)\n\n#Indicate workbook and worksheet for formatting\nworkbook = writer.book\nworksheet = writer.sheets['Sheet1']\n\n#Iterate through each column and set the width == the max length in that column. A padding length of 2 is also added.\nfor i, col in enumerate(my_dataframe.columns):\n # find length of column i\n column_len = my_dataframe[col].astype(str).str.len().max()\n # Setting the length if the column header is larger\n # than the max column value length\n column_len = max(column_len, len(col)) + 2\n # set the column length\n worksheet.set_column(i, i, column_len)\nwriter.save()\n\n", "There is probably no automatic way to do it right now, but as you use openpyxl, the following line (adapted from another answer by user Bufke on how to do in manually) allows you to specify a sane value (in character widths):\nwriter.sheets['Summary'].column_dimensions['A'].width = 15\n\n", "By using pandas and xlsxwriter you can do your task, below code will perfectly work in Python 3.x. For more details on working with XlsxWriter with pandas this link might be useful https://xlsxwriter.readthedocs.io/working_with_pandas.html\nimport pandas as pd\nwriter = pd.ExcelWriter(excel_file_path, engine='xlsxwriter')\ndf.to_excel(writer, sheet_name=\"Summary\")\nworkbook = writer.book\nworksheet = writer.sheets[\"Summary\"]\n#set the column width as per your requirement\nworksheet.set_column('A:A', 25)\nwriter.save()\n\n", "I found that it was more useful to adjust the column with based on the column header rather than column content. \nUsing df.columns.values.tolist() I generate a list of the column headers and use the lengths of these headers to determine the width of the columns.\nSee full code below:\nimport pandas as pd\nimport xlsxwriter\n\nwriter = pd.ExcelWriter(filename, engine='xlsxwriter')\ndf.to_excel(writer, index=False, sheet_name=sheetname)\n\nworkbook = writer.book # Access the workbook\nworksheet= writer.sheets[sheetname] # Access the Worksheet\n\nheader_list = df.columns.values.tolist() # Generate list of headers\nfor i in range(0, len(header_list)):\n worksheet.set_column(i, i, len(header_list[i])) # Set column widths based on len(header)\n\nwriter.save() # Save the excel file\n\n", "At work, I am always writing the dataframes to excel files. So instead of writing the same code over and over, I have created a modulus. Now I just import it and use it to write and formate the excel files. 
There is one downside though, it takes a long time if the dataframe is extra large.\nSo here is the code:\ndef result_to_excel(output_name, dataframes_list, sheet_names_list, output_dir):\n out_path = os.path.join(output_dir, output_name)\n writerReport = pd.ExcelWriter(out_path, engine='xlsxwriter',\n datetime_format='yyyymmdd', date_format='yyyymmdd')\n workbook = writerReport.book\n # loop through the list of dataframes to save every dataframe into a new sheet in the excel file\n for i, dataframe in enumerate(dataframes_list):\n sheet_name = sheet_names_list[i] # choose the sheet name from sheet_names_list\n dataframe.to_excel(writerReport, sheet_name=sheet_name, index=False, startrow=0)\n # Add a header format.\n format = workbook.add_format({\n 'bold': True,\n 'border': 1,\n 'fg_color': '#0000FF',\n 'font_color': 'white'})\n # Write the column headers with the defined format.\n worksheet = writerReport.sheets[sheet_name]\n for col_num, col_name in enumerate(dataframe.columns.values):\n worksheet.write(0, col_num, col_name, format)\n worksheet.autofilter(0, 0, 0, len(dataframe.columns) - 1)\n worksheet.freeze_panes(1, 0)\n # loop through the columns in the dataframe to get the width of the column\n for j, col in enumerate(dataframe.columns):\n max_width = max([len(str(s)) for s in dataframe[col].values] + [len(col) + 2])\n # define a max width to not get to wide column\n if max_width > 50:\n max_width = 50\n worksheet.set_column(j, j, max_width)\n writerReport.save()\n return output_dir + output_name\n\n\n", "Combining the other answers and comments and also supporting multi-indices:\ndef autosize_excel_columns(worksheet, df):\n autosize_excel_columns_df(worksheet, df.index.to_frame())\n autosize_excel_columns_df(worksheet, df, offset=df.index.nlevels)\n\ndef autosize_excel_columns_df(worksheet, df, offset=0):\n for idx, col in enumerate(df):\n series = df[col]\n max_len = max((\n series.astype(str).map(len).max(),\n len(str(series.name))\n )) + 1\n worksheet.set_column(idx+offset, idx+offset, max_len)\n\nsheetname=...\ndf.to_excel(writer, sheet_name=sheetname, freeze_panes=(df.columns.nlevels, df.index.nlevels))\nworksheet = writer.sheets[sheetname]\nautosize_excel_columns(worksheet, df)\nwriter.save()\n\n", "you can solve the problem by calling the following function, where df is the dataframe you want to get the sizes and the sheetname is the sheet in excel where you want the modifications to take place\ndef auto_width_columns(df, sheetname):\n workbook = writer.book \n worksheet= writer.sheets[sheetname] \n \n for i, col in enumerate(df.columns):\n column_len = max(df[col].astype(str).str.len().max(), len(col) + 2)\n worksheet.set_column(i, i, column_len)\n\n", "import re\nimport openpyxl\n..\nfor col in _ws.columns:\n max_lenght = 0\n print(col[0])\n col_name = re.findall('\\w\\d', str(col[0]))\n col_name = col_name[0]\n col_name = re.findall('\\w', str(col_name))[0]\n print(col_name)\n for cell in col:\n try:\n if len(str(cell.value)) > max_lenght:\n max_lenght = len(cell.value)\n except:\n pass\n adjusted_width = (max_lenght+2)\n _ws.column_dimensions[col_name].width = adjusted_width\n\n", "Yes, there is there is something you can do subsequently to the xlsx file to adjust the column widths.\nUse xlwings to autofit columns. It's a pretty simple solution, see the 6 last lines of the example code. 
The advantage of this procedure is that you don't have to worry about font size, font type or anything else.\nRequirement: Excel installation.\nimport pandas as pd\nimport xlwings as xw\n\npath = r\"test.xlsx\"\n\n# Export your dataframe in question.\ndf = pd._testing.makeDataFrame()\ndf.to_excel(path)\n\n# Autofit all columns with xlwings.\nwith xw.App(visible=False) as app:\n wb = xw.Book(path)\n\n for ws in wb.sheets:\n ws.autofit(axis=\"columns\")\n\n wb.save(path)\n wb.close()\n\n", "Easiest solution is to specify width of column in set_column method. \n for worksheet in writer.sheets.values():\n worksheet.set_column(0,last_column_value, required_width_constant)\n\n", "This function works for me, also fixes the index width\ndef write_to_excel(writer, X, sheet_name, sep_only=False):\n #writer=writer object\n #X=dataframe\n #sheet_name=name of sheet\n #sep_only=True:write only as separate excel file, False: write as sheet to the writer object\n if sheet_name==\"\": \n print(\"specify sheet_name!\")\n else:\n X.to_excel(f\"{output_folder}{prefix_excel_save}_{sheet_name}.xlsx\")\n if not sep_only: \n X.to_excel(writer, sheet_name=sheet_name)\n \n #fix column widths\n worksheet = writer.sheets[sheet_name] # pull worksheet object\n for idx, col in enumerate(X.columns): # loop through all columns\n series = X[col]\n max_len = max((\n series.astype(str).map(len).max(), # len of largest item\n len(str(series.name)) # len of column name/header\n )) + 1 # adding a little extra space\n worksheet.set_column(idx+1, idx+1, max_len) # set column width (=1 because index = 1)\n \n #fix index width\n max_len=pd.Series(X.index.values).astype(str).map(len).max()+1\n worksheet.set_column(0, 0, max_len)\n \n if sep_only: \n print(f'{sheet_name} is written as seperate file')\n else:\n print(f'{sheet_name} is written as seperate file')\n print(f'{sheet_name} is written as sheet')\n return writer\n\ncall example:\nwriter = write_to_excel(writer, dataframe, \"Statistical_Analysis\")\n\n", "I may be a bit late to the party but this code works when using 'openpyxl' as your engine, sometimes pip install xlsxwriter wont solve the issue. This code below works like a charm. Edit any part as you wish.\ndef text_length(text):\n \"\"\"\n Get the effective text length in characters, taking into account newlines\n \"\"\"\n if not text:\n return 0\n lines = text.split(\"\\n\")\n return max(len(line) for line in lines)\n\ndef _to_str_for_length(v, decimals=3):\n \"\"\"\n Like str() but rounds decimals to predefined length\n \"\"\"\n if isinstance(v, float):\n # Round to [decimal] places\n return str(Decimal(v).quantize(Decimal('1.' 
+ '0' * decimals)).normalize())\n else:\n return str(v)\n\n\ndef auto_adjust_xlsx_column_width(df, writer, sheet_name, margin=3, length_factor=1.0, decimals=3, index=False):\n\n sheet = writer.sheets[sheet_name]\n _to_str = functools.partial(_to_str_for_length, decimals=decimals)\n # Compute & set column width for each column\n for column_name in df.columns:\n # Convert the value of the columns to string and select the \n column_length = max(df[column_name].apply(_to_str).map(text_length).max(), text_length(column_name)) + 5\n # Get index of column in XLSX\n # Column index is +1 if we also export the index column\n col_idx = df.columns.get_loc(column_name)\n if index:\n col_idx += 1\n # Set width of column to (column_length + margin)\n sheet.column_dimensions[openpyxl.utils.cell.get_column_letter(col_idx + 1)].width = column_length * length_factor + margin\n # Compute column width of index column (if enabled)\n if index: # If the index column is being exported\n index_length = max(df.index.map(_to_str).map(text_length).max(), text_length(df.index.name))\n sheet.column_dimensions[\"A\"].width = index_length * length_factor + margin\n\n", "An openpyxl version based on @alichaudry's code.\nThe code 1) loads an excel file, 2) adjusts column widths and 3) saves it.\ndef auto_adjust_column_widths(excel_file : \"Excel File Path\", extra_space = 1) -> None:\n \"\"\"\n Adjusts column widths of the excel file and replaces it with the adjusted one.\n Adjusting columns is based on the lengths of columns values (including column names).\n Parameters\n ----------\n excel_file :\n excel_file to adjust column widths.\n \n extra_space : \n extra column width in addition to the value-based-widths\n \"\"\"\n\n from openpyxl import load_workbook\n from openpyxl.utils import get_column_letter\n\n\n wb = load_workbook(excel_file)\n\n \n for ws in wb:\n df = pd.DataFrame(ws.values,)\n\n \n for i,r in (df.astype(str).applymap(len).max(axis=0) + extra_space).iteritems():\n ws.column_dimensions[get_column_letter(i+1)].width = r\n\n \n wb.save(excel_file)\n\n" ]
[ 109, 48, 35, 35, 24, 16, 7, 6, 4, 4, 3, 3, 1, 1, 0, 0 ]
[]
[]
[ "excel", "openpyxl", "pandas", "python" ]
stackoverflow_0017326973_excel_openpyxl_pandas_python.txt
Q: How can I use Python to convert multiple columns in the same row to another row? I have an excel file which has multiple title names as columns within the same row where the data is given. I need to sort the data, convert the column names to rows, and assign the data under those "column names" [input screenshot]. My expected output is for it to turn out like this: [expected output screenshot]. I'm not sure how to start with this; can someone help out here? Edit: sorry about the image description, I'm new here. A: You can identify the rows that contain the new column names by testing for missing values (df['c'].isna() in the sample), keep the a column values only on those rows with Series.where, forward-fill them onto the data rows, and finally filter with the inverted mask and columns a,c in DataFrame.loc: df = pd.read_excel('file.xlsx') #sample data print (df) a b c 0 IT NaN NaN 1 User 1.0 user1@gmail.com 2 Data NaN NaN 3 User 1.0 user5@gmail.com 4 User 1.0 user2@gmail.com m = df['c'].isna() df['a'] = df['a'].where(m).ffill() df = df.loc[~m, ['a','c']] print (df) a c 1 IT user1@gmail.com 3 Data user5@gmail.com 4 Data user2@gmail.com
How can I use Python to convert multiple columns in the same row to another row?
I have an excel file which has multiple title names as columns within the same row where the data is given. I need to sort the data, convert the column names to rows, and assign the data under those "column names" [input screenshot]. My expected output is for it to turn out like this: [expected output screenshot]. I'm not sure how to start with this; can someone help out here? Edit: sorry about the image description, I'm new here.
[ "You can check rows with names of new column values by column b testing missing values, replace non matched a column values to missing values by Series.where and forward filling missing values, last filter with inverted mask and columns a,c in DataFrame.loc:\ndf = pd.read_excel('file.xlsx')\n\n#sample data\nprint (df)\n a b c\n0 IT NaN NaN\n1 User 1.0 user1@gmail.com\n2 Data NaN NaN\n3 User 1.0 user5@gmail.com\n4 User 1.0 user2@gmail.com\n\nm = df['c'].isna()\ndf['a'] = df['a'].where(m).ffill()\n\ndf = df.loc[~m, ['a','c']]\nprint (df)\n a c\n1 IT user1@gmail.com\n3 Data user5@gmail.com\n4 Data user2@gmail.com\n\n" ]
[ 0 ]
[]
[]
[ "dataframe", "excel", "pandas", "python" ]
stackoverflow_0074531050_dataframe_excel_pandas_python.txt
Q: How do I get the day of week given a date? I want to find out the following: given a date (datetime object), what is the corresponding day of the week? For instance, Sunday is the first day, Monday: second day.. and so on And then if the input is something like today's date. Example >>> today = datetime.datetime(2017, 10, 20) >>> today.get_weekday() # what I look for The output is maybe 6 (since it's Friday) A: Use weekday(): >>> import datetime >>> datetime.datetime.today() datetime.datetime(2012, 3, 23, 23, 24, 55, 173504) >>> datetime.datetime.today().weekday() 4 From the documentation: Return the day of the week as an integer, where Monday is 0 and Sunday is 6. A: If you'd like to have the date in English: from datetime import date import calendar my_date = date.today() calendar.day_name[my_date.weekday()] #'Wednesday' A: If you'd like to have the date in English: from datetime import datetime datetime.today().strftime('%A') 'Wednesday' Read more: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior A: Use date.weekday() when Monday is 0 and Sunday is 6 or date.isoweekday() when Monday is 1 and Sunday is 7 A: I solved this for a CodeChef question. import datetime dt = '21/03/2012' day, month, year = (int(x) for x in dt.split('/')) ans = datetime.date(year, month, day) print (ans.strftime("%A")) A: A solution whithout imports for dates after 1700/1/1 def weekDay(year, month, day): offset = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334] week = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'] afterFeb = 1 if month > 2: afterFeb = 0 aux = year - 1700 - afterFeb # dayOfWeek for 1700/1/1 = 5, Friday dayOfWeek = 5 # partial sum of days betweem current date and 1700/1/1 dayOfWeek += (aux + afterFeb) * 365 # leap year correction dayOfWeek += aux / 4 - aux / 100 + (aux + 100) / 400 # sum monthly and day offsets dayOfWeek += offset[month - 1] + (day - 1) dayOfWeek %= 7 return dayOfWeek, week[dayOfWeek] print weekDay(2013, 6, 15) == (6, 'Saturday') print weekDay(1969, 7, 20) == (0, 'Sunday') print weekDay(1945, 4, 30) == (1, 'Monday') print weekDay(1900, 1, 1) == (1, 'Monday') print weekDay(1789, 7, 14) == (2, 'Tuesday') A: If you have dates as a string, it might be easier to do it using pandas' Timestamp import pandas as pd df = pd.Timestamp("2019-04-12") print(df.dayofweek, df.weekday_name) Output: 4 Friday A: Here's a simple code snippet to solve this problem import datetime intDay = datetime.date(year=2000, month=12, day=1).weekday() days = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"] print(days[intDay]) The output should be: Friday A: This is a solution if the date is a datetime object. import datetime def dow(date): days=["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"] dayNumber=date.weekday() print days[dayNumber] A: Say you have timeStamp: String variable, YYYY-MM-DD HH:MM:SS step 1: convert it to dateTime function with blow code... 
df['timeStamp'] = pd.to_datetime(df['timeStamp']) Step 2 : Now you can extract all the required feature as below which will create new Column for each of the fild- hour,month,day of week,year, date df['Hour'] = df['timeStamp'].apply(lambda time: time.hour) df['Month'] = df['timeStamp'].apply(lambda time: time.month) df['Day of Week'] = df['timeStamp'].apply(lambda time: time.dayofweek) df['Year'] = df['timeStamp'].apply(lambda t: t.year) df['Date'] = df['timeStamp'].apply(lambda t: t.day) A: datetime library sometimes gives errors with strptime() so I switched to dateutil library. Here's an example of how you can use it : from dateutil import parser parser.parse('January 11, 2010').strftime("%a") The output that you get from this is 'Mon'. If you want the output as 'Monday', use the following : parser.parse('January 11, 2010').strftime("%A") This worked for me pretty quickly. I was having problems while using the datetime library because I wanted to store the weekday name instead of weekday number and the format from using the datetime library was causing problems. If you're not having problems with this, great! If you are, you cand efinitely go for this as it has a simpler syntax as well. Hope this helps. A: Assuming you are given the day, month, and year, you could do: import datetime DayL = ['Mon','Tues','Wednes','Thurs','Fri','Satur','Sun'] date = DayL[datetime.date(year,month,day).weekday()] + 'day' #Set day, month, year to your value #Now, date is set as an actual day, not a number from 0 to 6. print(date) A: If you have reason to avoid the use of the datetime module, then this function will work. Note: The change from the Julian to the Gregorian calendar is assumed to have occurred in 1582. If this is not true for your calendar of interest then change the line if year > 1582: accordingly. def dow(year,month,day): """ day of week, Sunday = 1, Saturday = 7 http://en.wikipedia.org/wiki/Zeller%27s_congruence """ m, q = month, day if m == 1: m = 13 year -= 1 elif m == 2: m = 14 year -= 1 K = year % 100 J = year // 100 f = (q + int(13*(m + 1)/5.0) + K + int(K/4.0)) fg = f + int(J/4.0) - 2 * J fj = f + 5 - J if year > 1582: h = fg % 7 else: h = fj % 7 if h == 0: h = 7 return h A: This don't need to day of week comments. I recommend this code~! import datetime DAY_OF_WEEK = { "MONDAY": 0, "TUESDAY": 1, "WEDNESDAY": 2, "THURSDAY": 3, "FRIDAY": 4, "SATURDAY": 5, "SUNDAY": 6 } def string_to_date(dt, format='%Y%m%d'): return datetime.datetime.strptime(dt, format) def date_to_string(date, format='%Y%m%d'): return datetime.datetime.strftime(date, format) def day_of_week(dt): return string_to_date(dt).weekday() dt = '20210101' if day_of_week(dt) == DAY_OF_WEEK['SUNDAY']: None A: If you're not solely reliant on the datetime module, calendar might be a better alternative. This, for example, will provide you with the day codes: calendar.weekday(2017,12,22); And this will give you the day itself: days = ["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"] days[calendar.weekday(2017,12,22)] Or in the style of python, as a one liner: ["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"][calendar.weekday(2017,12,22)] A: import datetime int(datetime.datetime.today().strftime('%w'))+1 this should give you your real day number - 1 = sunday, 2 = monday, etc... 
A: To get Sunday as 1 through Saturday as 7, this is the simplest solution to your question: datetime.date.today().toordinal()%7 + 1 All of them: import datetime today = datetime.date.today() sunday = today - datetime.timedelta(today.weekday()+1) for i in range(7): tmp_date = sunday + datetime.timedelta(i) print tmp_date.toordinal()%7 + 1, '==', tmp_date.strftime('%A') Output: 1 == Sunday 2 == Monday 3 == Tuesday 4 == Wednesday 5 == Thursday 6 == Friday 7 == Saturday A: Here is how to convert a list of little endian string dates to datetime: import datetime, time ls = ['31/1/2007', '14/2/2017'] for d in ls: dt = datetime.datetime.strptime(d, "%d/%m/%Y") print(dt) print(dt.strftime("%A")) A: We can take help of Pandas: import pandas as pd As mentioned above in the problem We have: datetime(2017, 10, 20) If execute this line in the jupyter notebook we have an output like this: datetime.datetime(2017, 10, 20, 0, 0) Using weekday() and weekday_name: If you want weekdays in integer number format then use: pd.to_datetime(datetime(2017, 10, 20)).weekday() The output will be: 4 And if you want it as name of the day like Sunday, Monday, Friday, etc you can use: pd.to_datetime(datetime(2017, 10, 20)).weekday_name The output will be: 'Friday' If having a dates column in Pandas dataframe then: Now suppose if you have a pandas dataframe having a date column like this: pdExampleDataFrame['Dates'].head(5) 0 2010-04-01 1 2010-04-02 2 2010-04-03 3 2010-04-04 4 2010-04-05 Name: Dates, dtype: datetime64[ns] Now If we want to know the name of the weekday like Monday, Tuesday, ..etc we can use .weekday_name as follows: pdExampleDataFrame.head(5)['Dates'].dt.weekday_name the output will be: 0 Thursday 1 Friday 2 Saturday 3 Sunday 4 Monday Name: Dates, dtype: object And if we want the integer number of weekday from this Dates column then we can use: pdExampleDataFrame.head(5)['Dates'].apply(lambda x: x.weekday()) The output will look like this: 0 3 1 4 2 5 3 6 4 0 Name: Dates, dtype: int64 A: import datetime import calendar day, month, year = map(int, input().split()) my_date = datetime.date(year, month, day) print(calendar.day_name[my_date.weekday()]) Output Sample 08 05 2015 Friday A: If you want to generate a column with a range of dates (Date) and generate a column that goes to the first one and assigns the Week Day (Week Day), do the following (I will used the dates ranging from 2008-01-01 to 2020-02-01): import pandas as pd dr = pd.date_range(start='2008-01-01', end='2020-02-1') df = pd.DataFrame() df['Date'] = dr df['Week Day'] = pd.to_datetime(dr).weekday The output is the following: The Week Day varies from 0 to 6, where 0 corresponds to Monday and 6 to Sunday. A: Here's a fresh way. Sunday is 0. from datetime import datetime today = datetime(year=2022, month=6, day=17) print(today.toordinal()%7) # 5 yesterday = datetime(year=1, month=1, day=1) print(today.toordinal()%7) # 1 A: A simple, straightforward and still not mentioned option: import datetime ... 
givenDateObj = datetime.date(2017, 10, 20) weekday = givenDateObj.isocalendar()[2] # 5 weeknumber = givenDateObj.isocalendar()[1] # 42 A: If u are Chinese user, u can use this package: https://github.com/LKI/chinese-calendar import datetime # 判断 2018年4月30号 是不是节假日 from chinese_calendar import is_holiday, is_workday april_last = datetime.date(2018, 4, 30) assert is_workday(april_last) is False assert is_holiday(april_last) is True # 或者在判断的同时,获取节日名 import chinese_calendar as calendar # 也可以这样 import on_holiday, holiday_name = calendar.get_holiday_detail(april_last) assert on_holiday is True assert holiday_name == calendar.Holiday.labour_day.value # 还能判断法定节假日是不是调休 import chinese_calendar assert chinese_calendar.is_in_lieu(datetime.date(2006, 2, 1)) is False assert chinese_calendar.is_in_lieu(datetime.date(2006, 2, 2)) is True A: Using Canlendar Module import calendar a=calendar.weekday(year,month,day) days=["MONDAY","TUESDAY","WEDNESDAY","THURSDAY","FRIDAY","SATURDAY","SUNDAY"] print(days[a]) A: Here is my python3 implementation. months = {'jan' : 1, 'feb' : 4, 'mar' : 4, 'apr':0, 'may':2, 'jun':5, 'jul':6, 'aug':3, 'sep':6, 'oct':1, 'nov':4, 'dec':6} dates = {'Sunday':1, 'Monday':2, 'Tuesday':3, 'Wednesday':4, 'Thursday':5, 'Friday':6, 'Saterday':0} ranges = {'1800-1899':2, '1900-1999':0, '2000-2099':6, '2100-2199':4, '2200-2299':2} def getValue(val, dic): if(len(val)==4): for k,v in dic.items(): x,y=int(k.split('-')[0]),int(k.split('-')[1]) val = int(val) if(val>=x and val<=y): return v else: return dic[val] def getDate(val): return (list(dates.keys())[list(dates.values()).index(val)]) def main(myDate): dateArray = myDate.split('-') # print(dateArray) date,month,year = dateArray[2],dateArray[1],dateArray[0] # print(date,month,year) date = int(date) month_v = getValue(month, months) year_2 = int(year[2:]) div = year_2//4 year_v = getValue(year, ranges) sumAll = date+month_v+year_2+div+year_v val = (sumAll)%7 str_date = getDate(val) print('{} is a {}.'.format(myDate, str_date)) if __name__ == "__main__": testDate = '2018-mar-4' main(testDate) A: import numpy as np def date(df): df['weekday'] = df['date'].dt.day_name() conditions = [(df['weekday'] == 'Sunday'), (df['weekday'] == 'Monday'), (df['weekday'] == 'Tuesday'), (df['weekday'] == 'Wednesday'), (df['weekday'] == 'Thursday'), (df['weekday'] == 'Friday'), (df['weekday'] == 'Saturday')] choices = [0, 1, 2, 3, 4, 5, 6] df['week'] = np.select(conditions, choices) return df A: Below is the code to enter date in the format of DD-MM-YYYY you can change the input format by changing the order of '%d-%m-%Y' and also by changing the delimiter. import datetime try: date = input() date_time_obj = datetime.datetime.strptime(date, '%d-%m-%Y') print(date_time_obj.strftime('%A')) except ValueError: print("Invalid date.") A: In MATLAB, Gauss' method day_name={'Sun','Mon','Tue','Wed','Thu','Fri','Sat'} month_offset=[0 3 3 6 1 4 6 2 5 0 3 5]; % common year % input date y1=2022 m1=11 d1=22 % is y1 leap if mod(y1,4)==0 && mod(y1,100)==0 && mod(y1,400)==0 month_offset=[0 3 4 0 2 5 0 3 6 1 4 6]; % offset for leap year end % Gregorian calendar weekday_gregor=rem( d1+month_offset(m1) + 5*rem(y1-1,4) + 4*rem(y1-1,100) + 6*rem(y1-1,400),7) day_name{weekday_gregor+1} 0: Sunday 1: Monday .. 6: Saturday
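Since the rest of this thread is Python, here is a hedged Python adaptation of that Gauss-style month-offset method (not part of the original answer; the function name is mine, and it uses the full Gregorian leap-year rule):

from datetime import date

def gauss_weekday(year, month, day):
    """Day of week with 0 = Sunday ... 6 = Saturday (Gregorian calendar)."""
    common = [0, 3, 3, 6, 1, 4, 6, 2, 5, 0, 3, 5]   # month offsets, common year
    leap   = [0, 3, 4, 0, 2, 5, 0, 3, 6, 1, 4, 6]   # month offsets, leap year
    is_leap = (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)
    offset = leap if is_leap else common
    return (day + offset[month - 1]
            + 5 * ((year - 1) % 4)
            + 4 * ((year - 1) % 100)
            + 6 * ((year - 1) % 400)) % 7

# Quick sanity checks against the standard library (isoweekday(): Mon=1 .. Sun=7)
assert gauss_weekday(2022, 11, 22) == 2                                     # Tuesday
assert gauss_weekday(2017, 10, 20) == date(2017, 10, 20).isoweekday() % 7   # Friday -> 5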
How do I get the day of week given a date?
I want to find out the following: given a date (datetime object), what is the corresponding day of the week? For instance, Sunday is the first day, Monday: second day.. and so on And then if the input is something like today's date. Example >>> today = datetime.datetime(2017, 10, 20) >>> today.get_weekday() # what I look for The output is maybe 6 (since it's Friday)
[ "Use weekday():\n>>> import datetime\n>>> datetime.datetime.today()\ndatetime.datetime(2012, 3, 23, 23, 24, 55, 173504)\n>>> datetime.datetime.today().weekday()\n4\n\nFrom the documentation:\n\nReturn the day of the week as an integer, where Monday is 0 and Sunday is 6.\n\n", "If you'd like to have the date in English:\nfrom datetime import date\nimport calendar\nmy_date = date.today()\ncalendar.day_name[my_date.weekday()] #'Wednesday'\n\n", "If you'd like to have the date in English:\nfrom datetime import datetime\ndatetime.today().strftime('%A')\n'Wednesday'\n\nRead more:\nhttps://docs.python.org/3/library/datetime.html#strftime-strptime-behavior\n", "Use date.weekday() when Monday is 0 and Sunday is 6\nor\ndate.isoweekday() when Monday is 1 and Sunday is 7\n", "I solved this for a CodeChef question.\nimport datetime\ndt = '21/03/2012'\nday, month, year = (int(x) for x in dt.split('/')) \nans = datetime.date(year, month, day)\nprint (ans.strftime(\"%A\"))\n\n", "A solution whithout imports for dates after 1700/1/1 \ndef weekDay(year, month, day):\n offset = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]\n week = ['Sunday', \n 'Monday', \n 'Tuesday', \n 'Wednesday', \n 'Thursday', \n 'Friday', \n 'Saturday']\n afterFeb = 1\n if month > 2: afterFeb = 0\n aux = year - 1700 - afterFeb\n # dayOfWeek for 1700/1/1 = 5, Friday\n dayOfWeek = 5\n # partial sum of days betweem current date and 1700/1/1\n dayOfWeek += (aux + afterFeb) * 365 \n # leap year correction \n dayOfWeek += aux / 4 - aux / 100 + (aux + 100) / 400 \n # sum monthly and day offsets\n dayOfWeek += offset[month - 1] + (day - 1) \n dayOfWeek %= 7\n return dayOfWeek, week[dayOfWeek]\n\nprint weekDay(2013, 6, 15) == (6, 'Saturday')\nprint weekDay(1969, 7, 20) == (0, 'Sunday')\nprint weekDay(1945, 4, 30) == (1, 'Monday')\nprint weekDay(1900, 1, 1) == (1, 'Monday')\nprint weekDay(1789, 7, 14) == (2, 'Tuesday')\n\n", "If you have dates as a string, it might be easier to do it using pandas' Timestamp\nimport pandas as pd\ndf = pd.Timestamp(\"2019-04-12\")\nprint(df.dayofweek, df.weekday_name)\n\nOutput:\n4 Friday\n\n", "Here's a simple code snippet to solve this problem\nimport datetime\n\nintDay = datetime.date(year=2000, month=12, day=1).weekday()\ndays = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\nprint(days[intDay])\n\nThe output should be:\nFriday\n\n", "This is a solution if the date is a datetime object.\nimport datetime\ndef dow(date):\n days=[\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\",\"Sunday\"]\n dayNumber=date.weekday()\n print days[dayNumber]\n\n", "Say you have timeStamp: String variable, YYYY-MM-DD HH:MM:SS\nstep 1: convert it to dateTime function with blow code...\ndf['timeStamp'] = pd.to_datetime(df['timeStamp'])\n\nStep 2 : Now you can extract all the required feature as below which will create new Column for each of the fild- hour,month,day of week,year, date\ndf['Hour'] = df['timeStamp'].apply(lambda time: time.hour)\ndf['Month'] = df['timeStamp'].apply(lambda time: time.month)\ndf['Day of Week'] = df['timeStamp'].apply(lambda time: time.dayofweek)\ndf['Year'] = df['timeStamp'].apply(lambda t: t.year)\ndf['Date'] = df['timeStamp'].apply(lambda t: t.day)\n\n", "datetime library sometimes gives errors with strptime() so I switched to dateutil library. Here's an example of how you can use it :\nfrom dateutil import parser\nparser.parse('January 11, 2010').strftime(\"%a\")\n\nThe output that you get from this is 'Mon'. 
If you want the output as 'Monday', use the following :\nparser.parse('January 11, 2010').strftime(\"%A\")\n\nThis worked for me pretty quickly. I was having problems while using the datetime library because I wanted to store the weekday name instead of weekday number and the format from using the datetime library was causing problems. If you're not having problems with this, great! If you are, you cand efinitely go for this as it has a simpler syntax as well. Hope this helps.\n", "Assuming you are given the day, month, and year, you could do:\nimport datetime\nDayL = ['Mon','Tues','Wednes','Thurs','Fri','Satur','Sun']\ndate = DayL[datetime.date(year,month,day).weekday()] + 'day'\n#Set day, month, year to your value\n#Now, date is set as an actual day, not a number from 0 to 6.\n\nprint(date)\n\n", "If you have reason to avoid the use of the datetime module, then this function will work.\nNote: The change from the Julian to the Gregorian calendar is assumed to have occurred in 1582. If this is not true for your calendar of interest then change the line if year > 1582: accordingly.\ndef dow(year,month,day):\n \"\"\" day of week, Sunday = 1, Saturday = 7\n http://en.wikipedia.org/wiki/Zeller%27s_congruence \"\"\"\n m, q = month, day\n if m == 1:\n m = 13\n year -= 1\n elif m == 2:\n m = 14\n year -= 1\n K = year % 100 \n J = year // 100\n f = (q + int(13*(m + 1)/5.0) + K + int(K/4.0))\n fg = f + int(J/4.0) - 2 * J\n fj = f + 5 - J\n if year > 1582:\n h = fg % 7\n else:\n h = fj % 7\n if h == 0:\n h = 7\n return h\n\n", "This don't need to day of week comments.\nI recommend this code~!\nimport datetime\n\n\nDAY_OF_WEEK = {\n \"MONDAY\": 0,\n \"TUESDAY\": 1,\n \"WEDNESDAY\": 2,\n \"THURSDAY\": 3,\n \"FRIDAY\": 4,\n \"SATURDAY\": 5,\n \"SUNDAY\": 6\n}\n\ndef string_to_date(dt, format='%Y%m%d'):\n return datetime.datetime.strptime(dt, format)\n\ndef date_to_string(date, format='%Y%m%d'):\n return datetime.datetime.strftime(date, format)\n\ndef day_of_week(dt):\n return string_to_date(dt).weekday()\n\n\ndt = '20210101'\nif day_of_week(dt) == DAY_OF_WEEK['SUNDAY']:\n None\n\n", "If you're not solely reliant on the datetime module, calendar might be a better alternative. 
This, for example, will provide you with the day codes:\ncalendar.weekday(2017,12,22);\n\nAnd this will give you the day itself:\ndays = [\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\",\"Sunday\"]\ndays[calendar.weekday(2017,12,22)]\n\nOr in the style of python, as a one liner:\n[\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\",\"Sunday\"][calendar.weekday(2017,12,22)]\n\n", "import datetime\nint(datetime.datetime.today().strftime('%w'))+1\n\nthis should give you your real day number - 1 = sunday, 2 = monday, etc...\n", "To get Sunday as 1 through Saturday as 7, this is the simplest solution to your question:\ndatetime.date.today().toordinal()%7 + 1\n\nAll of them:\nimport datetime\n\ntoday = datetime.date.today()\nsunday = today - datetime.timedelta(today.weekday()+1)\n\nfor i in range(7):\n tmp_date = sunday + datetime.timedelta(i)\n print tmp_date.toordinal()%7 + 1, '==', tmp_date.strftime('%A')\n\nOutput:\n1 == Sunday\n2 == Monday\n3 == Tuesday\n4 == Wednesday\n5 == Thursday\n6 == Friday\n7 == Saturday\n\n", "Here is how to convert a list of little endian string dates to datetime:\nimport datetime, time\nls = ['31/1/2007', '14/2/2017']\nfor d in ls: \n dt = datetime.datetime.strptime(d, \"%d/%m/%Y\")\n print(dt)\n print(dt.strftime(\"%A\"))\n\n", "We can take help of Pandas:\nimport pandas as pd\n\nAs mentioned above in the problem We have:\ndatetime(2017, 10, 20)\n\nIf execute this line in the jupyter notebook we have an output like this:\ndatetime.datetime(2017, 10, 20, 0, 0)\n\nUsing weekday() and weekday_name:\nIf you want weekdays in integer number format then use:\npd.to_datetime(datetime(2017, 10, 20)).weekday()\n\nThe output will be:\n4\n\nAnd if you want it as name of the day like Sunday, Monday, Friday, etc you can use:\npd.to_datetime(datetime(2017, 10, 20)).weekday_name\n\nThe output will be:\n'Friday'\nIf having a dates column in Pandas dataframe then:\nNow suppose if you have a pandas dataframe having a date column like this:\npdExampleDataFrame['Dates'].head(5)\n0 2010-04-01\n1 2010-04-02\n2 2010-04-03\n3 2010-04-04\n4 2010-04-05\nName: Dates, dtype: datetime64[ns]\n\nNow If we want to know the name of the weekday like Monday, Tuesday, ..etc we can use .weekday_name as follows:\npdExampleDataFrame.head(5)['Dates'].dt.weekday_name\n\nthe output will be:\n0 Thursday\n1 Friday\n2 Saturday\n3 Sunday\n4 Monday\nName: Dates, dtype: object\n\nAnd if we want the integer number of weekday from this Dates column then we can use:\npdExampleDataFrame.head(5)['Dates'].apply(lambda x: x.weekday())\n\nThe output will look like this:\n0 3\n1 4\n2 5\n3 6\n4 0\nName: Dates, dtype: int64\n\n", "import datetime\nimport calendar\n\nday, month, year = map(int, input().split())\nmy_date = datetime.date(year, month, day)\nprint(calendar.day_name[my_date.weekday()])\n\nOutput Sample\n08 05 2015\nFriday\n\n", "If you want to generate a column with a range of dates (Date) and generate a column that goes to the first one and assigns the Week Day (Week Day), do the following (I will used the dates ranging from 2008-01-01 to 2020-02-01):\nimport pandas as pd\ndr = pd.date_range(start='2008-01-01', end='2020-02-1')\ndf = pd.DataFrame()\ndf['Date'] = dr\ndf['Week Day'] = pd.to_datetime(dr).weekday\n\nThe output is the following:\n\nThe Week Day varies from 0 to 6, where 0 corresponds to Monday and 6 to Sunday.\n", "Here's a fresh way. 
Sunday is 0.\nfrom datetime import datetime\ntoday = datetime(year=2022, month=6, day=17)\nprint(today.toordinal()%7) # 5\nyesterday = datetime(year=1, month=1, day=1)\nprint(today.toordinal()%7) # 1\n\n", "A simple, straightforward and still not mentioned option:\nimport datetime\n...\ngivenDateObj = datetime.date(2017, 10, 20)\nweekday = givenDateObj.isocalendar()[2] # 5\nweeknumber = givenDateObj.isocalendar()[1] # 42\n\n", "If u are Chinese user, u can use this package:\nhttps://github.com/LKI/chinese-calendar\nimport datetime\n\n# 判断 2018年4月30号 是不是节假日\nfrom chinese_calendar import is_holiday, is_workday\napril_last = datetime.date(2018, 4, 30)\nassert is_workday(april_last) is False\nassert is_holiday(april_last) is True\n\n# 或者在判断的同时,获取节日名\nimport chinese_calendar as calendar # 也可以这样 import\non_holiday, holiday_name = calendar.get_holiday_detail(april_last)\nassert on_holiday is True\nassert holiday_name == calendar.Holiday.labour_day.value\n\n# 还能判断法定节假日是不是调休\nimport chinese_calendar\nassert chinese_calendar.is_in_lieu(datetime.date(2006, 2, 1)) is False\nassert chinese_calendar.is_in_lieu(datetime.date(2006, 2, 2)) is True\n\n", "Using Canlendar Module\nimport calendar\na=calendar.weekday(year,month,day)\ndays=[\"MONDAY\",\"TUESDAY\",\"WEDNESDAY\",\"THURSDAY\",\"FRIDAY\",\"SATURDAY\",\"SUNDAY\"]\nprint(days[a])\n\n", "Here is my python3 implementation. \nmonths = {'jan' : 1, 'feb' : 4, 'mar' : 4, 'apr':0, 'may':2, 'jun':5, 'jul':6, 'aug':3, 'sep':6, 'oct':1, 'nov':4, 'dec':6}\ndates = {'Sunday':1, 'Monday':2, 'Tuesday':3, 'Wednesday':4, 'Thursday':5, 'Friday':6, 'Saterday':0}\nranges = {'1800-1899':2, '1900-1999':0, '2000-2099':6, '2100-2199':4, '2200-2299':2}\n\ndef getValue(val, dic):\n if(len(val)==4):\n for k,v in dic.items():\n x,y=int(k.split('-')[0]),int(k.split('-')[1])\n val = int(val)\n if(val>=x and val<=y):\n return v\n else:\n return dic[val]\n\ndef getDate(val):\n return (list(dates.keys())[list(dates.values()).index(val)]) \n\n\n\ndef main(myDate):\n dateArray = myDate.split('-')\n # print(dateArray)\n date,month,year = dateArray[2],dateArray[1],dateArray[0]\n # print(date,month,year)\n\n date = int(date)\n month_v = getValue(month, months)\n year_2 = int(year[2:])\n div = year_2//4\n year_v = getValue(year, ranges)\n sumAll = date+month_v+year_2+div+year_v\n val = (sumAll)%7\n str_date = getDate(val)\n\n print('{} is a {}.'.format(myDate, str_date))\n\nif __name__ == \"__main__\":\n testDate = '2018-mar-4'\n main(testDate)\n\n", "import numpy as np\n\ndef date(df):\n df['weekday'] = df['date'].dt.day_name()\n\n conditions = [(df['weekday'] == 'Sunday'),\n (df['weekday'] == 'Monday'),\n (df['weekday'] == 'Tuesday'),\n (df['weekday'] == 'Wednesday'),\n (df['weekday'] == 'Thursday'),\n (df['weekday'] == 'Friday'),\n (df['weekday'] == 'Saturday')]\n\n choices = [0, 1, 2, 3, 4, 5, 6]\n\n df['week'] = np.select(conditions, choices)\n\n return df\n\n", "Below is the code to enter date in the format of DD-MM-YYYY you can change the input format by changing the order of '%d-%m-%Y' and also by changing the delimiter.\nimport datetime\ntry:\n date = input()\n date_time_obj = datetime.datetime.strptime(date, '%d-%m-%Y')\n print(date_time_obj.strftime('%A'))\nexcept ValueError:\n print(\"Invalid date.\")\n\n", "In MATLAB, Gauss' method\nday_name={'Sun','Mon','Tue','Wed','Thu','Fri','Sat'}\nmonth_offset=[0 3 3 6 1 4 6 2 5 0 3 5]; % common year\n\n% input date\ny1=2022\nm1=11\nd1=22\n\n% is y1 leap\nif mod(y1,4)==0 && mod(y1,100)==0 && mod(y1,400)==0\n month_offset=[0 3 4 0 2 5 0 
3 6 1 4 6]; % offset for leap year\nend\n\n% Gregorian calendar\nweekday_gregor=rem( d1+month_offset(m1) + 5*rem(y1-1,4) + 4*rem(y1-1,100) + 6*rem(y1-1,400),7)\n\nday_name{weekday_gregor+1}\n\n0: Sunday 1: Monday .. 6: Saturday\n" ]
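The answers above mix several numbering conventions (toordinal() % 7 makes Sunday 0, weekday() makes Monday 0, isoweekday() makes Monday 1), so a short standard-library cross-check is useful; this sketch only reuses the 2017-10-20 example date from one of the answers.

from datetime import date

d = date(2017, 10, 20)
print(d.weekday())       # 4 -> Monday is 0, so 4 is Friday
print(d.isoweekday())    # 5 -> Monday is 1, so 5 is Friday
print(d.strftime("%A"))  # 'Friday' -> spelled-out name, no numbering convention needed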
[ 1327, 381, 220, 108, 51, 34, 18, 15, 12, 10, 9, 7, 6, 6, 5, 4, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1, 0, 0 ]
[ "use this code:\nimport pandas as pd\nfrom datetime import datetime\nprint(pd.DatetimeIndex(df['give_date']).day)\n\n" ]
[ -1 ]
[ "date", "datetime", "python", "time", "weekday" ]
stackoverflow_0009847213_date_datetime_python_time_weekday.txt
Q: ValueError: Target size (torch.Size([8, 1])) must be the same as input size (torch.Size([8, 4])) I'm trying to train xlm roberta base for multi label text classification on my dataset of tweets, but I keep getting the following error: --------------------------------------------------------------------------- ValueError Traceback (most recent call last) Cell In [38], line 36 33 b_labels = b_labels.type(torch.LongTensor) 34 #b_labels = torch.nn.functional.one_hot(b_labels.to(torch.LongTensor), 4) 35 #one_hot_label = one_hot_label.float() ---> 36 outputs = model(b_input_ids, 37 token_type_ids=None, 38 attention_mask=b_input_mask, 39 labels=b_labels) 40 # get loss 41 loss = outputs[0] File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\torch\nn\modules\module.py:1190, in Module._call_impl(self, *input, **kwargs) 1186 # If we don't have any hooks, we want to skip the rest of the logic in 1187 # this function, and just call forward. 1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks 1189 or _global_forward_hooks or _global_forward_pre_hooks): -> 1190 return forward_call(*input, **kwargs) 1191 # Do not call functions when jit is used 1192 full_backward_hooks, non_full_backward_hooks = [], [] File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\transformers\models\roberta\modeling_roberta.py:1245, in RobertaForSequenceClassification.forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, labels, output_attentions, output_hidden_states, return_dict) 1243 elif self.config.problem_type == "multi_label_classification": 1244 loss_fct = BCEWithLogitsLoss() -> 1245 loss = loss_fct(logits, labels) 1247 if not return_dict: 1248 output = (logits,) + outputs[2:] File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\torch\nn\modules\module.py:1190, in Module._call_impl(self, *input, **kwargs) 1186 # If we don't have any hooks, we want to skip the rest of the logic in 1187 # this function, and just call forward. 
1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks 1189 or _global_forward_hooks or _global_forward_pre_hooks): -> 1190 return forward_call(*input, **kwargs) 1191 # Do not call functions when jit is used 1192 full_backward_hooks, non_full_backward_hooks = [], [] File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\torch\nn\modules\loss.py:720, in BCEWithLogitsLoss.forward(self, input, target) 719 def forward(self, input: Tensor, target: Tensor) -> Tensor: --> 720 return F.binary_cross_entropy_with_logits(input, target, 721 self.weight, 722 pos_weight=self.pos_weight, 723 reduction=self.reduction) File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\torch\nn\functional.py:3160, in binary_cross_entropy_with_logits(input, target, weight, size_average, reduce, reduction, pos_weight) 3157 reduction_enum = _Reduction.get_enum(reduction) 3159 if not (target.size() == input.size()): -> 3160 raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size())) 3162 return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum) ValueError: Target size (torch.Size([8, 1])) must be the same as input size (torch.Size([8, 4])) This is my code: # define batch_size batch_size = 8 # Create the DataLoader for training set train_data = TensorDataset(train_inputs, train_masks, torch.tensor(train_labels)) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size) # Create the DataLoader for test set validation_data = TensorDataset(validation_inputs, validation_masks, torch.tensor(validation_labels)) validation_sampler = SequentialSampler(validation_data) validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size) import torch from transformers import AutoTokenizer, XLMRobertaForSequenceClassification, AdamW, get_linear_schedule_with_warmup tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large") model = XLMRobertaForSequenceClassification.from_pretrained( "xlm-roberta-large", problem_type="multi_label_classification", num_labels = len(set(target)), ) model.resize_token_embeddings(len(tokenizer)) # Optimizer & Learning Rate Scheduler optimizer = AdamW(model.parameters(), lr = 2e-5, eps = 1e-8 ) epochs = 4 # Total number of training steps is number of batches * number of epochs. 
total_steps = len(train_dataloader) * epochs # Create the learning rate scheduler scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, num_training_steps = total_steps) import time # Store the average loss after each epoch loss_values = [] # number of total steps for each epoch print('total steps per epoch: ', len(train_dataloader) / batch_size) # looping over epochs for epoch_i in range(0, epochs): print('training on epoch: ', epoch_i) # set start time t0 = time.time() # reset total loss total_loss = 0 # model in training model.train() # loop through batch for step, batch in enumerate(train_dataloader): # Progress update every 50 step if step % 50 == 0 and not step == 0: print('training on step: ', step) print('total time used is: {0:.2f} s'.format(time.time() - t0)) # load data from dataloader b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) b_labels = b_labels.unsqueeze(1) # clear any previously calculated gradients model.zero_grad() # get outputs b_labels = b_labels.type(torch.LongTensor) outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels) # get loss loss = outputs[0] outputs.shape # total loss total_loss += loss.item() # clip the norm of the gradients to 1.0. torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # update optimizer optimizer.step() # update learning rate scheduler.step() # Calculate the average loss over the training data. avg_train_loss = total_loss / len(train_dataloader) # Store the loss value for plotting the learning curve. loss_values.append(avg_train_loss) print("average training loss: {0:.2f}".format(avg_train_loss)) I have a batch size of 8, and 4 classes for the target. While trying to fix it I found out it's returning a tuple. So it is outputting a tuple which does not match the output dimension, but I don't know how to fix it. Without b_labels = b_labels.unsqueeze(1) It was returning ValueError: Target size (torch.Size([8])) must be the same as input size (torch.Size([8, 4])) A: It seems that the model waits for a 2-dimensional input object, but you give a single-dimensional instead. It would help if you could show here your Dataset class as well, in order to have a better understanding of the batch structure.
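A hedged sketch of the usual fix for this exact mismatch: with problem_type="multi_label_classification" the model applies BCEWithLogitsLoss, which expects float targets of shape (batch, num_labels). If every tweet has exactly one of the 4 classes, either drop problem_type so the (8,) integer labels go through cross-entropy, or convert them to a multi-hot float tensor; the label values below are made up for illustration.

import torch
import torch.nn.functional as F

b_labels = torch.tensor([0, 3, 1, 2, 0, 1, 3, 2])  # hypothetical class ids, shape (8,)

# Option A: single-label data -- remove problem_type="multi_label_classification"
# from from_pretrained and pass this (8,) long tensor directly as labels.

# Option B: keep multi-label mode -- BCEWithLogitsLoss needs (8, 4) float targets:
multi_hot = F.one_hot(b_labels, num_classes=4).float()
print(multi_hot.shape)  # torch.Size([8, 4]), matching the logits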
ValueError: Target size (torch.Size([8, 1])) must be the same as input size (torch.Size([8, 4]))
I'm trying to train xlm roberta base for multi label text classification on my dataset of tweets, but I keep getting the following error: --------------------------------------------------------------------------- ValueError Traceback (most recent call last) Cell In [38], line 36 33 b_labels = b_labels.type(torch.LongTensor) 34 #b_labels = torch.nn.functional.one_hot(b_labels.to(torch.LongTensor), 4) 35 #one_hot_label = one_hot_label.float() ---> 36 outputs = model(b_input_ids, 37 token_type_ids=None, 38 attention_mask=b_input_mask, 39 labels=b_labels) 40 # get loss 41 loss = outputs[0] File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\torch\nn\modules\module.py:1190, in Module._call_impl(self, *input, **kwargs) 1186 # If we don't have any hooks, we want to skip the rest of the logic in 1187 # this function, and just call forward. 1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks 1189 or _global_forward_hooks or _global_forward_pre_hooks): -> 1190 return forward_call(*input, **kwargs) 1191 # Do not call functions when jit is used 1192 full_backward_hooks, non_full_backward_hooks = [], [] File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\transformers\models\roberta\modeling_roberta.py:1245, in RobertaForSequenceClassification.forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, labels, output_attentions, output_hidden_states, return_dict) 1243 elif self.config.problem_type == "multi_label_classification": 1244 loss_fct = BCEWithLogitsLoss() -> 1245 loss = loss_fct(logits, labels) 1247 if not return_dict: 1248 output = (logits,) + outputs[2:] File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\torch\nn\modules\module.py:1190, in Module._call_impl(self, *input, **kwargs) 1186 # If we don't have any hooks, we want to skip the rest of the logic in 1187 # this function, and just call forward. 
1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks 1189 or _global_forward_hooks or _global_forward_pre_hooks): -> 1190 return forward_call(*input, **kwargs) 1191 # Do not call functions when jit is used 1192 full_backward_hooks, non_full_backward_hooks = [], [] File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\torch\nn\modules\loss.py:720, in BCEWithLogitsLoss.forward(self, input, target) 719 def forward(self, input: Tensor, target: Tensor) -> Tensor: --> 720 return F.binary_cross_entropy_with_logits(input, target, 721 self.weight, 722 pos_weight=self.pos_weight, 723 reduction=self.reduction) File ~\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\torch\nn\functional.py:3160, in binary_cross_entropy_with_logits(input, target, weight, size_average, reduce, reduction, pos_weight) 3157 reduction_enum = _Reduction.get_enum(reduction) 3159 if not (target.size() == input.size()): -> 3160 raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size())) 3162 return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum) ValueError: Target size (torch.Size([8, 1])) must be the same as input size (torch.Size([8, 4])) This is my code: # define batch_size batch_size = 8 # Create the DataLoader for training set train_data = TensorDataset(train_inputs, train_masks, torch.tensor(train_labels)) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size) # Create the DataLoader for test set validation_data = TensorDataset(validation_inputs, validation_masks, torch.tensor(validation_labels)) validation_sampler = SequentialSampler(validation_data) validation_dataloader = DataLoader(validation_data, sampler=validation_sampler, batch_size=batch_size) import torch from transformers import AutoTokenizer, XLMRobertaForSequenceClassification, AdamW, get_linear_schedule_with_warmup tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large") model = XLMRobertaForSequenceClassification.from_pretrained( "xlm-roberta-large", problem_type="multi_label_classification", num_labels = len(set(target)), ) model.resize_token_embeddings(len(tokenizer)) # Optimizer & Learning Rate Scheduler optimizer = AdamW(model.parameters(), lr = 2e-5, eps = 1e-8 ) epochs = 4 # Total number of training steps is number of batches * number of epochs. 
total_steps = len(train_dataloader) * epochs # Create the learning rate scheduler scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, num_training_steps = total_steps) import time # Store the average loss after each epoch loss_values = [] # number of total steps for each epoch print('total steps per epoch: ', len(train_dataloader) / batch_size) # looping over epochs for epoch_i in range(0, epochs): print('training on epoch: ', epoch_i) # set start time t0 = time.time() # reset total loss total_loss = 0 # model in training model.train() # loop through batch for step, batch in enumerate(train_dataloader): # Progress update every 50 step if step % 50 == 0 and not step == 0: print('training on step: ', step) print('total time used is: {0:.2f} s'.format(time.time() - t0)) # load data from dataloader b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) b_labels = b_labels.unsqueeze(1) # clear any previously calculated gradients model.zero_grad() # get outputs b_labels = b_labels.type(torch.LongTensor) outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels) # get loss loss = outputs[0] outputs.shape # total loss total_loss += loss.item() # clip the norm of the gradients to 1.0. torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # update optimizer optimizer.step() # update learning rate scheduler.step() # Calculate the average loss over the training data. avg_train_loss = total_loss / len(train_dataloader) # Store the loss value for plotting the learning curve. loss_values.append(avg_train_loss) print("average training loss: {0:.2f}".format(avg_train_loss)) I have a batch size of 8, and 4 classes for the target. While trying to fix it I found out it's returning a tuple. So it is outputting a tuple which does not match the output dimension, but I don't know how to fix it. Without b_labels = b_labels.unsqueeze(1) It was returning ValueError: Target size (torch.Size([8])) must be the same as input size (torch.Size([8, 4]))
[ "It seems that the model waits for a 2-dimensional input object, but you give a single-dimensional instead. It would help if you could show here your Dataset class as well, in order to have a better understanding of the batch structure.\n" ]
[ 0 ]
[]
[]
[ "huggingface_transformers", "machine_learning", "python", "pytorch" ]
stackoverflow_0074531373_huggingface_transformers_machine_learning_python_pytorch.txt
Q: Convert list of tuples to list? How do I convert [(1,), (2,), (3,)] to [1, 2, 3] A: Using simple list comprehension: e = [(1,), (2,), (3,)] [i[0] for i in e] will give you: [1, 2, 3] A: @Levon's solution works perfectly for your case. As a side note, if you have variable number of elements in the tuples, you can also use chain from itertools. >>> a = [(1, ), (2, 3), (4, 5, 6)] >>> from itertools import chain >>> list(chain(a)) [(1,), (2, 3), (4, 5, 6)] >>> list(chain(*a)) [1, 2, 3, 4, 5, 6] >>> list(chain.from_iterable(a)) # More efficient version than unpacking [1, 2, 3, 4, 5, 6] A: Here is another alternative if you can have a variable number of elements in the tuples: >>> a = [(1,), (2, 3), (4, 5, 6)] >>> [x for t in a for x in t] [1, 2, 3, 4, 5, 6] This is basically just a shortened form of the following loops: result = [] for t in a: for x in t: result.append(x) A: >>> a = [(1,), (2,), (3,)] >>> zip(*a)[0] (1, 2, 3) For a list: >>> list(zip(*a)[0]) [1, 2, 3] A: >>> a = [(1,), (2,), (3,)] >>> b = map(lambda x: x[0], a) >>> b [1, 2, 3] With python3, you have to put the list(..) function to the output of map(..), i.e. b = list(map(lambda x: x[0], a)) This is the best solution in one line using python built-in functions. A: You can also use sum function as follows: e = [(1,), (2,), (3,)] e_list = list(sum(e, ())) And it also works with list of lists to convert it into a single list, but you will need to use it as follow: e = [[1, 2], [3, 4], [5, 6]] e_list = list(sum(e, [])) This will give you [1, 2, 3, 4, 5, 6] A: There's always a way to extract a list from another list by ...for...in.... In this case it would be: [i[0] for i in e] A: Using operator or sum >>> from functools import reduce ### If python 3 >>> import operator >>> a = [(1,), (2,), (3,)] >>> list(reduce(operator.concat, a)) [1, 2, 3] (OR) >>> list(sum(a,())) [1, 2, 3] >>> If in python > 3 please do the import of reduce from functools like from functools import reduce https://docs.python.org/3/library/functools.html#functools.reduce A: You can also unpack the tuple in the list comprehension: e = [(1,), (2,), (3,)] [i for (i,) in e] will still give: [1, 2, 3] A: One Liner yo! list(*zip(*[(1,), (2,), (3,)])) A: In these situations I like to do: a = [(1,), (2,), (3,)] new_a = [element for tup in a for element in tup] This works even if your tuples have more than one element. This is equivalent to doing this: a = [(1,), (2,), (3,)] new_a = [] for tup in a: for element in tup: new_a.append(element) A: If it is already a numpy array, use ravel() method which is more faster than list comprehension. If it is already a list, list comprehension is better. Most of the answers above only prints the first element not all the elements For numpy arrays #arr = np.array([(1,2), (2,3), (3,4)]) #faster than list comprehension arr.ravel().tolist() #output => [1,2,2,3,3,4] For list list_ = [(1,2), (2,3), (3,4)] [x for y in list_ for x in y] #output => [1,2,2,3,3,4]
Convert list of tuples to list?
How do I convert [(1,), (2,), (3,)] to [1, 2, 3]
[ "Using simple list comprehension:\ne = [(1,), (2,), (3,)]\n[i[0] for i in e]\n\nwill give you:\n[1, 2, 3]\n\n", "@Levon's solution works perfectly for your case.\nAs a side note, if you have variable number of elements in the tuples, you can also use chain from itertools.\n>>> a = [(1, ), (2, 3), (4, 5, 6)]\n>>> from itertools import chain\n>>> list(chain(a))\n[(1,), (2, 3), (4, 5, 6)]\n>>> list(chain(*a))\n[1, 2, 3, 4, 5, 6]\n>>> list(chain.from_iterable(a)) # More efficient version than unpacking\n[1, 2, 3, 4, 5, 6]\n\n", "Here is another alternative if you can have a variable number of elements in the tuples:\n>>> a = [(1,), (2, 3), (4, 5, 6)]\n>>> [x for t in a for x in t]\n[1, 2, 3, 4, 5, 6]\n\nThis is basically just a shortened form of the following loops:\nresult = []\nfor t in a:\n for x in t:\n result.append(x)\n\n", ">>> a = [(1,), (2,), (3,)]\n>>> zip(*a)[0]\n(1, 2, 3)\n\nFor a list:\n>>> list(zip(*a)[0])\n[1, 2, 3]\n\n", ">>> a = [(1,), (2,), (3,)]\n>>> b = map(lambda x: x[0], a)\n>>> b\n[1, 2, 3]\n\nWith python3, you have to put the list(..) function to the output of map(..), i.e. \nb = list(map(lambda x: x[0], a))\n\nThis is the best solution in one line using python built-in functions.\n", "You can also use sum function as follows:\ne = [(1,), (2,), (3,)] \ne_list = list(sum(e, ()))\n\nAnd it also works with list of lists to convert it into a single list, but you will need to use it as follow:\ne = [[1, 2], [3, 4], [5, 6]]\ne_list = list(sum(e, []))\n\nThis will give you [1, 2, 3, 4, 5, 6]\n", "There's always a way to extract a list from another list by ...for...in.... In this case it would be:\n[i[0] for i in e]\n", "Using operator or sum\n>>> from functools import reduce ### If python 3\n>>> import operator\n>>> a = [(1,), (2,), (3,)]\n>>> list(reduce(operator.concat, a))\n[1, 2, 3]\n\n(OR)\n>>> list(sum(a,()))\n[1, 2, 3]\n>>> \n\nIf in python > 3 please do the import of reduce from functools \nlike from functools import reduce\nhttps://docs.python.org/3/library/functools.html#functools.reduce\n", "You can also unpack the tuple in the list comprehension:\ne = [(1,), (2,), (3,)]\n[i for (i,) in e]\n\nwill still give:\n[1, 2, 3]\n\n", "One Liner yo! \nlist(*zip(*[(1,), (2,), (3,)]))\n\n", "In these situations I like to do:\na = [(1,), (2,), (3,)]\nnew_a = [element for tup in a for element in tup]\n\nThis works even if your tuples have more than one element. This is equivalent to doing this:\na = [(1,), (2,), (3,)]\nnew_a = []\nfor tup in a:\n for element in tup:\n new_a.append(element)\n\n", "If it is already a numpy array, use ravel() method which is more faster than list comprehension.\nIf it is already a list, list comprehension is better.\n\nMost of the answers above only prints the first element not all the elements\n\n\nFor numpy arrays\n#arr = np.array([(1,2), (2,3), (3,4)])\n\n#faster than list comprehension\narr.ravel().tolist()\n\n#output => [1,2,2,3,3,4]\n\nFor list\nlist_ = [(1,2), (2,3), (3,4)]\n\n[x for y in list_ for x in y]\n\n#output => [1,2,2,3,3,4]\n\n" ]
[ 95, 66, 31, 7, 7, 6, 4, 4, 3, 2, 0, 0 ]
[]
[]
[ "python" ]
stackoverflow_0010941229_python.txt
Q: Web Scraping with table that can be changed I have succesfully managed to set together a script now that extracts some information from a table on this website: https://www.nordpoolgroup.com/en/Market-data1/Power-system-data/Production1/Wind-Power-Prognosis/SE/Hourly/?view=table Now, I want to do this for all dates of 2021. I suppose I have to use the input id="data-end-date" and activate some kind of button pusher, but I don't understand how this can be done theoretically and have not managed to find any similar questions. options = webdriver.ChromeOptions() options.add_experimental_option("detach", True)#optional webdriver_service = Service("./chromedriver") #Your chromedriver path driver = webdriver.Chrome(service=webdriver_service,options=options) data = [] driver.get('https://www.nordpoolgroup.com/en/Market-data1/Power-system-data/Production1/Wind-Power-Prognosis/SE/Hourly/?view=table') time.sleep(3) WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@class="pure-button"]'))).click() time.sleep(1) soup = BeautifulSoup(driver.page_source,"html.parser") df1 = pd.read_html(str(soup))[1] df1.drop(columns=['22-11-2022', 'SE'], inplace=True) df1.drop(range(24,29), axis=0, inplace=True) print(df1) Thank you. A: You would need to control the date picker and loop over all the dates. An alternative solution would be to look into the browsers dev tools and analyze the traffic from your client to the server. There you see that with each change in the date picker a GET request to the server gets fired and a json with all the data comes back. Luckily the GET request does not have any requirements and works even in the browser: https://www.nordpoolgroup.com/api/marketdata/page/576?currency=,EUR,EUR,EUR&endDate=15-11-2022 And, as a url parameter, you can even pass the date you want. The response is a json including the whole table. You just need to loop over all the dates from 2021 and parse that json data.
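A minimal sketch of the answer's suggestion, assuming the endpoint and its endDate parameter behave exactly as described above (not verified here, and the JSON layout may change):

import requests
import pandas as pd

base_url = "https://www.nordpoolgroup.com/api/marketdata/page/576"
raw_by_date = {}
for day in pd.date_range("2021-01-01", "2021-12-31", freq="D"):
    params = {"currency": ",EUR,EUR,EUR", "endDate": day.strftime("%d-%m-%Y")}
    resp = requests.get(base_url, params=params, timeout=30)
    resp.raise_for_status()
    raw_by_date[day.date()] = resp.json()  # parse the table rows out of this JSON as needed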
Web Scraping with table that can be changed
I have succesfully managed to set together a script now that extracts some information from a table on this website: https://www.nordpoolgroup.com/en/Market-data1/Power-system-data/Production1/Wind-Power-Prognosis/SE/Hourly/?view=table Now, I want to do this for all dates of 2021. I suppose I have to use the input id="data-end-date" and activate some kind of button pusher, but I don't understand how this can be done theoretically and have not managed to find any similar questions. options = webdriver.ChromeOptions() options.add_experimental_option("detach", True)#optional webdriver_service = Service("./chromedriver") #Your chromedriver path driver = webdriver.Chrome(service=webdriver_service,options=options) data = [] driver.get('https://www.nordpoolgroup.com/en/Market-data1/Power-system-data/Production1/Wind-Power-Prognosis/SE/Hourly/?view=table') time.sleep(3) WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@class="pure-button"]'))).click() time.sleep(1) soup = BeautifulSoup(driver.page_source,"html.parser") df1 = pd.read_html(str(soup))[1] df1.drop(columns=['22-11-2022', 'SE'], inplace=True) df1.drop(range(24,29), axis=0, inplace=True) print(df1) Thank you.
[ "You would need to control the date picker and loop over all the dates. An alternative solution would be to look into the browsers dev tools and analyze the traffic from your client to the server.\nThere you see that with each change in the date picker a GET request to the server gets fired and a json with all the data comes back. Luckily the GET request does not have any requirements and works even in the browser:\nhttps://www.nordpoolgroup.com/api/marketdata/page/576?currency=,EUR,EUR,EUR&endDate=15-11-2022\nAnd, as a url parameter, you can even pass the date you want.\nThe response is a json including the whole table. You just need to loop over all the dates from 2021 and parse that json data.\n" ]
[ 1 ]
[]
[]
[ "python", "selenium", "web_scraping" ]
stackoverflow_0074530651_python_selenium_web_scraping.txt
Q: Implement lambda function from python to pyspark-Pyspark Python: I have a dataframe that I am applying a lambda function to check the conditions based on the values of a column. In Pandas it looks like this(Example): new_df = df1.merge(df2, how='left', left_on='lkey', right_on='rkey') lkey value_x rkey value_y col1 col2 col3 col4 col5 0 foo one foo five 0 1 3 0 5 1 foo one foo NaN 1 0 2 4 0 2 bar two bar six 2 6 3 0 0 3 foo five foo five 7 2 0 0 0 4 foo five foo NaN 2 0 0 0 0 5 bbb four bar two 0 0 0 0 0 def get_final_au(row): if row['col5'] == 0: if row['col4'] == 0: if row['col3'] == 0: if row['col2'] == 0: return 'NOT FOUND' else: return row['col2'] else: return row['col3'] else: return row['col4'] else: return row['col5'] new_df['col6'] = new_df.apply (lambda row: get_final_au(row),axis=1) Expected Output: lkey value_x rkey value_y col1 col2 col3 col4 col5 col6 0 foo one foo five 0 1 3 0 5 5 1 foo one foo NaN 1 0 2 4 0 4 2 bar two bar six 2 6 3 0 0 3 3 foo five foo five 7 2 0 0 0 2 4 foo five foo NaN 2 0 0 0 0 Not FOUND 5 bbb four bar two 0 0 0 0 0 Not FOUND Pyspark: How would I do something similar in Pyspark? new_df = new_df.withColumn('col6', ?) I have tried this but getting error. Please suggest from pyspark.sql.functions import udf def get_final_au(row): if row['col5'] != 0: return row['col5'] elif row['col4'] != 0: return row['col4'] elif row['col3'] != 0: return row['col3'] elif row['col2'] != 0: return row['col2'] else: return 'NOT FOUND' UDF_NAME = udf(lambda row: get_final_au(row), StringType()) new_df.withColumn('col6', UDF_NAME('col5','col4','col3','col2')).show(2,False) A: I think you can use UDF function OR when clause. when clause will be easier. Syntax will be like this for UDF from pyspark.sql.functions import udf def function_name(arg): # Logic # Return value # Register the UDF UDF_NAME = udf(function_name, ArgType()) df.select(UDF_NAME('col').alias('new_col')) for when clause df.withColumn("new_column", when(condition1, value).when(condition2, value).otherwise(value)) A: Possible duplicate of: Apply a function to a single column of a csv in Spark Suggestion: Modify get_final_au to this def get_final_au(row): if row['col2'] != 0: return row['col2'] elif row['col3'] != 0: return row['col3'] elif row['col4'] != 0: return row['col4'] elif row['col5'] != 0: return row['col5'] else: return 'NOT FOUND' A: udfs are the last resort. they are quite slow. Can come with the same outcome using the following cols=[ 'col1', 'col2', 'col3', 'col4','col5'] new_df.select(*[when(col(x)==0,'NOT FOUND').otherwise(col(x)).alias(x) for x in cols]).show()
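As a further sketch that avoids a UDF and still produces the single col6 column the question asks for, the if/elif fall-through can be expressed as coalesce over when expressions; it assumes col2-col5 are numeric as in the example and casts to string so the 'NOT FOUND' fallback has a compatible type.

from pyspark.sql import functions as F

priority = ["col5", "col4", "col3", "col2"]  # same order as the if/elif chain
new_df = new_df.withColumn(
    "col6",
    F.coalesce(
        *[F.when(F.col(c) != 0, F.col(c).cast("string")) for c in priority],
        F.lit("NOT FOUND"),
    ),
)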
Implement lambda function from python to pyspark-Pyspark
Python: I have a dataframe that I am applying a lambda function to check the conditions based on the values of a column. In Pandas it looks like this(Example): new_df = df1.merge(df2, how='left', left_on='lkey', right_on='rkey') lkey value_x rkey value_y col1 col2 col3 col4 col5 0 foo one foo five 0 1 3 0 5 1 foo one foo NaN 1 0 2 4 0 2 bar two bar six 2 6 3 0 0 3 foo five foo five 7 2 0 0 0 4 foo five foo NaN 2 0 0 0 0 5 bbb four bar two 0 0 0 0 0 def get_final_au(row): if row['col5'] == 0: if row['col4'] == 0: if row['col3'] == 0: if row['col2'] == 0: return 'NOT FOUND' else: return row['col2'] else: return row['col3'] else: return row['col4'] else: return row['col5'] new_df['col6'] = new_df.apply (lambda row: get_final_au(row),axis=1) Expected Output: lkey value_x rkey value_y col1 col2 col3 col4 col5 col6 0 foo one foo five 0 1 3 0 5 5 1 foo one foo NaN 1 0 2 4 0 4 2 bar two bar six 2 6 3 0 0 3 3 foo five foo five 7 2 0 0 0 2 4 foo five foo NaN 2 0 0 0 0 Not FOUND 5 bbb four bar two 0 0 0 0 0 Not FOUND Pyspark: How would I do something similar in Pyspark? new_df = new_df.withColumn('col6', ?) I have tried this but getting error. Please suggest from pyspark.sql.functions import udf def get_final_au(row): if row['col5'] != 0: return row['col5'] elif row['col4'] != 0: return row['col4'] elif row['col3'] != 0: return row['col3'] elif row['col2'] != 0: return row['col2'] else: return 'NOT FOUND' UDF_NAME = udf(lambda row: get_final_au(row), StringType()) new_df.withColumn('col6', UDF_NAME('col5','col4','col3','col2')).show(2,False)
[ "I think you can use UDF function OR when clause.\nwhen clause will be easier.\nSyntax will be like this for UDF\nfrom pyspark.sql.functions import udf\n\ndef function_name(arg):\n # Logic\n # Return value\n\n# Register the UDF\nUDF_NAME = udf(function_name, ArgType())\n\ndf.select(UDF_NAME('col').alias('new_col'))\n\nfor when clause\n df.withColumn(\"new_column\", when(condition1, value).when(condition2, value).otherwise(value))\n\n", "Possible duplicate of: Apply a function to a single column of a csv in Spark\nSuggestion:\nModify get_final_au to this\ndef get_final_au(row):\n if row['col2'] != 0:\n return row['col2']\n elif row['col3'] != 0:\n return row['col3']\n elif row['col4'] != 0:\n return row['col4']\n elif row['col5'] != 0:\n return row['col5']\n else:\n return 'NOT FOUND'\n\n", "udfs are the last resort. they are quite slow. Can come with the same outcome using the following\ncols=[ 'col1', 'col2', 'col3', 'col4','col5']\nnew_df.select(*[when(col(x)==0,'NOT FOUND').otherwise(col(x)).alias(x) for x in cols]).show()\n\n" ]
[ 1, 0, 0 ]
[]
[]
[ "apache_spark_sql", "pyspark", "python", "user_defined_functions" ]
stackoverflow_0069061074_apache_spark_sql_pyspark_python_user_defined_functions.txt
Q: Filter dictionary based on value in list of nested dictionary I have the following dictionary that contains a list, in which the individual elements are nested dictionaries. id_config = { 'expectations' : [ { "kwargs": { "column": "id", "value": 14 }, "expectation_type": "expect_column_value_lengths_to_equal", "meta": {} }, { "kwargs": { "column": "id_person", "regex": "^WPS*|^APS*" }, "expectation_type": "expect_column_values_to_match_regex", "meta": {} }, { "kwargs": { "column": "some_other_id", "value": 19 }, "expectation_type": "expect_column_value_lengths_to_equal", "meta": {} } ] } I would like to find a way to filter out all the relevant list elements that contain the the values ['id_person', 'some_other_id'] under the nested key 'column' and return the dictionary format. The code below works. def filter_nested_dict_list(nested_config, value_to_find): filtered_nested_config = {'expectations':[]} for position, element in enumerate(nested_config['expectations']): if value_to_find in element['kwargs']['column']: filtered_nested_config['expectations'].append(nested_config['expectations'][position]) return filtered_nested_config col_expectations = ['id_person', 'some_other_id'] filtered_dict = [] for col in col_expectations: filtered_dict.append(filter_nested_dict_list(id_config, col)) with the result being { 'expectations' : [ { "kwargs": { "column": "id_person", "regex": "^WPS*|^APS*" }, "expectation_type": "expect_column_values_to_match_regex", "meta": {} }, { "kwargs": { "column": "some_other_id", "value": 19 }, "expectation_type": "expect_column_value_lengths_to_equal", "meta": {} } ] } In this example is works well with two values_to_find, however as the list grows I'd rather not have so many for loops. So my question is- is there a neater way than running those for loops and hardcoding the nested dictionary structure? Any improvement suggestions would be very much appreciated. Thanks! A: The pandas module can do this. You can make a pandas.DataFrame from the 'expectations' list and filter out the values you don't want quite easily. This provides some examples on how to do it.
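Staying with plain dictionaries, the two loops can also collapse into a single comprehension; this sketch assumes the column names should match exactly, whereas the original in check is a substring test.

col_expectations = {"id_person", "some_other_id"}

filtered_config = {
    "expectations": [
        exp for exp in id_config["expectations"]
        if exp["kwargs"].get("column") in col_expectations
    ]
}
print(filtered_config)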
Filter dictionary based on value in list of nested dictionary
I have the following dictionary that contains a list, in which the individual elements are nested dictionaries. id_config = { 'expectations' : [ { "kwargs": { "column": "id", "value": 14 }, "expectation_type": "expect_column_value_lengths_to_equal", "meta": {} }, { "kwargs": { "column": "id_person", "regex": "^WPS*|^APS*" }, "expectation_type": "expect_column_values_to_match_regex", "meta": {} }, { "kwargs": { "column": "some_other_id", "value": 19 }, "expectation_type": "expect_column_value_lengths_to_equal", "meta": {} } ] } I would like to find a way to filter out all the relevant list elements that contain the the values ['id_person', 'some_other_id'] under the nested key 'column' and return the dictionary format. The code below works. def filter_nested_dict_list(nested_config, value_to_find): filtered_nested_config = {'expectations':[]} for position, element in enumerate(nested_config['expectations']): if value_to_find in element['kwargs']['column']: filtered_nested_config['expectations'].append(nested_config['expectations'][position]) return filtered_nested_config col_expectations = ['id_person', 'some_other_id'] filtered_dict = [] for col in col_expectations: filtered_dict.append(filter_nested_dict_list(id_config, col)) with the result being { 'expectations' : [ { "kwargs": { "column": "id_person", "regex": "^WPS*|^APS*" }, "expectation_type": "expect_column_values_to_match_regex", "meta": {} }, { "kwargs": { "column": "some_other_id", "value": 19 }, "expectation_type": "expect_column_value_lengths_to_equal", "meta": {} } ] } In this example is works well with two values_to_find, however as the list grows I'd rather not have so many for loops. So my question is- is there a neater way than running those for loops and hardcoding the nested dictionary structure? Any improvement suggestions would be very much appreciated. Thanks!
[ "The pandas module can do this.\nYou can make a pandas.DataFrame from the 'expectations' list and filter out the values you don't want quite easily.\nThis provides some examples on how to do it.\n" ]
[ 1 ]
[]
[]
[ "dictionary", "list", "python" ]
stackoverflow_0074531924_dictionary_list_python.txt
Q: How to print all rows using iterrows method in pandas module? birthdays.csv contains the data: name,email,year,month,day Vishal,abc@email.com,2002,11,22 Riya,xyz@mail.com,2003,11,22 with open("birthdays.csv", "r") as file: data = pandas.read_csv(file) birthdays_dict = {(row['month'], row['day']): row for (index, row) in data.iterrows()} print(birthdays_dict) output: {(11, 22): name Riya email xyz@mail.com year 2003 month 11 day 22 Name: 1, dtype: object} expecting to get both rows I was trying to solve a question of printing data using iterrows, but I could not print the first row. Can anyone please explain how to print both rows using iterrows itself? A: Because birthdays_dict is a dictionary, and keys in a dictionary are unique. Your (row['month'], row['day']) pair is the same in both rows, so the second row overrides the first row in birthdays_dict. You need to use a list or a different key (e.g. row['year'], row['month'], row['day']). Side note: you can use read_csv with the file name instead of a file object: data = pandas.read_csv("birthdays.csv") birthdays_dict = {(row['year'], row['month'], row['day']): row for (index, row) in data.iterrows()} print(birthdays_dict)
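If the intent is to keep every row that shares the same (month, day) rather than switch to a different key, a list per key also works; this sketch reuses the data frame loaded above.

birthdays_dict = {}
for _, row in data.iterrows():
    birthdays_dict.setdefault((row["month"], row["day"]), []).append(row)

for key, rows in birthdays_dict.items():
    print(key, [r["name"] for r in rows])  # both Vishal and Riya appear under (11, 22)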
How to print all rows using iterrows method in pandas module?
birthdays.csv contains the data: name,email,year,month,day Vishal,abc@email.com,2002,11,22 Riya,xyz@mail.com,2003,11,22 with open("birthdays.csv", "r") as file: data = pandas.read_csv(file) birthdays_dict = {(row['month'], row['day']): row for (index, row) in data.iterrows()} print(birthdays_dict) output: {(11, 22): name Riya email xyz@mail.com year 2003 month 11 day 22 Name: 1, dtype: object} expecting to get both rows I was trying to solve a question of printing data using iterrows, but I could not print the first row. Can anyone please explain how to print both rows using iterrows itself?
[ "Because birthdays_dict is a dictionary. And keys in the dictionary are unique. While your row['month'], row['day'] pair is the same in both rows, so the second row overrides the first row in birthdays_dict. You need to use a list or different key (e.x. row['year'], row['month'], row['day']).\nSide note, you can use read_csv with file name instead of file:\ndata = pandas.read_csv(\"birthdays.csv\")\nbirthdays_dict = {(row['year'], row['month'], row['day']): row for (index, row) in data.iterrows()}\nprint(birthdays_dict)\n\n" ]
[ 0 ]
[]
[]
[ "pandas", "python" ]
stackoverflow_0074531906_pandas_python.txt
Q: Connect to Milvus standalone server (in docker container) from another docker container on the same host When I run Milvus in standalone mode on docker (by executing docker-compose on the default Milvus docker-compose.yml file, resulting in the three containers being created), I cannot connect to the Milvus server from a task running in another docker container on the same host. I have configured this container to be on the same network as the Milvus server, and I can ping the Milvus server from this container via the Milvus server's IP. In the task container I run: connections.connect( alias="default", host='192.168.192.4', port='19530', secure=False ) The error log shows: Traceback (most recent call last): File "task.py", line 45, in secure=True File "/usr/local/lib/python3.7/site-packages/pymilvus/orm/connections.py", line 262, in connect connect_milvus(**kwargs, password=password) File "/usr/local/lib/python3.7/site-packages/pymilvus/orm/connections.py", line 233, in connect_milvus gh._wait_for_channel_ready() File "/usr/local/lib/python3.7/site-packages/pymilvus/client/grpc_handler.py", line 118, in _wait_for_channel_ready raise MilvusException(Status.CONNECT_FAILED, f'Fail connecting to server on {self._address}. Timeout') pymilvus.exceptions.MilvusException: <MilvusException: (code=2, message=Fail connecting to server on 192.168.192.4:19530. Timeout)> 192.168.192.4 is the ip address of the milvus-standalone container. A: Turns out this is not a Milvus issue. The problem was caused by our corporate network, and the proxy requirement. In the dockerfile, I need to set the proxy settings to be able to pull images. However, this sets the proxy settings during builld and for the container. These proxy settings prevented communication between the containers. The proxy setting needs to be reset in the dockerfile. The fix looked like this: FROM python:3.9.12 ENV https_proxy <proxy settings> COPY requirements.txt / RUN pip3 install --proxy <proxy settings> -r requirements.txt COPY task.py / ENV https_proxy "" CMD ["python3", "-u", "task.py"]
Connect to Milvus standalone server (in docker container) from another docker container on the same host
When I run Milvus in standalone mode on docker (by executing docker-compose on the default Milvus docker-compose.yml file, resulting in the three containers being created), I cannot connect to the Milvus server from a task running in another docker container on the same host. I have configured this container to be on the same network as the Milvus server, and I can ping the Milvus server from this container via the Milvus server's IP. In the task container I run: connections.connect( alias="default", host='192.168.192.4', port='19530', secure=False ) The error log shows: Traceback (most recent call last): File "task.py", line 45, in secure=True File "/usr/local/lib/python3.7/site-packages/pymilvus/orm/connections.py", line 262, in connect connect_milvus(**kwargs, password=password) File "/usr/local/lib/python3.7/site-packages/pymilvus/orm/connections.py", line 233, in connect_milvus gh._wait_for_channel_ready() File "/usr/local/lib/python3.7/site-packages/pymilvus/client/grpc_handler.py", line 118, in _wait_for_channel_ready raise MilvusException(Status.CONNECT_FAILED, f'Fail connecting to server on {self._address}. Timeout') pymilvus.exceptions.MilvusException: <MilvusException: (code=2, message=Fail connecting to server on 192.168.192.4:19530. Timeout)> 192.168.192.4 is the ip address of the milvus-standalone container.
[ "Turns out this is not a Milvus issue. The problem was caused by our corporate network, and the proxy requirement. In the dockerfile, I need to set the proxy settings to be able to pull images. However, this sets the proxy settings during builld and for the container. These proxy settings prevented communication between the containers. The proxy setting needs to be reset in the dockerfile. The fix looked like this:\nFROM python:3.9.12\n\nENV https_proxy <proxy settings>\n\nCOPY requirements.txt /\n\nRUN pip3 install --proxy <proxy settings> -r requirements.txt\n\nCOPY task.py /\n\nENV https_proxy \"\"\n\nCMD [\"python3\", \"-u\", \"task.py\"]\n\n" ]
[ 0 ]
[]
[]
[ "docker", "milvus", "python" ]
stackoverflow_0074520985_docker_milvus_python.txt
Q: Is 'input' a keyword in Python? I'm new to Python. I'm writing some code in Sublime and it highlights the word 'input' I use it as a variable name and it seems to work, so I wondered whether it may be a keyword in a newer version. (I'm currently using 2.7.5) A: No, input is not a keyword. Instead, it is a built-in function. And yes, you can create a variable with the name input. But please don't. Doing so is a bad practice because it overshadows the built-in (makes it unusable in the current scope). If you must use the name input, the convention is to place an underscore after it: input_ = input() A: input is not a keyword, it's a function provided by the standard library and included in the builtins module (this module provides globally accessible variables and functions.): >>> import builtins >>> input is builtins.input True And sure, you can create a variable with the name input. It's perfectly fine for experienced and intermediate users to do so because they can easily figure that the input name has been re-used. Use the best name for the content/intent you want to convey. If input is the best then use it (provided you don't need the builtin), and don't confuse readers with names like input_ (beginners will wonder whether there's a special meaning to a trailing underscore) But if you're a beginner please don't re-define builtins, by overshadowing the built-in input (overshadowing a variable makes it unusable in the current scope) you'll end-up with this error when calling input() later on (in the same scope) and you may struggle to figure out why: TypeError: 'str' object is not callable Beginners should instead use another name, preferably not input_ because underscores have special meanings in python, as a result other beginners will wonder whether there's a special meaning for that trailing underscore (is it the same or related to leading underscores? or maybe to double underscores?) In another comment someone stated that it is a bad practice to overshadow variables and he even came up with a convention that he borrowed from another use. After all, if overshadowing variables were a really bad practice, the python language designers wouldn't have allowed it in the first place, but they know and recognize that it has the potential to improve readability, just as it does in other languages. So they allowed it, and it also ease transition to Python from other languages where overshadowing is also allowed like C/C++, Java and even bash. note: the conventional use for a trailing underscore is where it's impossible to use a name, like the keyword class in Python. Then you'd use class_ (but like I wrote above, it's best to avoid it in Python because underscores can confuse beginners as they can convey special meanings)
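A short demonstration of the shadowing behaviour described above; it assumes Python 3, where the module is called builtins (Python 2 names it __builtin__).

input = "hello"           # a variable named input now shadows the builtin in this module
# input("prompt")         # would raise: TypeError: 'str' object is not callable

import builtins
prompt_func = builtins.input  # the original function is still reachable this way

del input                 # removing the variable restores normal lookup of the builtin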
Is 'input' a keyword in Python?
I'm new to Python. I'm writing some code in Sublime and it highlights the word 'input' I use it as a variable name and it seems to work, so I wondered whether it may be a keyword in a newer version. (I'm currently using 2.7.5)
[ "No, input is not a keyword. Instead, it is a built-in function.\nAnd yes, you can create a variable with the name input. But please don't. Doing so is a bad practice because it overshadows the built-in (makes it unusable in the current scope).\nIf you must use the name input, the convention is to place an underscore after it:\ninput_ = input()\n\n", "input is not a keyword, it's a function provided by the standard library and included in the builtins module (this module provides globally accessible variables and functions.):\n>>> import builtins\n>>> input is builtins.input\nTrue\n\nAnd sure, you can create a variable with the name input. It's perfectly fine for experienced and intermediate users to do so because they can easily figure that the input name has been re-used.\nUse the best name for the content/intent you want to convey. If input is the best then use it (provided you don't need the builtin), and don't confuse readers with names like input_ (beginners will wonder whether there's a special meaning to a trailing underscore)\nBut if you're a beginner please don't re-define builtins, by overshadowing the built-in input (overshadowing a variable makes it unusable in the current scope) you'll end-up with this error when calling input() later on (in the same scope) and you may struggle to figure out why:\nTypeError: 'str' object is not callable\n\nBeginners should instead use another name, preferably not input_ because underscores have special meanings in python, as a result other beginners will wonder whether there's a special meaning for that trailing underscore (is it the same or related to leading underscores? or maybe to double underscores?)\nIn another comment someone stated that it is a bad practice to overshadow variables and he even came up with a convention that he borrowed from another use. After all, if overshadowing variables were a really bad practice, the python language designers wouldn't have allowed it in the first place, but they know and recognize that it has the potential to improve readability, just as it does in other languages. So they allowed it, and it also ease transition to Python from other languages where overshadowing is also allowed like C/C++, Java and even bash.\nnote: the conventional use for a trailing underscore is where it's impossible to use a name, like the keyword class in Python. Then you'd use class_ (but like I wrote above, it's best to avoid it in Python because underscores can confuse beginners as they can convey special meanings)\n" ]
[ 54, 0 ]
[]
[]
[ "python" ]
stackoverflow_0020670732_python.txt
Q: Get historical monthly stock close price in custom format using yfinance I need to get historical prices of the best stocks in the following format: [ {'AMZN': [ {'Sep 2022': 113}, {'Oct 2022': 102}, {'Nov 2022': 92} ]}, {'AAPL': [ {'Sep 2022': 137}, {'Oct 2022': 153}, {'Nov 2022': 147} ]}, {'MSFT': [ {'Sep 2022': 232}, {'Oct 2022': 231}, {'Nov 2022': 241} ]} ] But can't figure how to pass proper configuration to yfinance, or if it's not possible to get only close prices, what is the best way to convert output to the format I need. Played around with dataframe.to_dict("records"), but getting only prices without dates. Here is my code from datetime import date import yfinance as yf tickers = ['AMZN', 'AAPL', 'MSFT'] data = yf.download( tickers = tickers, start="2019-01-01", end=date.today().replace(day=2), interval = "1mo", group_by = 'ticker' ) A: Got it to work: [{ticker: [{str(x.strftime('%b %Y')): int(tickerdata[ticker]['Close'][x])} for x in tickerdata[ticker]['Close'].index]} for ticker in tickers]
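A hedged rewrite of that one-liner against the data frame downloaded above (the one-liner refers to a tickerdata variable that is not defined in the question); it assumes group_by='ticker' produced one column group per ticker, as in the download call.

result = []
for ticker in tickers:
    closes = data[ticker]["Close"].dropna()
    result.append(
        {ticker: [{idx.strftime("%b %Y"): int(price)} for idx, price in closes.items()]}
    )
print(result)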
Get historical monthly stock close price in custom format using yfinance
I need to get historical prices of the best stocks in the following format: [ {'AMZN': [ {'Sep 2022': 113}, {'Oct 2022': 102}, {'Nov 2022': 92} ]}, {'AAPL': [ {'Sep 2022': 137}, {'Oct 2022': 153}, {'Nov 2022': 147} ]}, {'MSFT': [ {'Sep 2022': 232}, {'Oct 2022': 231}, {'Nov 2022': 241} ]} ] But can't figure how to pass proper configuration to yfinance, or if it's not possible to get only close prices, what is the best way to convert output to the format I need. Played around with dataframe.to_dict("records"), but getting only prices without dates. Here is my code from datetime import date import yfinance as yf tickers = ['AMZN', 'AAPL', 'MSFT'] data = yf.download( tickers = tickers, start="2019-01-01", end=date.today().replace(day=2), interval = "1mo", group_by = 'ticker' )
[ "Got it to work:\n[{ticker: [{str(x.strftime('%b %Y')): int(tickerdata[ticker]['Close'][x])} for x in tickerdata[ticker]['Close'].index]} for ticker in tickers]\n\n" ]
[ 0 ]
[]
[]
[ "data_manipulation", "dataframe", "python", "python_3.x", "yfinance" ]
stackoverflow_0074524552_data_manipulation_dataframe_python_python_3.x_yfinance.txt
Q: Raspberry Pi Camera streaming to multiple clients In my project I'm making a drone with a raspberry pi. I need to stream video from my raspberry pi camera with as low latency as possible and share that stream to multiple clients. I achieved a simple stream basing on the code : import io import picamera import logging import socketserver from threading import Condition from http import server PAGE="""\ <html> <head> <title>picamera MJPEG streaming</title> </head> <body> <h1>PiCamera MJPEG Streaming</h1> <img src="stream.mjpg" width="640" height="480" /> </body> </html> """ class StreamingOutput(object): def __init__(self): self.frame = None self.buffer = io.BytesIO() self.condition = Condition() def write(self, buf): if buf.startswith(b'\xff\xd8'): # New frame, copy the existing buffer's content and notify all # clients it's available self.buffer.truncate() with self.condition: self.frame = self.buffer.getvalue() self.condition.notify_all() self.buffer.seek(0) return self.buffer.write(buf) class StreamingHandler(server.BaseHTTPRequestHandler): def do_GET(self): if self.path == '/': self.send_response(301) self.send_header('Location', '/index.html') self.end_headers() elif self.path == '/index.html': content = PAGE.encode('utf-8') self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Content-Length', len(content)) self.end_headers() self.wfile.write(content) elif self.path == '/stream.mjpg': self.send_response(200) self.send_header('Age', 0) self.send_header('Cache-Control', 'no-cache, private') self.send_header('Pragma', 'no-cache') self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME') self.end_headers() try: while True: with output.condition: output.condition.wait() frame = output.frame self.wfile.write(b'--FRAME\r\n') self.send_header('Content-Type', 'image/jpeg') self.send_header('Content-Length', len(frame)) self.end_headers() self.wfile.write(frame) self.wfile.write(b'\r\n') except Exception as e: logging.warning( 'Removed streaming client %s: %s', self.client_address, str(e)) else: self.send_error(404) self.end_headers() class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer): allow_reuse_address = True daemon_threads = True with picamera.PiCamera(resolution='640x480', framerate=24) as camera: output = StreamingOutput() camera.start_recording(output, format='mjpeg') try: address = ('', 8000) server = StreamingServer(address, StreamingHandler) server.serve_forever() finally: camera.stop_recording() The above code starts a streaming server on the raspberry pi and provides a view from picamera. The latency is around 200ms and that is really good for me, but when more clients connect the latency is increasing. With 5 users, there is already quite a lot of lag. This is the main problem in my project, because I have to stream in real time for all users. I predict 20-30 users. I'm just trying achive similar effect to youtube livestreams or google meets where multiple clients can connect and watch view from my webcam but with the least latency. So I thought about creating a second server that would be only receive one stream from my picamera and then would restream it to multiple clients without much delay. I tried to make a similar server to the one with raspberry pi based on the above code, but with the opencv. The picamera stream was intercepted with opencv and displayed, but unfortunately the latency was quite high. 
I tried also streaming to youtube via rtmp protocol with raspivid and to datarhei restreamer on docker via rtsp protocol with gstreamer, but the latency is around 2-3s and that is too much for me. Is there any way to do that or maybe other solutions? I spent a lot of time for that and I have no idea how to do that... Any advice would be helpful, because I'm newbie in video streaming. PS Sorry for my unperfect english A: Video streaming often ends up being a balance between bandwidth, latency and quality. Most online movie and live entertainment, sports etc streaming services use HSL or DASH streaming to provide the quality they need and will have a much higher latency that you are aiming for. Similarly, serving many individual clients on a small PI device is likely to overrun the devices capacity as you have seen. I think your idea of having the drone send the stream to a server which then provides streams to clients is the most viable approach, but you probably want to look at a real time streaming protocol based server. WebRTC is the most obvious current choice, I think, for this type of streaming - it is specifically designed to prioritise latency over other factors, like quality, to enable real time voice and video communications. Open source webRTC server exists which would be good starting point - e.g.: https://github.com/meetecho/janus-gateway Paid solutions also exists, like the enterprise edition of Ant Media WebRTC steaming server that lists IP Camera streaming amongst its usage scenarios: https://github.com/ant-media/Ant-Media-Server There is also a good overview of a node.js based WebRTC solution here which reportedly achieves 400ms latency if that meets your needs: https://stackoverflow.com/a/67887822/334402
Raspberry Pi Camera streaming to multiple clients
In my project I'm making a drone with a raspberry pi. I need to stream video from my raspberry pi camera with as low latency as possible and share that stream to multiple clients. I achieved a simple stream basing on the code : import io import picamera import logging import socketserver from threading import Condition from http import server PAGE="""\ <html> <head> <title>picamera MJPEG streaming</title> </head> <body> <h1>PiCamera MJPEG Streaming</h1> <img src="stream.mjpg" width="640" height="480" /> </body> </html> """ class StreamingOutput(object): def __init__(self): self.frame = None self.buffer = io.BytesIO() self.condition = Condition() def write(self, buf): if buf.startswith(b'\xff\xd8'): # New frame, copy the existing buffer's content and notify all # clients it's available self.buffer.truncate() with self.condition: self.frame = self.buffer.getvalue() self.condition.notify_all() self.buffer.seek(0) return self.buffer.write(buf) class StreamingHandler(server.BaseHTTPRequestHandler): def do_GET(self): if self.path == '/': self.send_response(301) self.send_header('Location', '/index.html') self.end_headers() elif self.path == '/index.html': content = PAGE.encode('utf-8') self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Content-Length', len(content)) self.end_headers() self.wfile.write(content) elif self.path == '/stream.mjpg': self.send_response(200) self.send_header('Age', 0) self.send_header('Cache-Control', 'no-cache, private') self.send_header('Pragma', 'no-cache') self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME') self.end_headers() try: while True: with output.condition: output.condition.wait() frame = output.frame self.wfile.write(b'--FRAME\r\n') self.send_header('Content-Type', 'image/jpeg') self.send_header('Content-Length', len(frame)) self.end_headers() self.wfile.write(frame) self.wfile.write(b'\r\n') except Exception as e: logging.warning( 'Removed streaming client %s: %s', self.client_address, str(e)) else: self.send_error(404) self.end_headers() class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer): allow_reuse_address = True daemon_threads = True with picamera.PiCamera(resolution='640x480', framerate=24) as camera: output = StreamingOutput() camera.start_recording(output, format='mjpeg') try: address = ('', 8000) server = StreamingServer(address, StreamingHandler) server.serve_forever() finally: camera.stop_recording() The above code starts a streaming server on the raspberry pi and provides a view from picamera. The latency is around 200ms and that is really good for me, but when more clients connect the latency is increasing. With 5 users, there is already quite a lot of lag. This is the main problem in my project, because I have to stream in real time for all users. I predict 20-30 users. I'm just trying achive similar effect to youtube livestreams or google meets where multiple clients can connect and watch view from my webcam but with the least latency. So I thought about creating a second server that would be only receive one stream from my picamera and then would restream it to multiple clients without much delay. I tried to make a similar server to the one with raspberry pi based on the above code, but with the opencv. The picamera stream was intercepted with opencv and displayed, but unfortunately the latency was quite high. 
I also tried streaming to YouTube via the RTMP protocol with raspivid, and to a datarhei Restreamer on Docker via the RTSP protocol with GStreamer, but the latency is around 2-3s and that is too much for me. Is there any way to do that, or maybe other solutions? I have spent a lot of time on this and I have no idea how to do it... Any advice would be helpful, because I'm a newbie in video streaming. PS: Sorry for my imperfect English.
[ "Video streaming often ends up being a balance between bandwidth, latency and quality.\nMost online movie and live entertainment, sports etc streaming services use HSL or DASH streaming to provide the quality they need and will have a much higher latency that you are aiming for.\nSimilarly, serving many individual clients on a small PI device is likely to overrun the devices capacity as you have seen.\nI think your idea of having the drone send the stream to a server which then provides streams to clients is the most viable approach, but you probably want to look at a real time streaming protocol based server.\nWebRTC is the most obvious current choice, I think, for this type of streaming - it is specifically designed to prioritise latency over other factors, like quality, to enable real time voice and video communications.\nOpen source webRTC server exists which would be good starting point - e.g.:\n\nhttps://github.com/meetecho/janus-gateway\n\nPaid solutions also exists, like the enterprise edition of Ant Media WebRTC steaming server that lists IP Camera streaming amongst its usage scenarios:\n\nhttps://github.com/ant-media/Ant-Media-Server\n\nThere is also a good overview of a node.js based WebRTC solution here which reportedly achieves 400ms latency if that meets your needs: https://stackoverflow.com/a/67887822/334402\n" ]
[ 0 ]
[]
[]
[ "picamera", "python", "raspberry_pi", "streaming", "video_streaming" ]
stackoverflow_0074500671_picamera_python_raspberry_pi_streaming_video_streaming.txt
Q: SUMMARIZE (dax) equivalent in Python (Pandas) I am new using Pandas in Python and I am facing an issue that i am not able to solve alone. I connecting by odbc,SQL, to get df = the following data: JDFEC JDCPY JDTMP PALLETS_STOCK 0 2021-06-30 164 N 1256.0 1 2022-01-27 704 N 1.0 2 2021-03-14 799 N 376.0 3 2022-01-14 723 N 1402.0 4 2022-05-19 776 N 1902.0 ... ... ... ... ... 101417 2022-10-12 714 N 220.0 101418 2020-09-14 153 N 315.0 101419 2021-05-08 109 I 66.0 101420 2022-10-14 057 N 48.0 101421 2022-04-27 776 I 1820.0 I would like to manipulate it to get an outpute similar to the image: New Table example (So grouping by date and creating groups regarding JDCPY and JDTMP values to sum PALLETS_STOCK) I already have Power BI doing it with a SUMMARIZE-CALCULATE-SUM as below: NewTable = SUMMARIZE( Query, Query[JDFEC], "GROUP-A", CALCULATE(SUM(Query[PALLETS_STOCKS], QueryKeynes[JDCPY] = "539" || QueryKeynes[JDCPY] = "109"), "GROUP-B", CALCULATE(SUM(Query[PALLETS_STOCKS], QueryKeynes[JDCPY] = "455", QueryKeynes[JDTMP] = "N"), etc... ) However I have no idea about how I could deal with it in Python ? Someone could guide me please ? EDIT: Final code conditions = [ df["JDCPY"].isin(["003", '006']), (df["JDCPY"].eq("022")) & (df["JDTMP"].eq("N")) ] groups= ["GROUP-A","GROUP-B"] out= ( df .assign(JDFEC= pd.to_datetime(df["JDFEC"]), GROUPS= np.select(conditions, groups, default="GROUP-X")) .groupby(["JDFEC", "GROUPS"], as_index=False)["PALLETS_STOCK"].sum() .pivot_table(index= "JDFEC", columns="GROUPS", values="PALLETS_STOCK") .reset_index() .rename_axis(None, axis=1) ) out.sort_values(by=["JDFEC"]) out["JDFEC"] = pd.to_datetime(out["JDFEC"]).dt.strftime("%d/%m/%Y") print (out) A: IIUC, you can use np.select to form the groups and pandas.pivot_table to reshape. Try this : import pandas as pd import numpy as np conditions = [ df["JDCPY"].isin([539, 109]), (df["JDCPY"].eq(455)) & (df["JDTMP"].eq("N")) ] groups= ["GROUP-A","GROUP-B"] out= ( df .assign(JDFEC= pd.to_datetime(df["JDFEC"]).dt.strftime("%d/%m/%Y"), GROUPS= np.select(conditions, choices, default="GROUP-X")) .groupby(["JDFEC", "GROUPS"], as_index=False)["PALLETS_STOCK"].sum() .pivot_table(index= "JDFEC", columns="GROUPS", values="PALLETS_STOCK") .reset_index() .rename_axis(None, axis=1) ) # Output : print(out) JDFEC GROUP-A GROUP-B GROUP-X 0 08/05/2021 66.0 NaN NaN 1 12/10/2022 220.0 NaN NaN 2 14/01/2022 NaN NaN 1402.0 3 14/09/2020 NaN 315.0 NaN 4 14/10/2022 NaN NaN 48.0 5 19/05/2022 NaN NaN 1902.0 6 27/01/2022 377.0 NaN NaN 7 27/04/2022 NaN NaN 1820.0 8 30/06/2021 NaN NaN 1256.0 # Input used : print(df) JDFEC JDCPY JDTMP PALLETS_STOCK 0 2021-06-30 164 N 1256.0 1 2022-01-27 109 N 1.0 2 2022-01-27 109 N 376.0 3 2022-01-14 723 N 1402.0 4 2022-05-19 776 N 1902.0 5 2022-10-12 539 N 220.0 6 2020-09-14 455 N 315.0 7 2021-05-08 109 I 66.0 8 2022-10-14 57 N 48.0 9 2022-04-27 776 I 1820.0
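For readers who want a runnable starting point, below is a self-contained sketch of the np.select + pivot_table pattern from the answer above, on a tiny made-up sample. The group codes, thresholds and values are illustrative only, and the label list is passed to np.select under a single consistent name (the answer defines groups but passes choices, which would fail with a NameError as written).

import pandas as pd
import numpy as np

# Illustrative sample only; the real data comes from the ODBC/SQL query
df = pd.DataFrame({
    "JDFEC": ["2021-06-30", "2022-01-27", "2021-03-14", "2022-01-14"],
    "JDCPY": ["539", "109", "455", "723"],
    "JDTMP": ["N", "I", "N", "N"],
    "PALLETS_STOCK": [1256.0, 1.0, 376.0, 1402.0],
})

conditions = [
    df["JDCPY"].isin(["539", "109"]),                 # DAX: JDCPY = "539" || JDCPY = "109"
    (df["JDCPY"].eq("455")) & (df["JDTMP"].eq("N")),  # DAX: JDCPY = "455" && JDTMP = "N"
]
labels = ["GROUP-A", "GROUP-B"]

out = (
    df.assign(JDFEC=pd.to_datetime(df["JDFEC"]),
              GROUPS=np.select(conditions, labels, default="GROUP-X"))
      .pivot_table(index="JDFEC", columns="GROUPS",
                   values="PALLETS_STOCK", aggfunc="sum")
      .reset_index()
      .rename_axis(None, axis=1)
)
print(out)

pivot_table with aggfunc="sum" performs the per-date, per-group summation in one step, which is why the intermediate groupby used in the answer can be folded into it.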
SUMMARIZE (dax) equivalent in Python (Pandas)
I am new using Pandas in Python and I am facing an issue that i am not able to solve alone. I connecting by odbc,SQL, to get df = the following data: JDFEC JDCPY JDTMP PALLETS_STOCK 0 2021-06-30 164 N 1256.0 1 2022-01-27 704 N 1.0 2 2021-03-14 799 N 376.0 3 2022-01-14 723 N 1402.0 4 2022-05-19 776 N 1902.0 ... ... ... ... ... 101417 2022-10-12 714 N 220.0 101418 2020-09-14 153 N 315.0 101419 2021-05-08 109 I 66.0 101420 2022-10-14 057 N 48.0 101421 2022-04-27 776 I 1820.0 I would like to manipulate it to get an outpute similar to the image: New Table example (So grouping by date and creating groups regarding JDCPY and JDTMP values to sum PALLETS_STOCK) I already have Power BI doing it with a SUMMARIZE-CALCULATE-SUM as below: NewTable = SUMMARIZE( Query, Query[JDFEC], "GROUP-A", CALCULATE(SUM(Query[PALLETS_STOCKS], QueryKeynes[JDCPY] = "539" || QueryKeynes[JDCPY] = "109"), "GROUP-B", CALCULATE(SUM(Query[PALLETS_STOCKS], QueryKeynes[JDCPY] = "455", QueryKeynes[JDTMP] = "N"), etc... ) However I have no idea about how I could deal with it in Python ? Someone could guide me please ? EDIT: Final code conditions = [ df["JDCPY"].isin(["003", '006']), (df["JDCPY"].eq("022")) & (df["JDTMP"].eq("N")) ] groups= ["GROUP-A","GROUP-B"] out= ( df .assign(JDFEC= pd.to_datetime(df["JDFEC"]), GROUPS= np.select(conditions, groups, default="GROUP-X")) .groupby(["JDFEC", "GROUPS"], as_index=False)["PALLETS_STOCK"].sum() .pivot_table(index= "JDFEC", columns="GROUPS", values="PALLETS_STOCK") .reset_index() .rename_axis(None, axis=1) ) out.sort_values(by=["JDFEC"]) out["JDFEC"] = pd.to_datetime(out["JDFEC"]).dt.strftime("%d/%m/%Y") print (out)
[ "IIUC, you can use np.select to form the groups and pandas.pivot_table to reshape.\nTry this :\nimport pandas as pd\nimport numpy as np\n\nconditions = [\n df[\"JDCPY\"].isin([539, 109]),\n (df[\"JDCPY\"].eq(455)) & (df[\"JDTMP\"].eq(\"N\"))\n ]\n\ngroups= [\"GROUP-A\",\"GROUP-B\"]\n\nout= (\n df\n .assign(JDFEC= pd.to_datetime(df[\"JDFEC\"]).dt.strftime(\"%d/%m/%Y\"),\n GROUPS= np.select(conditions, choices, default=\"GROUP-X\"))\n .groupby([\"JDFEC\", \"GROUPS\"], as_index=False)[\"PALLETS_STOCK\"].sum()\n .pivot_table(index= \"JDFEC\", columns=\"GROUPS\", values=\"PALLETS_STOCK\")\n .reset_index()\n .rename_axis(None, axis=1)\n ) \n\n# Output :\nprint(out)\n \n JDFEC GROUP-A GROUP-B GROUP-X\n0 08/05/2021 66.0 NaN NaN\n1 12/10/2022 220.0 NaN NaN\n2 14/01/2022 NaN NaN 1402.0\n3 14/09/2020 NaN 315.0 NaN\n4 14/10/2022 NaN NaN 48.0\n5 19/05/2022 NaN NaN 1902.0\n6 27/01/2022 377.0 NaN NaN\n7 27/04/2022 NaN NaN 1820.0\n8 30/06/2021 NaN NaN 1256.0\n\n# Input used :\nprint(df)\n\n JDFEC JDCPY JDTMP PALLETS_STOCK\n0 2021-06-30 164 N 1256.0\n1 2022-01-27 109 N 1.0\n2 2022-01-27 109 N 376.0\n3 2022-01-14 723 N 1402.0\n4 2022-05-19 776 N 1902.0\n5 2022-10-12 539 N 220.0\n6 2020-09-14 455 N 315.0\n7 2021-05-08 109 I 66.0\n8 2022-10-14 57 N 48.0\n9 2022-04-27 776 I 1820.0\n\n" ]
[ 0 ]
[]
[]
[ "dax", "pandas", "python" ]
stackoverflow_0074531217_dax_pandas_python.txt
Q: webdriver : can't get the broken links So I can get url with driver.get('https://www.w3.org/') But what I want to test is, if I give a fault link, I should get something like This page does not exist. But when I try to capture this, I can't get the result This is failed, can't report the fault link link = "https://www.w3.org/fault_link" if driver.find_elements_by_xpath("//*[contains(text(), 'This page does not exist')]"): logger.info("Found fault link %s", link) this is failed as well, can't capture it. element = driver.find_element( By.XPATH, '//*[@id="__next"]/div[1]/main') # when I print out the element text, I can see the output # 404 ERROR # This page does not exist. # The page you are looking for could not be found. # Go back home → logger.info(element.text) if e.text=='This page does not exist.': logger.info("Found fault link %s", link) this is failed as well if search("This page does not exist.", element.text): logger.info("Found fault link %s", link) Any suggestions? A: Your test is failing since you expecting to find non-existing text. This text This page does not exist in not presented on https://www.w3.org/fault_link page. What you should look for on that specific page is Document not found text. So, this code is working for that specific page: url = "https://www.w3.org/fault_link" driver.get(url) if driver.find_elements(By.XPATH, "//*[contains(text(), 'Document not found')]"): print("Found fault link %s", url) The output is: Found fault link %s https://www.w3.org/fault_link Generally you should understand that each web site will present different error / notification for non-existing page. A: My suggestion is to do something like this. Have in mind that I dont program in python, just did a quick search in order to assemble the example: import requests from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.common.by import By driver = webdriver.Chrome() driver.get("http://www.python.org") assert "Python" in driver.title elements = driver.find_elements(By.XPATH, "//a") print(len(elements)) links = [elem.get_attribute('href') for elem in elements] print(links) x = requests.get(links[0]) print(x.status_code) I am checking the status code only of the first link found on the page. You can do foreach and if something has status code >= 400 then we are talking about broken link.
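Building on the second answer's status-code idea, a sketch that checks every link on the page rather than only the first could look like the following. The start URL, the 10-second timeout, the >= 400 threshold and the use of HEAD requests are my assumptions (some servers reject HEAD, in which case falling back to requests.get would be needed).

import requests
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get("https://www.w3.org/")

# Collect unique hrefs from every anchor on the page
links = {a.get_attribute("href") for a in driver.find_elements(By.TAG_NAME, "a")}

for url in links:
    if not url or not url.startswith("http"):
        continue  # skip mailto:, javascript:, fragment-only anchors, etc.
    try:
        status = requests.head(url, allow_redirects=True, timeout=10).status_code
        if status >= 400:  # treat 4xx/5xx responses as broken links
            print(f"Broken link ({status}): {url}")
    except requests.RequestException as exc:
        print(f"Could not reach {url}: {exc}")

driver.quit()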
webdriver : can't get the broken links
So I can get url with driver.get('https://www.w3.org/') But what I want to test is, if I give a fault link, I should get something like This page does not exist. But when I try to capture this, I can't get the result This is failed, can't report the fault link link = "https://www.w3.org/fault_link" if driver.find_elements_by_xpath("//*[contains(text(), 'This page does not exist')]"): logger.info("Found fault link %s", link) this is failed as well, can't capture it. element = driver.find_element( By.XPATH, '//*[@id="__next"]/div[1]/main') # when I print out the element text, I can see the output # 404 ERROR # This page does not exist. # The page you are looking for could not be found. # Go back home → logger.info(element.text) if e.text=='This page does not exist.': logger.info("Found fault link %s", link) this is failed as well if search("This page does not exist.", element.text): logger.info("Found fault link %s", link) Any suggestions?
[ "Your test is failing since you expecting to find non-existing text.\nThis text This page does not exist in not presented on https://www.w3.org/fault_link page.\nWhat you should look for on that specific page is Document not found text.\nSo, this code is working for that specific page:\nurl = \"https://www.w3.org/fault_link\"\ndriver.get(url)\n\nif driver.find_elements(By.XPATH, \"//*[contains(text(), 'Document not found')]\"):\n print(\"Found fault link %s\", url)\n\nThe output is:\nFound fault link %s https://www.w3.org/fault_link\n\nGenerally you should understand that each web site will present different error / notification for non-existing page.\n", "My suggestion is to do something like this. Have in mind that I dont program in python, just did a quick search in order to assemble the example:\nimport requests \nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\n\ndriver = webdriver.Chrome()\ndriver.get(\"http://www.python.org\")\nassert \"Python\" in driver.title\nelements = driver.find_elements(By.XPATH, \"//a\")\nprint(len(elements))\nlinks = [elem.get_attribute('href') for elem in elements]\nprint(links)\nx = requests.get(links[0]) \nprint(x.status_code) \n\nI am checking the status code only of the first link found on the page. You can do foreach and if something has status code >= 400 then we are talking about broken link.\n" ]
[ 1, 1 ]
[]
[]
[ "python", "python_3.x", "selenium", "selenium_webdriver", "webdriver" ]
stackoverflow_0074529013_python_python_3.x_selenium_selenium_webdriver_webdriver.txt
Q: Pycharm Error running 'test': can't run remote python interpreter: {0} When I tried to use a remote Python interpreter to debug my code, an error appeared: Error running 'test': Can't run remote python interpreter: {0}. However, I can run this code directly with the remote Python interpreter. I tried the commands 'which python' and 'which python3' to get a different interpreter, but the same error appeared. Could anybody help me solve this problem? It's my first time debugging code remotely with PyCharm. Thank you. Environment: CentOS. Python environment: /home/jumpserver/miniconda3/envs/vec/bin/python3 (obtained with the command 'which python3'). A: After testing several methods, I finally solved this problem. I cleared all the remote interpreters in PyCharm, then restarted PyCharm. After adding the remote Python interpreter a second time, debugging works normally. A: For me, the solution was to kill all containers belonging to the image in question, by clicking on the "stop" button in Docker for Windows.
Pycharm Error running 'test': can't run remote python interpreter: {0}
When I tried to use a remote Python interpreter to debug my code, an error appeared: Error running 'test': Can't run remote python interpreter: {0}. However, I can run this code directly with the remote Python interpreter. I tried the commands 'which python' and 'which python3' to get a different interpreter, but the same error appeared. Could anybody help me solve this problem? It's my first time debugging code remotely with PyCharm. Thank you. Environment: CentOS. Python environment: /home/jumpserver/miniconda3/envs/vec/bin/python3 (obtained with the command 'which python3').
[ "After testing several methods, I finally solved this problem. I clear all the remote interpreter in Pycharm, then restart Pycharm. After secondly adding the remote python interpreter, the debug works normally.\n", "For me, the solution was to kill all containers belonging to the considered image: by clicking on the \"stop\" button in Docker for Windows.\n" ]
[ 1, 0 ]
[]
[]
[ "pycharm", "python", "remote_server" ]
stackoverflow_0071494752_pycharm_python_remote_server.txt
Q: How to get today's date in SPARQL? I use Python and SPARQL to make a scheduled query for a database. I tried to use the python f-string and doc-string to inject today's date in the query, but when I try so, a conflict occurs with SPARQL syntax and the python string. The better way would be to use SPARQL to get today's date. In my python file my query looks like this: query = """ PREFIX skos: <http://www.w3.org/2004/02/skos/core#> PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> SELECT * { VALUES (?currentDateString) {(today)} FILTER(xsd:date(?dateApplicabilityNode) >= xsd:date(?validDateString) && xsd:date(?dateApplicabilityNode) <= xsd:date(?currentDateString)) } GROUP BY ... ORDER BY ... How to get today's date in the format of "YYYY-MM-DD"? A: now() returns the datetime (as xsd:dateTime) of the query execution: BIND( now() AS ?currentDateTime ) . To get only the date (as xsd:string), you could use CONCAT() with year(), month(), and day(): BIND( CONCAT( year(?currentDateTime), "-", month(?currentDateTime), "-", day(?currentDateTime) ) AS ?currentDateString ) . (To get the date as xsd:date, you could use xsd:date(?currentDateString).)
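If a zero-padded "YYYY-MM-DD" string is specifically wanted, one hedged alternative (untested here, but using only standard SPARQL 1.1 functions) is to take the first ten characters of NOW()'s lexical form, since an xsd:dateTime is always written with a zero-padded date. Embedded in the Python query string, it might look like the sketch below, with the rest of the original query left as it was:

query = """
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>

SELECT ?currentDateString WHERE {
    # STR(NOW()) is e.g. "2022-11-22T10:15:30.5Z"; its first 10 characters are the date
    BIND( SUBSTR( STR(NOW()), 1, 10 ) AS ?currentDateString )
    # reuse xsd:date(?currentDateString) inside the original FILTER as before
}
"""

This avoids injecting today's date from Python entirely, so the f-string quoting conflict never arises.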
How to get today's date in SPARQL?
I use Python and SPARQL to make a scheduled query for a database. I tried to use the python f-string and doc-string to inject today's date in the query, but when I try so, a conflict occurs with SPARQL syntax and the python string. The better way would be to use SPARQL to get today's date. In my python file my query looks like this: query = """ PREFIX skos: <http://www.w3.org/2004/02/skos/core#> PREFIX xsd: <http://www.w3.org/2001/XMLSchema#> SELECT * { VALUES (?currentDateString) {(today)} FILTER(xsd:date(?dateApplicabilityNode) >= xsd:date(?validDateString) && xsd:date(?dateApplicabilityNode) <= xsd:date(?currentDateString)) } GROUP BY ... ORDER BY ... How to get today's date in the format of "YYYY-MM-DD"?
[ "now() returns the datetime (as xsd:dateTime) of the query execution:\nBIND( now() AS ?currentDateTime ) .\n\nTo get only the date (as xsd:string), you could use CONCAT() with year(), month(), and day():\nBIND( CONCAT( year(?currentDateTime), \"-\", month(?currentDateTime), \"-\", day(?currentDateTime) ) AS ?currentDateString ) .\n\n(To get the date as xsd:date, you could use xsd:date(?currentDateString).)\n" ]
[ 2 ]
[]
[]
[ "date", "python", "sparql" ]
stackoverflow_0074532061_date_python_sparql.txt
Q: how to compare all values for each row in a dataframe in python Good morning guys, my problem is simple: Given a dataframe like this: import pandas as pd df = pd.DataFrame({ 'a': [1, 2, 3, 4, 5, 6], 'b': [8, 18, 27, 20, 33, 49], 'c': [2, 24, 6, 16, 20, 52]}) print(df) I would like to retrieve for each row the maximum value and compare it with all the others. If the difference is >10, create another column with a string 'yes' or 'not' a b c 0 1 8 2 1 2 18 24 2 3 27 6 3 4 20 16 4 5 33 20 5 6 49 52 I expect this result: a b c res 0 1 8 2 not 1 2 18 24 not 2 3 27 6 yes 3 4 20 16 not 4 5 33 20 yes 5 6 49 52 not Thanks a lot in advance. A: I guess, the below code can help: import pandas as pd df = pd.DataFrame({ 'a': [1, 2, 3, 4, 5, 6], 'b': [8, 18, 27, 20, 33, 49], 'c': [2, 24, 6, 16, 20, 52]}) def find(x): if x > 10: return "yes" else: return "not" df["diff"] = df.max(axis=1) - df.apply(lambda row: row.nlargest(2).values[-1],axis=1) df["res"] = df["diff"].apply(find) df.drop(columns="diff", axis=0, inplace=True) Output: A: This should do the trick. Around twice to ten times as fast as other answers provided here import pandas as pd df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6], 'b': [8, 18, 27, 20, 33, 49], 'c': [2, 24, 6, 16, 20, 52]}) df["res"] = df.apply(lambda row: "yes" if all(row.apply(lambda val: max(row) - val > 10 or val == max(row))) else "not", axis=1) print(df) results a b c res 0 1 8 2 not 1 2 18 24 not 2 3 27 6 yes 3 4 20 16 not 4 5 33 20 yes 5 6 49 52 not A: import pandas as pd df = pd.DataFrame({ 'a': [1, 2, 3, 4, 5, 6], 'b': [8, 18, 27, 20, 33, 49], 'c': [2, 24, 6, 16, 20, 52]}) def _max(row): first, second = row.nlargest(2) if first - second > 10: return True else: return False df["res"] = df.apply(_max, axis=1)
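As a possible addition to the answers above, the same "largest minus second-largest" rule can be expressed without a row-wise apply by sorting each row once. This is only a sketch on the sample data from the question and assumes every column involved is numeric.

import pandas as pd
import numpy as np

df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
                   'b': [8, 18, 27, 20, 33, 49],
                   'c': [2, 24, 6, 16, 20, 52]})

vals = np.sort(df.to_numpy(), axis=1)   # ascending sort within each row
gap = vals[:, -1] - vals[:, -2]         # maximum minus second-largest value
df["res"] = np.where(gap > 10, "yes", "not")
print(df)

On larger frames this avoids calling a Python function per row, which is usually the slow part of the apply-based answers.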
how to compare all values for each row in a dataframe in python
Good morning guys, my problem is simple: Given a dataframe like this: import pandas as pd df = pd.DataFrame({ 'a': [1, 2, 3, 4, 5, 6], 'b': [8, 18, 27, 20, 33, 49], 'c': [2, 24, 6, 16, 20, 52]}) print(df) I would like to retrieve for each row the maximum value and compare it with all the others. If the difference is >10, create another column with a string 'yes' or 'not' a b c 0 1 8 2 1 2 18 24 2 3 27 6 3 4 20 16 4 5 33 20 5 6 49 52 I expect this result: a b c res 0 1 8 2 not 1 2 18 24 not 2 3 27 6 yes 3 4 20 16 not 4 5 33 20 yes 5 6 49 52 not Thanks a lot in advance.
[ "I guess, the below code can help:\nimport pandas as pd\n\ndf = pd.DataFrame({ 'a': [1, 2, 3, 4, 5, 6],\n 'b': [8, 18, 27, 20, 33, 49],\n 'c': [2, 24, 6, 16, 20, 52]})\n\ndef find(x):\n if x > 10:\n return \"yes\"\n else:\n return \"not\"\n\ndf[\"diff\"] = df.max(axis=1) - df.apply(lambda row: row.nlargest(2).values[-1],axis=1)\ndf[\"res\"] = df[\"diff\"].apply(find)\ndf.drop(columns=\"diff\", axis=0, inplace=True)\n\nOutput:\n\n", "This should do the trick.\nAround twice to ten times as fast as other answers provided here\nimport pandas as pd\ndf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n 'b': [8, 18, 27, 20, 33, 49],\n 'c': [2, 24, 6, 16, 20, 52]})\n\ndf[\"res\"] = df.apply(lambda row: \"yes\" if all(row.apply(lambda val: max(row) - val > 10 or val == max(row))) else \"not\", axis=1)\n\nprint(df)\n\nresults\n a b c res\n0 1 8 2 not\n1 2 18 24 not\n2 3 27 6 yes\n3 4 20 16 not\n4 5 33 20 yes\n5 6 49 52 not\n\n", "import pandas as pd\ndf = pd.DataFrame({ 'a': [1, 2, 3, 4, 5, 6],\n 'b': [8, 18, 27, 20, 33, 49],\n 'c': [2, 24, 6, 16, 20, 52]})\n\ndef _max(row):\n first, second = row.nlargest(2)\n if first - second > 10:\n return True\n else:\n return False\n\ndf[\"res\"] = df.apply(_max, axis=1)\n\n" ]
[ 0, 0, 0 ]
[]
[]
[ "dataframe", "max", "python", "row" ]
stackoverflow_0074531768_dataframe_max_python_row.txt