title
stringlengths
10
172
question_id
int64
469
40.1M
question_body
stringlengths
22
48.2k
question_score
int64
-44
5.52k
question_date
stringlengths
20
20
answer_id
int64
497
40.1M
answer_body
stringlengths
18
33.9k
answer_score
int64
-38
8.38k
answer_date
stringlengths
20
20
tags
listlengths
1
5
Send client requests to broker only when a worker is available and broker is available
38,817,889
<p>With <strong><code>ZeroMQ</code></strong> for distributed messaging, I'm using the example code provided for the paranoid pirate pattern in python. I have a single <strong>client</strong> (potentially may have more clients) a <strong>broker</strong> and multiple <strong>workers</strong>.</p> <p>I have modified the example such that the client will continue to send requests to the broker queue even when no workers are available rather than retrying and eventually exiting. They will get distributed to the worker(s) when they eventually become available. In my scenario each worker varies in the amount of time it takes to process a given request.</p> <p>The problem I'm seeing is that when the broker goes down (becomes unavailable), the client is unable to tell that the broker is unavailable and it continues to <strong><code>.send()</code></strong> requests. These requests get lost. Only new requests are processed after the broker becomes available once again. </p> <p><strong><code>Client.py</code></strong></p> <pre><code>from random import randint import time import zmq HEARTBEAT_LIVENESS = 3 HEARTBEAT_INTERVAL = 1 INTERVAL_INIT = 1 INTERVAL_MAX = 32 # Paranoid Pirate Protocol constants PPP_READY = "PPP_READY" # Signals worker is ready PPP_HEARTBEAT = "PPP_HEARTBEAT" # Signals worker heartbeat def worker_socket(context, poller): """Helper function that returns a new configured socket connected to the Paranoid Pirate queue""" worker = context.socket(zmq.DEALER) # DEALER identity = "%04X-%04X" % (randint(0, 0x10000), randint(0, 0x10000)) worker.setsockopt(zmq.IDENTITY, identity) poller.register(worker, zmq.POLLIN) worker.connect("tcp://localhost:5556") worker.send(PPP_READY) return worker context = zmq.Context(1) poller = zmq.Poller() liveness = HEARTBEAT_LIVENESS interval = INTERVAL_INIT heartbeat_at = time.time() + HEARTBEAT_INTERVAL worker = worker_socket(context, poller) cycles = 0 while True: socks = dict(poller.poll(HEARTBEAT_INTERVAL * 1000)) # Handle worker 
activity on backend if socks.get(worker) == zmq.POLLIN: # Get message # - 3-part envelope + content -&gt; request # - 1-part HEARTBEAT -&gt; heartbeat frames = worker.recv_multipart() if not frames: break # Interrupted if len(frames) == 3: print "I: Normal reply: ", frames liveness = HEARTBEAT_LIVENESS time.sleep(4) # Do some heavy work worker.send_multipart(frames) elif len(frames) == 1 and frames[0] == PPP_HEARTBEAT: # print "I: Queue heartbeat" liveness = HEARTBEAT_LIVENESS else: print "E: Invalid message: %s" % frames interval = INTERVAL_INIT else: liveness -= 1 if liveness == 0: print "W: Heartbeat failure, can't reach queue" print ("W: Reconnecting in") time.sleep(interval) if interval &lt; INTERVAL_MAX: interval *= 2 poller.unregister(worker) worker.setsockopt(zmq.LINGER, 0) worker.close() worker = worker_socket(context, poller) liveness = HEARTBEAT_LIVENESS if time.time() &gt; heartbeat_at: heartbeat_at = time.time() + HEARTBEAT_INTERVAL #print "I: Worker heartbeat" worker.send(PPP_HEARTBEAT) </code></pre> <p><strong><code>Broker.py</code></strong></p> <pre><code>from collections import OrderedDict import time import threading import zmq HEARTBEAT_LIVENESS = 3 # 3..5 is reasonable HEARTBEAT_INTERVAL = 1.0 # Seconds # Paranoid Pirate Protocol constants PPP_READY = "PPP_READY" # Signals worker is ready PPP_HEARTBEAT = "PPP_HEARTBEAT" # Signals worker heartbeat PPP_BUSY = "PPP_BUSY" PPP_FREE = "PPP_FREE" class Worker(object): def __init__(self, address): self.address = address self.expiry = time.time() + HEARTBEAT_INTERVAL * HEARTBEAT_LIVENESS class WorkerQueue(object): def __init__(self): self.queue = OrderedDict() def ready(self, worker): self.queue.pop(worker.address, None) self.queue[worker.address] = worker def purge(self): """Look for &amp; kill expired workers.""" t = time.time() expired = [] for address,worker in self.queue.iteritems(): if t &gt; worker.expiry: # Worker expired expired.append(address) for address in expired: print "W: Idle worker 
expired: %s" % address self.queue.pop(address, None) def next(self): address, worker = self.queue.popitem(False) return address context = zmq.Context(1) clcontext = zmq.Context() frontend = context.socket(zmq.ROUTER) # ROUTER backend = context.socket(zmq.ROUTER) # ROUTER frontend.bind("tcp://*:5555") # For clients backend.bind("tcp://*:5556") # For workers poll_workers = zmq.Poller() poll_workers.register(backend, zmq.POLLIN) poll_both = zmq.Poller() poll_both.register(frontend, zmq.POLLIN) poll_both.register(backend, zmq.POLLIN) workers = WorkerQueue() heartbeat_at = time.time() + HEARTBEAT_INTERVAL while True: if len(workers.queue) &gt; 0: poller = poll_both else: poller = poll_workers socks = dict(poller.poll(HEARTBEAT_INTERVAL * 1000)) # Handle worker activity on backend if socks.get(backend) == zmq.POLLIN: # Use worker address for LRU routing frames = backend.recv_multipart() if not frames: break address = frames[0] workers.ready(Worker(address)) # Validate control message, or return reply to client msg = frames[1:] if len(msg) == 1: if msg[0] not in (PPP_READY, PPP_HEARTBEAT): print "E: Invalid message from worker: %s" % msg else: print ("sending: %s"%msg) frontend.send_multipart(msg) # Send heartbeats to idle workers if it's time if time.time() &gt;= heartbeat_at: for worker in workers.queue: msg = [worker, PPP_HEARTBEAT] backend.send_multipart(msg) heartbeat_at = time.time() + HEARTBEAT_INTERVAL if socks.get(frontend) == zmq.POLLIN: frames = frontend.recv_multipart() print ("client frames: %s" % frames) if not frames: break frames.insert(0, workers.next()) backend.send_multipart(frames) workers.purge() </code></pre> <p><strong><code>Worker.py</code></strong></p> <pre><code>from random import randint import time import zmq HEARTBEAT_LIVENESS = 3 HEARTBEAT_INTERVAL = 1 INTERVAL_INIT = 1 INTERVAL_MAX = 32 # Paranoid Pirate Protocol constants PPP_READY = "PPP_READY" # Signals worker is ready PPP_HEARTBEAT = "PPP_HEARTBEAT" # Signals worker heartbeat def 
worker_socket(context, poller): """Helper function that returns a new configured socket connected to the Paranoid Pirate queue""" worker = context.socket(zmq.DEALER) # DEALER identity = "%04X-%04X" % (randint(0, 0x10000), randint(0, 0x10000)) worker.setsockopt(zmq.IDENTITY, identity) poller.register(worker, zmq.POLLIN) worker.connect("tcp://localhost:5556") worker.send(PPP_READY) return worker context = zmq.Context(1) poller = zmq.Poller() liveness = HEARTBEAT_LIVENESS interval = INTERVAL_INIT heartbeat_at = time.time() + HEARTBEAT_INTERVAL worker = worker_socket(context, poller) cycles = 0 while True: socks = dict(poller.poll(HEARTBEAT_INTERVAL * 1000)) # Handle worker activity on backend if socks.get(worker) == zmq.POLLIN: # Get message # - 3-part envelope + content -&gt; request # - 1-part HEARTBEAT -&gt; heartbeat frames = worker.recv_multipart() if not frames: break # Interrupted if len(frames) == 3: print "I: Normal reply: ", frames liveness = HEARTBEAT_LIVENESS time.sleep(4) # Do some heavy work worker.send_multipart(frames) elif len(frames) == 1 and frames[0] == PPP_HEARTBEAT: # print "I: Queue heartbeat" liveness = HEARTBEAT_LIVENESS else: print "E: Invalid message: %s" % frames interval = INTERVAL_INIT else: liveness -= 1 if liveness == 0: print "W: Heartbeat failure, can't reach queue" print ("W: Reconnecting in") time.sleep(interval) if interval &lt; INTERVAL_MAX: interval *= 2 poller.unregister(worker) worker.setsockopt(zmq.LINGER, 0) worker.close() worker = worker_socket(context, poller) liveness = HEARTBEAT_LIVENESS if time.time() &gt; heartbeat_at: heartbeat_at = time.time() + HEARTBEAT_INTERVAL #print "I: Worker heartbeat" worker.send(PPP_HEARTBEAT) </code></pre>
0
2016-08-07T19:46:33Z
38,831,699
<h2>A: add a HeartBeat signalling to the Control Layer</h2> <p><strong>If there is the only</strong></p> <blockquote> <p><em>(cit.:)</em> "<strong>problem</strong> I'm seeing is that when the <strong>broker goes down</strong> (becomes unavailable), the client is unable to tell that the broker is unavailable and it continues to send requests."</p> </blockquote> <p>simply add a trivial signalling ( be it of a <code>PUB/SUB</code> or other archetype ) that may keep just the last signalled timestamped presence with newer API using <code>.setsockopt( zmq.CONFLATE )</code> or have some even more robust, bilateral handshaking, that permits the sender to acquire a reasonable assumption right upon it's will to send a next message to the consumer process, that all interested parties are fit and in such a state, that allows to reach the target functionality.</p> <p><strong><code>ZeroMQ</code></strong> is a great in this very sense, to help integrate both the smart-signalling &amp; message-handling services into something like a fully-distributed Finite-State-Automata, or a network of statefully cooperating FSA-s, if one wishes to, <strong>a trully remarkable step into heterogeneous distributed systems</strong> ( queues being some non-core, additional means for doing this smart, fast and in a scaleable manner ).</p> <p>One may soon realise the sort of additional risks of distributed mutual locking, that have to be incorporated into the professional system designs &amp; validations. This new sort of issues may and does appear both in a naive use-case with <strong><code>REQ/REP</code></strong> across error-prone delivery of messages and, in more complex ways, in more complex distributed signalling/messaging FSA networks.</p>
0
2016-08-08T14:20:28Z
[ "python", "zeromq", "pyzmq" ]
Python Tools for Visual Studio - tell the IDE what type a parameter will be
38,817,895
<p>I just started using Visual Studio 2015, and I really like it, especially the code completion features. Right now I'm writing some simple Python code to draw an object on a Tkinter Canvas:</p> <pre><code> def draw(self, can): """ can is a Tkinter canvas on which this node will draw itself. """ can.&lt;something&gt; </code></pre> <p>This being Python, nowhere in the actual code is it indicated that "can" is going to be a Canvas, so VS can't autocomplete or suggest anything after I type "can." Is there a way I can tell Visual Studio that can will be of type tkinter.Canvas?</p>
2
2016-08-07T19:47:15Z
38,883,783
<p>Yes, you can use an assertion statement. This will trigger VS autocomplete settings. It's not the most beautiful way to do it, but I think right now that's the best option. </p> <pre><code>assert isinstance(can, tkinter.Canvas) </code></pre> <p>Take a look at <a href="http://stackoverflow.com/questions/36602807/ptvs2-1-for-vs2012-intellisense-not-work/37351611#37351611">this answer</a> for more.</p>
1
2016-08-10T21:21:24Z
[ "python", "visual-studio", "code-completion" ]
Python Tools for Visual Studio - tell the IDE what type a parameter will be
38,817,895
<p>I just started using Visual Studio 2015, and I really like it, especially the code completion features. Right now I'm writing some simple Python code to draw an object on a Tkinter Canvas:</p> <pre><code> def draw(self, can): """ can is a Tkinter canvas on which this node will draw itself. """ can.&lt;something&gt; </code></pre> <p>This being Python, nowhere in the actual code is it indicated that "can" is going to be a Canvas, so VS can't autocomplete or suggest anything after I type "can." Is there a way I can tell Visual Studio that can will be of type tkinter.Canvas?</p>
2
2016-08-07T19:47:15Z
38,906,235
<p>I'm going to preface this by saying I haven't used Python Tools for Visual Studio (although I use Visual Studio for C# and it's a great IDE) and so this isn't a direct answer for your question. However, I'm guessing from "I just started using Visual Studio 2015" that you might be checking out different tools, so maybe this can still be helpful to you.</p> <p>I started using <a href="https://www.jetbrains.com/pycharm/" rel="nofollow">PyCharm</a> a few days ago since I plan on doing some serious Python work soon, and I have to say it's pretty phenomenal in my opinion.</p> <p>To relate back to your question, you have at least a couple ways of letting PyCharm infer the type of your parameters.</p> <p><strong>Method 1: Python 3 Type Hints</strong></p> <p>If you're using a Python 3.5 interpreter, you can help PyCharm infer the types of parameters by providing <a href="https://www.python.org/dev/peps/pep-0484/" rel="nofollow">type hints</a> in your code.</p> <p><a href="http://i.stack.imgur.com/oM8n2.png" rel="nofollow"><img src="http://i.stack.imgur.com/oM8n2.png" alt="enter image description here"></a></p> <p><strong>Method 2: Docstrings</strong></p> <p>Alternatively, you can let PyCharm infer types by providing a docstring with the function that contains type information. In the example below, I'm using documentation in reStructuredText format.</p> <p>The advantage of doing it this way is that it works whichever interpreter you're using.</p> <p><a href="http://i.stack.imgur.com/CMIAl.png" rel="nofollow"><img src="http://i.stack.imgur.com/CMIAl.png" alt="enter image description here"></a> </p>
1
2016-08-11T21:11:21Z
[ "python", "visual-studio", "code-completion" ]
Python shared read memory
38,817,914
<p>I'm working with a data set that is ~ 8GB big and I'm also using scikit-learn to train various ML models on it. The data set is basically a list of 1D vectors of ints. </p> <p>How can I make the data set available to multiple python processes or how can I encode the data set so I can make it use <code>multiprocessing</code>'s classes? I've been reading on <code>ctypes</code> and I've also been reading into <code>multiprocessing</code>'s documentation but I'm very confused. I only need to make the data readable to every process so I can train the models with it.</p> <p>Do I need to have the shared <code>multiprocessing</code> variables as ctypes? </p> <p>How can I represent the dataset as <code>ctypes</code>?</p>
5
2016-08-07T19:49:21Z
38,837,870
<p>Might be duplicate of <a href="https://stackoverflow.com/questions/17785275/share-large-read-only-numpy-array-between-multiprocessing-processes">Share Large, Read-Only Numpy Array Between Multiprocessing Processes</a></p> <p>You could convert your dataset from current representation to new numpy memmap object, and use it from every process. But it won't be very fast anyway, it just gives some abstraction of working with array from ram, in reality it will be file from HDD, partially cached in RAM. So you should prefer scikit-learn algos with partial_fit methods, and use them.</p> <p><a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.memmap.html" rel="nofollow">https://docs.scipy.org/doc/numpy/reference/generated/numpy.memmap.html</a></p> <p>Actually joblib (which is used in scikit-learn for parallelizing) automatically converts your dataset to memmap representation to use it from different processes (If it's big enough, of course).</p>
1
2016-08-08T20:22:16Z
[ "python", "python-2.7", "scikit-learn", "ctypes", "python-multiprocessing" ]
Python shared read memory
38,817,914
<p>I'm working with a data set that is ~ 8GB big and I'm also using scikit-learn to train various ML models on it. The data set is basically a list of 1D vectors of ints. </p> <p>How can I make the data set available to multiple python processes or how can I encode the data set so I can make it use <code>multiprocessing</code>'s classes? I've been reading on <code>ctypes</code> and I've also been reading into <code>multiprocessing</code>'s documentation but I'm very confused. I only need to make the data readable to every process so I can train the models with it.</p> <p>Do I need to have the shared <code>multiprocessing</code> variables as ctypes? </p> <p>How can I represent the dataset as <code>ctypes</code>?</p>
5
2016-08-07T19:49:21Z
38,838,264
<p>I am assuming you are able to load the whole dataset into RAM in a numpy array, and that you are working on Linux or a Mac. (If you are on Windows or you can't fit the array into RAM, then you should probably copy the array to a file on disk and use <a href="https://docs.scipy.org/doc/numpy/reference/generated/numpy.memmap.html" rel="nofollow">numpy.memmap</a> to access it. Your computer will cache the data from disk into RAM as well as it can, and those caches will be shared between processes, so it's not a terrible solution.)</p> <p>Under the assumptions above, if you need read-only access to the dataset in other processes created via <code>multiprocessing</code>, you can simply create the dataset and then launch the other processes. They will have read-only access to data from the original namespace. They can alter data from the original namespace, but those changes won't be visible to other processes (the memory manager will copy each segment of memory they alter into the local memory map).</p> <p>If your other processes need to alter the original dataset and make those changes visible to the parent process or other processes, you could use something like this:</p> <pre><code>import multiprocessing import numpy as np # create your big dataset big_data = np.zeros((3, 3)) # create a shared-memory wrapper for big_data's underlying data # (it doesn't matter what datatype we use, and 'c' is easiest) # I think if lock=True, you get a serialized object, which you don't want. # Note: you will need to setup your own method to synchronize access to big_data. 
buf = multiprocessing.Array('c', big_data.data, lock=False) # at this point, buf and big_data.data point to the same block of memory, # (try looking at id(buf[0]) and id(big_data.data[0])) but for some reason # changes aren't propagated between them unless you do the following: big_data.data = buf # now you can update big_data from any process: def add_one_direct(): big_data[:] = big_data + 1 def add_one(a): # People say this won't work, since Process() will pickle the argument. # But in my experience Process() seems to pass the argument via shared # memory, so it works OK. a[:] = a+1 print "starting value:" print big_data p = multiprocessing.Process(target=add_one_direct) p.start() p.join() print "after add_one_direct():" print big_data p = multiprocessing.Process(target=add_one, args=(big_data,)) p.start() p.join() print "after add_one():" print big_data </code></pre>
2
2016-08-08T20:49:13Z
[ "python", "python-2.7", "scikit-learn", "ctypes", "python-multiprocessing" ]
Python Dependency Injection for Lazy Callables
38,817,923
<p>In programming for fun, I've noticed that managing dependencies feels like a boring chore that I want to minimize. <a href="http://code.activestate.com/recipes/413268-dependency-injection-the-python-way/" rel="nofollow">After reading this</a>, I've come up with a super trivial dependency injector, whereby the dependency instances are looked up by a string key:</p> <pre><code>def run_job(job, args, instance_keys, injected): args.extend([injected[key] for key in instance_keys]) return job(*args) </code></pre> <p>This cheap trick works since calls in my program are always lazily defined (where the function handle is stored separately from its arguments) in an iterator, e.g.:</p> <pre><code>jobs_to_run = [[some_func, ("arg1", "arg2"), ("obj_key",)], [other_func,(),()]] </code></pre> <p>The reason is because of a central <code>main loop</code> that must schedule all events. It has a reference to all dependencies, so the injection for <code>"obj_key"</code> can be passed in a dict object, e.g.:</p> <pre><code># inside main loop injection = {"obj_key" : injected_instance} for (callable, with_args, and_dependencies) in jobs_to_run: run_job(callable, with_args, and_dependencies, injection) </code></pre> <p>So when an event happens (user input, etc.), the main loop may call an <code>update()</code> on a particular object who reacts to that input, who in turn builds a list of jobs for the <code>main loop</code> to schedule when there's resources. To me it is cleaner to <em>key-reference</em> any dependencies for <em>someone else</em> to inject rather than having all objects form direct relationships.</p> <p>Because I am lazily defining all callables (functions) for a <a href="https://kivy.org/docs/api-kivy.clock.html" rel="nofollow">game clock engine to run them on its own accord</a>, the above naive approach worked with very little added complexity. Still, there is a code stink in having to reference objects by strings. 
At the same time, it was stinky to be passing dependencies around, and <a href="http://www.martinfowler.com/articles/injection.html#FormsOfDependencyInjection" rel="nofollow">constructor or setter injection</a> would be overkill, as would perhaps most large <a href="https://pypi.python.org/pypi?%3Aaction=search&amp;term=dependency%20injection&amp;submit=search" rel="nofollow">dependency injection libraries</a>.</p> <p>For the special case of injecting dependencies in <strong><em>callables</em></strong> that are <strong><em>lazily</em></strong> defined, are there more expressive design patterns in existence?</p>
1
2016-08-07T19:50:42Z
38,819,044
<blockquote> <p>I've noticed that managing dependencies feels like a boring chore that I want to minimize. </p> </blockquote> <p>First of all, you shouldn't assume that dependency injection is a means to minimize the chore of dependency management. It doesn't go away, it is just deferred to another place and time and possibly delegated to someone else. </p> <p>That said, if what you are building is going to be used by others it would thus be wise to include some form of version checking into your 'injectables'so that your users will have an easy way to check if their version matches the one that is expected.</p> <blockquote> <p>are there more expressive design patterns in existence?</p> </blockquote> <p>Your method as I understand it is essentially a <a href="https://en.wikipedia.org/wiki/Strategy_pattern" rel="nofollow">Strategy-Pattern</a>, that is the job's code (<em>callable</em>) relies on calling methods on one of several concrete objects. The way you do it is perfectly reasonable - it works and is efficient. </p> <p>You may want to formalize it a bit more to make it easier to read and maintain, e.g.</p> <pre><code>from collections import namedtuple Job = namedtuple('Job', ['callable', 'args', 'strategies']) def run_job(job, using=None): strategies = { k: using[k] for k in job.strategies] } return job.callable(*args, **strategies) jobs_to_run = [ Job(callable=some_func, args=(1,2), strategies=('A', 'B')), Job(callable=other_func, ...), ] strategies = {"A": injected_strategy, ...} for job in jobs_to_run: run_job(job, using=strategies) # actual job def some_func(arg1, arg2, A=None, B=None): ... </code></pre> <p>As you can see the code still does the same thing, but it is instantly more readable, and it concentrates knowledge about the structure of the Job() objects in <code>run_job</code>. 
Also the call to a job function like <code>some_func</code> will fail if the wrong number of arguments are passed, and the job functions are easier to code and debug due to their explicitely listed and named arguments.</p>
1
2016-08-07T22:12:40Z
[ "python", "dependency-injection", "lazy-evaluation" ]
Field names from dtype listing?
38,817,932
<p>I'm using code from <a href="http://stackoverflow.com/questions/7008608/scipy-io-loadmat-nested-structures-i-e-dictionaries">scipy.io.loadmat nested structures (i.e. dictionaries)</a> to read a matlab structure into Python. I want to make a list of names of fields that appear in the dtype listing. My code is:</p> <pre><code>matfile =loadmat(dataDirStr + matFileName, struct_as_record=True) # a dictionary theseKeys = matfile.keys() #as list thisDict = matfile[ theseKeys[ 1 ] ] #type = void1152, size = (1, 118) # #screen display of contents is: # dtype = [ ( 'Aircraft_Name', 'O'), ('Low_Mass', 'O') ] </code></pre> <p>So, with that in mind, I would like to create a listing of the entries in dtype:</p> <pre><code>thisList = [ 'Aircraft_Name', 'Low_Mass' ] #etc., etc. </code></pre> <p>such that the order of names in the dtype entry is preserved.</p> <p>Can you please help me?</p>
0
2016-08-07T19:51:43Z
38,817,957
<p>Just use a list comprehension and pick up the first item from each tuple, in each iteration:</p> <pre><code>thisList = [item[0] for item in dtype] </code></pre> <p>Or as a functional approach use <a href="https://docs.python.org/3/library/functions.html#zip" rel="nofollow"><code>zip()</code></a>:</p> <pre><code>thisList = next(zip(*dtype)) # in python 2.x zip(*dtype)[0] </code></pre>
0
2016-08-07T19:55:22Z
[ "python", "matlab", "numpy", "scipy" ]
Field names from dtype listing?
38,817,932
<p>I'm using code from <a href="http://stackoverflow.com/questions/7008608/scipy-io-loadmat-nested-structures-i-e-dictionaries">scipy.io.loadmat nested structures (i.e. dictionaries)</a> to read a matlab structure into Python. I want to make a list of names of fields that appear in the dtype listing. My code is:</p> <pre><code>matfile =loadmat(dataDirStr + matFileName, struct_as_record=True) # a dictionary theseKeys = matfile.keys() #as list thisDict = matfile[ theseKeys[ 1 ] ] #type = void1152, size = (1, 118) # #screen display of contents is: # dtype = [ ( 'Aircraft_Name', 'O'), ('Low_Mass', 'O') ] </code></pre> <p>So, with that in mind, I would like to create a listing of the entries in dtype:</p> <pre><code>thisList = [ 'Aircraft_Name', 'Low_Mass' ] #etc., etc. </code></pre> <p>such that the order of names in the dtype entry is preserved.</p> <p>Can you please help me?</p>
0
2016-08-07T19:51:43Z
38,819,062
<pre><code>In [168]: dt=np.dtype([ ( 'Aircraft_Name', 'O'), ('Low_Mass', 'O') ]) In [169]: dt Out[169]: dtype([('Aircraft_Name', 'O'), ('Low_Mass', 'O')]) In [170]: dt.names Out[170]: ('Aircraft_Name', 'Low_Mass') </code></pre> <p>This tuple is handy for setting, or fetching, all fields, one by one:</p> <pre><code>In [171]: x=np.empty((3,),dtype=dt) In [172]: x Out[172]: array([(None, None), (None, None), (None, None)], dtype=[('Aircraft_Name', 'O'), ('Low_Mass', 'O')]) In [173]: for name in x.dtype.names: ...: x[name][:]=['one','two','three'] ...: In [174]: x Out[174]: array([('one', 'one'), ('two', 'two'), ('three', 'three')], dtype=[('Aircraft_Name', 'O'), ('Low_Mass', 'O')]) </code></pre> <p><code>descr</code> is a list description of the variable's dtype; names can be pulled from that as well:</p> <pre><code>In [180]: x.dtype.descr Out[180]: [('Aircraft_Name', '|O'), ('Low_Mass', '|O')] In [181]: [i[0] for i in x.dtype.descr] Out[181]: ['Aircraft_Name', 'Low_Mass'] In [182]: x.dtype.names Out[182]: ('Aircraft_Name', 'Low_Mass') </code></pre>
0
2016-08-07T22:16:41Z
[ "python", "matlab", "numpy", "scipy" ]
Python: reading ID3v1 tag with Kaitai Struct
38,817,956
<p>I'm trying to get Kaitai Struct to parse a ID3v1 tag format for MP3s. According to the <a href="http://id3.org/ID3v1" rel="nofollow">standard</a>, it is a fixed format structure located at the certain offset - but the trick is that this offset is calculated not from the beginning of the file, but from the end.</p> <p>Here's the basic <code>.ksy</code> outline of the tag, I take it for granted that it shouldn't really change:</p> <pre><code>meta: id: id3v1 types: id3v1_tag: seq: - id: magic contents: 'TAG' - id: title size: 30 - id: artist size: 30 - id: album size: 30 - id: year size: 4 - id: comment size: 30 - id: genre type: u1 </code></pre> <p>and here's my naïve idea on how to get it to be read from the 128 bytes till the end of the file:</p> <pre><code>instances: tag: pos: -128 type: id3v1_tag </code></pre> <p>I try that with a simple Python test script:</p> <pre><code>#!/usr/bin/env python from id3v1 import * f = Id3v1.from_file('some_file_with_id3.mp3') print(f.tag) </code></pre> <p>However, it seems to pass that negative amount directly into the Python's File object <code>seek()</code> and thus fails:</p> <blockquote> <p>Traceback (most recent call last): File "try-id3.py", line 6, in print(f.id3v1_tag) File "id3v1_1.py", line 171, in id3v1_tag self._io.seek(-128) File "kaitaistruct.py", line 29, in seek self._io.seek(n) IOError: [Errno 22] Invalid argument</p> </blockquote> <p>After a few other equally insane ideas, I've found a workaround: I can just omit any <code>pos</code> arguments in <code>.ksy</code> and then I manually seek to the proper position in my script:</p> <pre><code>f = Id3v1.from_file('some_file_with_id3.mp3') f._io.seek(-128, 2) print(f.tag.title) </code></pre> <p>This works, but feels really hackish :( Is there a better way to do it in Kaitai Struct and Python?</p>
3
2016-08-07T19:55:21Z
38,827,362
<p>There's a new feature in upcoming v0.4 of Kaitai Struct that addresses exactly this issue. You can use <code>_io</code> to get current stream object and then you can use <code>.size</code> to get full length of current stream in bytes. Thus, if you'd want to address some structure by a fixed offset from the end of the stream, you'd want to use something like in your .ksy:</p> <pre><code>instances: tag: pos: _io.size - 128 type: id3v1_tag </code></pre> <p>Note that while current stable is v0.3, you'll have to download and build the compiler + runtimes from the Github and use the latest ones.</p>
1
2016-08-08T10:54:36Z
[ "python", "id3", "kaitai-struct" ]
Using Python to change the date on an xml document
38,817,961
<p>I have an xml in this form</p> <pre><code>&lt;project name="Hello World"&gt; &lt;testfile name="testfile1"&gt; &lt;type&gt;TXT&lt;/type&gt; &lt;size&gt;1000&lt;/size&gt; &lt;lastModified&gt;2014-08-03 03:40:00&lt;/lastModified&gt; &lt;/testfile&gt; &lt;testfile name="testfile2"&gt; &lt;type&gt;PDF&lt;/type&gt; &lt;size&gt;500&lt;/size&gt; &lt;lastModified&gt;2015-09-23 17:40:17&lt;/lastModified&gt; &lt;/testfile&gt; &lt;/project&gt; </code></pre> <p>This is an .xml file containing info about my project, so I can update my testfiles should they are more than 3 months old. </p> <p>Right now, i'm stuck trying to figure out how to change the element in the .xml file. This is my code so far...</p> <pre><code>import xml.etree.ElementTree as ET import sys from datetime import datetime def updateLastModified(self): today = datetime.now().strftime('%Y-%m-%d %H:%M:%S') # This variable stores todays date and time stamp for future reference. We shouldn't compute a new one every time. today = datetime.strptime(today, '%Y-%m-%d %H:%M:%S') # Now we need to iterate through all the testfiles in our metadata and update their lastModified tag with the current date. for testfile in self.getMetadataRoot().findall('testfile'): lastmodified = testfile.find('lastModified') # get the lastmodified field in it's whole, so we can modify it. previous_update = datetime.strptime(lastmodified.text, '%Y-%m-%d %H:%M:%S') # get the previous date from the lastmodified field and translate it from the str format if previous_update &lt; today: lastmodified.text = str(today.strftime('%Y-%m-%d %H:%M:%S')) self.getMetadataTree().write(self.meta_file) </code></pre> <p>But for some reason, the meta_file is not changing... 
What am I doing wrong???</p> <p>The problem is after the <code>if</code> statement, where the file is not being modified</p> <p>here are the other methods i'm using in this class:</p> <pre><code>def __init__(self, filepath): self.meta_file = filepath def getMetadataTree(self): return ET.parse(self.meta_file) def getMetadataRoot(self): tree = self.getMetadataTree() root = tree.getroot() return root </code></pre>
0
2016-08-07T19:55:37Z
38,818,409
<p>Your definition of <code>self.getMetadataTree()</code> re-parse the input file every time it gets called. So in the line <code>self.getMetadataTree().write(self.meta_file)</code>, it parse the meta file and write it back (the same info). All previous modification to timestamp is not relevant (it's a different instance of <code>ElementTree</code>).</p> <p>I guess you want to do something like this:</p> <pre><code>import xml.etree.ElementTree as ET import sys from datetime import datetime class TimestampUpdater(object): def __init__(self, filepath): self.meta_file = filepath self.tree = ET.parse(self.meta_file) def getMetadataTree(self): return self.tree def getMetadataRoot(self): return self.tree.getroot() def updateLastModified(self): today = datetime.now() for testfile in self.getMetadataRoot().findall('testfile'): lastmodified = testfile.find('lastModified') previous_update = datetime.strptime(lastmodified.text, '%Y-%m-%d %H:%M:%S') if previous_update &lt; today: lastmodified.text = today.strftime('%Y-%m-%d %H:%M:%S') self.getMetadataTree().write(self.meta_file) def print_file_content(filename): """Print contents of a file.""" with open(filename, 'r') as fh: for line in fh: print line.rstrip() if __name__ == '__main__': metafile = 'test.xml' print "\n====Before updating:====" print_file_content(metafile) updater = TimestampUpdater(metafile) updater.updateLastModified() print "\n====After updating:====" print_file_content(metafile) </code></pre> <p>Output:</p> <pre><code>====Before updating:==== &lt;project name="Hello World"&gt; &lt;testfile name="testfile1"&gt; &lt;type&gt;TXT&lt;/type&gt; &lt;size&gt;1000&lt;/size&gt; &lt;lastModified&gt;2016-08-07 16:58:23&lt;/lastModified&gt; &lt;/testfile&gt; &lt;testfile name="testfile2"&gt; &lt;type&gt;PDF&lt;/type&gt; &lt;size&gt;500&lt;/size&gt; &lt;lastModified&gt;2016-08-07 16:58:23&lt;/lastModified&gt; &lt;/testfile&gt; &lt;/project&gt; ====After updating:==== &lt;project name="Hello World"&gt; &lt;testfile 
name="testfile1"&gt; &lt;type&gt;TXT&lt;/type&gt; &lt;size&gt;1000&lt;/size&gt; &lt;lastModified&gt;2016-08-07 16:58:36&lt;/lastModified&gt; &lt;/testfile&gt; &lt;testfile name="testfile2"&gt; &lt;type&gt;PDF&lt;/type&gt; &lt;size&gt;500&lt;/size&gt; &lt;lastModified&gt;2016-08-07 16:58:36&lt;/lastModified&gt; &lt;/testfile&gt; &lt;/project&gt; </code></pre>
2
2016-08-07T20:47:21Z
[ "python", "xml", "datetime" ]
Python 3 - need from exec() to return values
38,817,962
<p>I have a small problem with <code>exec()</code>. I have string from Kivy GUI which I need to execute and store values from the executed code. </p> <pre><code>class gui(BoxLayout): def proces(self): t = threading.Thread(target=self.graf) t.daemon = True t.start() def graph(self): CodeInput=self.ids.codas Code=CodeInput.text x, y = [], [] exec(Code) print(x,y) # empty list prints # then x y will serve for plotting a graph </code></pre> <hr> <p>This is a string inside the 'Code':</p> <pre><code>def values(): x=np.linspace(0,3.14,100) y=np.sin(x) print(x) # of course works return x,y x,y=values() </code></pre> <hr> <p>Everything WORKS except I cant get the values x,y from exec(Code). Its like exec() is totaly separate operation that can be started but cannot be entered.</p>
0
2016-08-07T19:55:45Z
38,818,071
<p>You should call <code>exec</code> with a local namespace:</p> <pre><code>loc = {} exec(Code, {}, loc) x = loc['x'] y = loc['y'] </code></pre>
0
2016-08-07T20:06:50Z
[ "python", "return", "exec" ]
convert elastic search timestamp attribute to seconds
38,818,126
<p>How can i convert this timestamp value to an integer representing seconds-since-the-epoch in python</p> <pre><code> 2016-08-06T06:07:36.349Z </code></pre> <p>This is the timestamp value i received in elastic search query.</p> <p>I tried searching but the timestamp format was different from this and <a href="http://stackoverflow.com/questions/7852855/how-to-convert-a-python-datetime-object-to-seconds">this</a> didn't helped neither any other</p>
0
2016-08-07T20:13:43Z
38,818,222
<p>You can use python inbuilt datetime package and its strptime method to convert string into datetime object.</p> <pre><code>from datetime import datetime datetime.strptime("2016-08-06T06:07:36.349Z","%Y-%m-%dT%H:%M:%S.%fZ") </code></pre> <p>after that you should get epoch datetime object which you can get by </p> <pre><code>epoch = datetime.utcfromtimestamp(0) </code></pre> <p>your final seconds can be derived from this method</p> <pre><code>def unix_time_millis(datetime): return (datetime - epoch).total_seconds() * 1000.0 </code></pre> <p>so your complete code looks like </p> <pre><code>from datetime import datetime epoch = datetime.utcfromtimestamp(0) def unix_time_millis(datetime): return (datetime - epoch).total_seconds() * 1000.0 current_date = datetime.strptime("2016-08-06T06:07:36.349Z","%Y-%m-%dT%H:%M:%S.%fZ") print unix_time_millis(current_date) </code></pre> <p>This answer is inspired from this answer <a href="http://stackoverflow.com/a/11111177/4453633">http://stackoverflow.com/a/11111177/4453633</a></p>
1
2016-08-07T20:25:25Z
[ "python", "elasticsearch", "timestamp" ]
Filter only TCP SYN segments in scapy
38,818,131
<p>How can I filter only TCP SYN segments in Scapy? Could someone give me a simple example, please? I tried hard but I dont know scapy!</p>
1
2016-08-07T20:14:07Z
38,818,177
<p>Only TCP SYN segments:</p> <pre><code>tcp[tcpflags] &amp; tcp-syn != 0 </code></pre>
2
2016-08-07T20:20:25Z
[ "python", "scapy" ]
Have Code Change Terminal Settings Upon Load
38,818,150
<p>I have a program that requires me to enter <code>chcp 65001</code> and <code>set PYTHONIOENCODING=utf-8</code> before I run the code, or it cannot process characters properly. Is there a way to have the code automatically do this upon opening? Its a pain, and easy to forget, to do this every time.</p>
1
2016-08-07T20:17:06Z
38,819,626
<p>Actually, you should be able to do this within Python, but supposing that its runtime has this requirement, you can do these settings in a batch file before calling python with your script. For example, suppose this script is called <strong><code>pyutf8.bat</code></strong> :</p> <pre><code>@echo off setlocal chcp 65001 set PYTHONIOENCODING=utf-8 python %1 %2 %3 %4 %5 %6 %7 %8 %9 </code></pre> <p>then you could run your script using this:</p> <pre><code>pyutf8 myscript.py </code></pre> <p>The <code>%1</code>, etc., are the positional parameters of the script. You can check if no parameters were given, using that to supply a default parameter. Something like this:</p> <pre><code>@echo off setlocal chcp 65001 set PYTHONIOENCODING=utf-8 if "%1"=="" goto default python %1 %2 %3 %4 %5 %6 %7 %8 %9 goto :eof :default python myscript.py </code></pre> <p>Further reading:</p> <ul> <li><a href="https://technet.microsoft.com/en-us/library/bb490890.aspx" rel="nofollow">Command-line reference A-Z </a></li> </ul>
0
2016-08-07T23:51:51Z
[ "python", "windows", "cmd", "windows-console" ]
Locating a graphic function (Python)
38,818,220
<p>First off, thanks to the site and everybody on it. I am taking my first python class and have come across this site many times when trouble-shooting coding problems. Thanks to everybody who have already helped me out a little thus far. But, I do have a problem I can't figure out:</p> <p>I have to draw the "5" side of a die in a python graphics window. The catch is that I can't just draw them. My "Dot" function has to be called 5 times to complete the graphic. I had trouble with the dot being placed on the rectangle, but the prof helped me out there. I just can't seem to locate the same dot in different locations. Here is my code so far:</p> <pre><code>from graphics import* def Dot(win): # Draw a dot center=Point(150,150) circ=Circle(center,25) circ.setFill('Black') circ.draw(win) def Dice(): #Build the dice (fill white, background green) win=GraphWin('Shapes',500,500) win.setBackground('Green') rect=Rectangle(Point(100,100),Point(400,400)) rect.setFill('White') rect.draw(win) #Call dot 5 times with different locations: Dot(win) Dot(win) Dot(win) Dot(win) Dot(win) def main(): Dice() main() </code></pre> <p>I have to call the "Dot" function 5 times. However, I have tried to ".move(pt,pt), .locate, etc. I can't figure out how to take the "Dot" function and move it to a different location on the graphics window. Any help would be greatly appreciated.</p> <p>Thanks.</p>
0
2016-08-07T03:15:24Z
38,842,725
<p>I was finally able to get this one. I wasn't aware you could command after the window arguement. So, the Dot(win, 350,150) etc. with the different coordinates worked well. Thanks for the responses and help!</p>
0
2016-08-09T05:29:33Z
[ "python", "graphics" ]
Beautifulsoup constructors and its arguments
38,818,274
<p>I've seen here on SO many ways to initialize a Beautifulsoup object. As far as I can see, you can either pass a string=url or to pass some object. For instance, it's common to use <code>urllib</code>:</p> <pre><code>url="https://somesite.com" url_html="&lt;html&gt;&lt;body&gt;&lt;h1&gt;Some header&lt;/h1&gt;&lt;p&gt;asdas&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;" soup1=BeautifulSoup(url_html, "html.parser") #1st way print(soup1.find("p").text) #can get the text "asdas" soup2=BeautifulSoup(urllib.request.urlopen(url).read(), "html.parser") #2nd way soup3=BeautifulSoup(urllib.request.urlopen(url), "html.parser") #3rd way print(soup1.prettify()) print(soup2.prettify()) print(soup3.prettify()) </code></pre> <p>But what happens inside the two last ways of initializing the soup? As far as I can see, <code>urllib.request.urlopen(url).read()</code> is the same thing as a pure html string <code>url_html</code>. But what about soup3? Does it works because BeautifulSoup's constructor expects a string and there is a toString method in the object returned by <code>urlopen()</code>? And the object is converted into string and in reality 3rd method is the same as the 2nd?</p> <p>Are there any other ways of initializing BeautifulSoup? Which is preferable?</p>
0
2016-08-07T20:31:40Z
38,818,338
<p><code>urlopen()</code> returns an open file-like object. The constructor of Beautifulsoup uses type-checking to see whether it got a file or a string (to be precise, it does <code>markup.hasattr("read")</code>. In the first case, it simply calls its <code>read()</code> method.</p> <p>This is a common pattern in Python libraries that deal with big amounts of user-provided text data.</p> <p>The difference in Soup's case is non-existent. Other libraries might do something more intelligent with a file object, e.g. partition it and not load it to memory en bloque.</p>
2
2016-08-07T20:40:18Z
[ "python", "beautifulsoup" ]
Python, multiprocessing: How to optimize the code? Make the code faster?
38,818,285
<p>I use Python. I have 100 zip files. Each zipfile contains more than 100 xmlfiles. Using the xmlfiles I create csvfiles. </p> <pre><code>from xml.etree.ElementTree import fromstring import zipfile from multiprocessing import Process def parse_xml_for_csv1(data, writer1): root = fromstring(data) for node in root.iter('name'): writer1.writerow(node.get('value')) def create_csv1(): with open('output1.csv', 'w') as f1: writer1 = csv.writer(f1) for i in range(1, 100): z = zipfile.ZipFile('xml' + str(i) + '.zip') # z.namelist() contains more than 100 xml files for finfo in z.namelist(): data = z.read(finfo) parse_xml_for_csv1(data, writer1) def create_csv2(): with open('output2.csv', 'w') as f2: writer2 = csv.writer(f2) for i in range(1, 100): ... if __name__ == "__main__": p1 = Process(target=create_csv1) p2 = Process(target=create_csv2) p1.start() p2.start() p1.join() p2.join() </code></pre> <p>Please tell me, how to optimize my code? Make the code faster?</p>
0
2016-08-07T20:33:28Z
38,818,692
<p>You just need to define one method, with parameters. Split the processing of your 100 .zip files across a given number of threads or processes. The more processes you'll add, the more CPU you'll use, and maybe you can use more than 2 processes, it will be faster (there can be a bottleneck because of disk I/O at some point)</p> <p>In the following code, I can change to 4 or 10 processes, no need to copy/paste code. And it processes different zip files.</p> <p>Your code processes the same 100 files twice in parallel: it was even slower than if there were no multiprocessing!</p> <pre><code>def create_csv(start_index,step): with open('output{0}.csv'.format(start_index//step), 'w') as f1: writer1 = csv.writer(f1) for i in range(start_index, start_index+step): z = zipfile.ZipFile('xml' + str(i) + '.zip') # z.namelist() contains more than 100 xml files for finfo in z.namelist(): data = z.read(finfo) parse_xml_for_csv1(data, writer1) if __name__ == "__main__": nb_files = 100 nb_processes = 2 # raise to 4 or 8 depending on your machine step = nb_files//nb_processes lp = [] for start_index in range(1,nb_files,step): p = Process(target=create_csv,args=[start_index,step]) p.start() lp.append(p) for p in lp: p.join() </code></pre>
2
2016-08-07T21:22:05Z
[ "python", "file", "multiprocessing", "python-multithreading", "zipfile" ]
Python for-loop with modification of the iterationlist
38,818,339
<p>I have this .txt file:</p> <pre><code>king james version of the bible the first book of moses called genesis </code></pre> <p>I use a python-script for statistics on .txt files, the file is read into the list <code>final_list</code> and then I execute this code (part of longer script):</p> <pre><code>for word in final_list: output_list.append((word,final_list.count(word))) final_list[:] = [x for x in final_list if x != word] #DEBUGGING print(len(final_list)) print(final_list) </code></pre> <p>My problem is that some of my list is not used by the for-loop, which I suspect form looking at the output:</p> <pre><code>12 ['james', 'version', 'of', 'the', 'bible', 'the', 'first', 'book', 'of', 'moses', 'called', 'genesis'] 11 ['james', 'of', 'the', 'bible', 'the', 'first', 'book', 'of', 'moses', 'called', 'genesis'] 9 ['james', 'of', 'bible', 'first', 'book', 'of', 'moses', 'called', 'genesis'] 8 ['james', 'of', 'bible', 'book', 'of', 'moses', 'called', 'genesis'] 6 ['james', 'bible', 'book', 'moses', 'called', 'genesis'] 5 ['james', 'bible', 'book', 'moses', 'called'] </code></pre> <p>This makes me wonder how the python for-loop actually works.</p>
-1
2016-08-07T20:40:19Z
38,818,418
<p>The trouble is that you're modifying the list. After the first iteration, Python's iterator stops looking at "position 0" in the list and moves to "position 1." You removed the element originally in position 0 (<code>king</code>), so the element that used to be in position 1 (<code>james</code>) is now in position 0, which means that when Python looks at the element in position 1, it sees the element that was originally in position 2 (<code>version</code>).</p> <p>At the end, Python's iterator has moved to a position that is beyond the end of the list, so it quits in a manner you think is premature, even though that is precisely what you've asked it to do.</p>
1
2016-08-07T20:49:06Z
[ "python" ]
Python for-loop with modification of the iterationlist
38,818,339
<p>I have this .txt file:</p> <pre><code>king james version of the bible the first book of moses called genesis </code></pre> <p>I use a python-script for statistics on .txt files, the file is read into the list <code>final_list</code> and then I execute this code (part of longer script):</p> <pre><code>for word in final_list: output_list.append((word,final_list.count(word))) final_list[:] = [x for x in final_list if x != word] #DEBUGGING print(len(final_list)) print(final_list) </code></pre> <p>My problem is that some of my list is not used by the for-loop, which I suspect form looking at the output:</p> <pre><code>12 ['james', 'version', 'of', 'the', 'bible', 'the', 'first', 'book', 'of', 'moses', 'called', 'genesis'] 11 ['james', 'of', 'the', 'bible', 'the', 'first', 'book', 'of', 'moses', 'called', 'genesis'] 9 ['james', 'of', 'bible', 'first', 'book', 'of', 'moses', 'called', 'genesis'] 8 ['james', 'of', 'bible', 'book', 'of', 'moses', 'called', 'genesis'] 6 ['james', 'bible', 'book', 'moses', 'called', 'genesis'] 5 ['james', 'bible', 'book', 'moses', 'called'] </code></pre> <p>This makes me wonder how the python for-loop actually works.</p>
-1
2016-08-07T20:40:19Z
38,818,440
<p>You should not modify the list you are iterating on inside the for loop. Otherwise you will get this kind of weird behavior. You'd better use a copy of the final_list likeso:</p> <pre><code>final_copy = final_list[:] for word in final_list: output_list.append((word,final_copy.count(word))) final_copy = [x for x in final_copy if x != word] </code></pre>
2
2016-08-07T20:51:39Z
[ "python" ]
Does flask session variable maintain state across threads?
38,818,350
<p>According to the <a href="http://flask.pocoo.org/docs/0.11/api/#sessions" rel="nofollow">docs</a>: </p> <blockquote> <p>A session basically makes it possible to remember information from one request to another.</p> </blockquote> <p>But elsewhere, <a href="http://flask.pocoo.org/docs/0.10/reqcontext/#how-the-context-works" rel="nofollow">the docs say</a> that <code>session</code> is local to a thread. So if <code>flask</code> is used in a multithreaded environment (say, <code>app.run(threaded=True)</code>), how can it fulfill this promise? </p> <p>I only see two alternatives:</p> <ul> <li><code>flask</code> somehow ensures that the same user session is always serviced by the same thread (which seems horrible because a single user browsing in two tabs would have to wait for his first request to finish before his second request is handled) </li> <li><code>session</code> is completely useless if I allow threads, and I need to store session-specific information in a database (which seems rather unexpected and isn't mentioned in the docs)</li> </ul> <p>Am I missing something?</p> <p>Edit: I guess another alternative is:</p> <ul> <li><code>session</code> itself is a thread-local variable from python perspective (i.e., python sees each thread's <code>session</code> as completely independent objects), but it is somehow synchronized across threads by <code>flask</code> (presumably with some process-global in-memory data structure). 
In that case, <code>flask</code> could make all modifications to <code>session</code> atomic (using some inter-thread synchronization mechanism).</li> </ul> <p>Update: based on @Daniel Roseman answer, my last guess was correct, except flask doesn't do it itself but rather asks the user agent to store / retrieve persistent state (and thus, the state is not per <em>flask process</em> or per <em>flask application</em> but rather per whatever collection of requests the user agent happens to persist the state over - what one might call <em>user session</em>).</p>
1
2016-08-07T20:41:35Z
38,818,411
<p>From the <em>very next sentence</em> after the one you quoted:</p> <blockquote> <p>The way Flask does this is by using a signed cookie.</p> </blockquote> <p>Flask stores the information in the cookie before sending the response, and reads it back at the beginning of the next request.</p>
2
2016-08-07T20:48:07Z
[ "python", "multithreading", "flask" ]
Custom User Model ValueError: Related Model Django(1.9)
38,818,370
<p>There is an error when I try to add CustomUser model as ForiegnKey to a field in django. The authentication is working using the CustomUser model but for some reason I have getting this error:</p> <pre><code>ValueError: Related model 'authentication.UserModel' cannot be resolved </code></pre> <p>Here is the app/models.py:</p> <pre><code> from django.db import models from django.conf import settings class A(models.Model) tender_authority = models.ForeignKey(settings.AUTH_USER_MODEL) </code></pre> <p>Here is the settings.py</p> <pre><code>AUTH_USER_MODEL = 'authentication.UserModel' </code></pre> <p>Here is the tree structure of the project:</p> <pre><code>myproject settings.py authentication models.py ---&gt; UserModel (my custom user model name) Model is present here myapp models.py ---&gt; Error happening here </code></pre> <p>Thank you in advance. </p> <p>Full Trace back, forgot to mention that this is happening while I am trying to migrate: </p> <pre><code>Operations to perform: Apply all migrations: auth, admin, authentication, contenttypes, sessions, Forms Running migrations: Rendering model states... 
DONE Applying Forms.0010_tech_auth_Usermodel_to_User...Traceback (most recent call last): File "manage.py", line 10, in &lt;module&gt; execute_from_command_line(sys.argv) File "/usr/local/lib/python3.5/dist-packages/django/core/management/__init__.py", line 353, in execute_from_command_line utility.execute() File "/usr/local/lib/python3.5/dist-packages/django/core/management/__init__.py", line 345, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/usr/local/lib/python3.5/dist-packages/django/core/management/base.py", line 348, in run_from_argv self.execute(*args, **cmd_options) File "/usr/local/lib/python3.5/dist-packages/django/core/management/base.py", line 399, in execute output = self.handle(*args, **options) File "/usr/local/lib/python3.5/dist-packages/django/core/management/commands/migrate.py", line 200, in handle executor.migrate(targets, plan, fake=fake, fake_initial=fake_initial) File "/usr/local/lib/python3.5/dist-packages/django/db/migrations/executor.py", line 92, in migrate self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial) File "/usr/local/lib/python3.5/dist-packages/django/db/migrations/executor.py", line 121, in _migrate_all_forwards state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial) File "/usr/local/lib/python3.5/dist-packages/django/db/migrations/executor.py", line 198, in apply_migration state = migration.apply(state, schema_editor) File "/usr/local/lib/python3.5/dist-packages/django/db/migrations/migration.py", line 123, in apply operation.database_forwards(self.app_label, schema_editor, old_state, project_state) File "/usr/local/lib/python3.5/dist-packages/django/db/migrations/operations/fields.py", line 201, in database_forwards schema_editor.alter_field(from_model, from_field, to_field) File "/usr/local/lib/python3.5/dist-packages/django/db/backends/base/schema.py", line 454, in alter_field new_db_params = 
new_field.db_parameters(connection=self.connection) File "/usr/local/lib/python3.5/dist-packages/django/db/models/fields/related.py", line 967, in db_parameters return {"type": self.db_type(connection), "check": []} File "/usr/local/lib/python3.5/dist-packages/django/db/models/fields/related.py", line 958, in db_type rel_field = self.target_field File "/usr/local/lib/python3.5/dist-packages/django/db/models/fields/related.py", line 861, in target_field return self.foreign_related_fields[0] File "/usr/local/lib/python3.5/dist-packages/django/db/models/fields/related.py", line 594, in foreign_related_fields return tuple(rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field) File "/usr/local/lib/python3.5/dist-packages/django/db/models/fields/related.py", line 581, in related_fields self._related_fields = self.resolve_related_fields() File "/usr/local/lib/python3.5/dist-packages/django/db/models/fields/related.py", line 566, in resolve_related_fields raise ValueError('Related model %r cannot be resolved' % self.remote_field.model) ValueError: Related model 'authentication.UserModel' cannot be resolved </code></pre> <p>Migration: </p> <p>'</p> <p>class Migration(migrations.Migration):</p> <pre><code>dependencies = [ ('Forms', '0009_revised_tender_changes'), ] operations = [ migrations.AlterField( model_name='agreementsanctionmodel', name='bank_details_one', field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.BankDetailsModel', verbose_name='Bank Details'), ), migrations.AlterField( model_name='agreementsanctionmodel', name='contractor', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.Contractor', verbose_name='Contractor'), ), migrations.AlterField( model_name='agreementsanctionmodel', name='contractor_name', field=models.CharField(blank=True, max_length=250, verbose_name='Contractor'), ), migrations.AlterField( model_name='agreementsanctionmodel', name='created_on', 
field=models.DateTimeField(auto_now=True, verbose_name='Created On'), ), migrations.AlterField( model_name='agreementsanctionmodel', name='financial_year', field=models.CharField(blank=True, max_length=5, verbose_name='Financial Year'), ), migrations.AlterField( model_name='agreementsanctionmodel', name='tender', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.Tender', verbose_name='Tender'), ), migrations.AlterField( model_name='agreementsanctionmodel', name='tender_name', field=models.CharField(blank=True, max_length=50, unique=True, verbose_name='Tender'), ), migrations.AlterField( model_name='agreementsanctionmodel', name='updated_on', field=models.DateField(blank=True, default=None, null=True, verbose_name='Updated On'), ), migrations.AlterField( model_name='consultant', name='districts', field=models.CharField(max_length=50, null=True, verbose_name='District'), ), migrations.AlterField( model_name='consultant', name='email', field=models.EmailField(blank=True, max_length=254, null=True, unique=True, verbose_name='Email ID'), ), migrations.AlterField( model_name='consultant', name='first_name', field=models.CharField(default=0, max_length=100, verbose_name='First Name'), preserve_default=False, ), migrations.AlterField( model_name='consultant', name='house_number', field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Phone Number'), ), migrations.AlterField( model_name='consultant', name='last_name', field=models.CharField(max_length=100, verbose_name='Last Name'), ), migrations.AlterField( model_name='consultant', name='middle_name', field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Middle Name'), ), migrations.AlterField( model_name='consultant', name='pan_number', field=models.CharField(default=0, max_length=50, unique=True, verbose_name='PAN Number'), preserve_default=False, ), migrations.AlterField( model_name='consultant', name='state', 
field=models.CharField(max_length=50, null=True, verbose_name='State'), ), migrations.AlterField( model_name='consultant', name='street_name', field=models.CharField(max_length=50, null=True, verbose_name='Street Name'), ), migrations.AlterField( model_name='consultant', name='tin_number', field=models.CharField(default=0, max_length=10, unique=True, verbose_name='TIN Number'), preserve_default=False, ), migrations.AlterField( model_name='contractor', name='districts', field=models.CharField(max_length=50, null=True, verbose_name='District'), ), migrations.AlterField( model_name='contractor', name='email', field=models.EmailField(blank=True, max_length=254, null=True, unique=True, verbose_name='Email ID'), ), migrations.AlterField( model_name='contractor', name='first_name', field=models.CharField(max_length=100, verbose_name='First Name'), ), migrations.AlterField( model_name='contractor', name='house_number', field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Phone Number'), ), migrations.AlterField( model_name='contractor', name='last_name', field=models.CharField(max_length=100, verbose_name='Last Name'), ), migrations.AlterField( model_name='contractor', name='middle_name', field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Middle Name'), ), migrations.AlterField( model_name='contractor', name='state', field=models.CharField(max_length=50, null=True, verbose_name='State'), ), migrations.AlterField( model_name='contractor', name='street_name', field=models.CharField(max_length=50, null=True, verbose_name='Street Name'), ), migrations.AlterField( model_name='locationmodel', name='district', field=models.CharField(max_length=50, verbose_name='District '), ), migrations.AlterField( model_name='locationmodel', name='division', field=models.CharField(max_length=50, verbose_name='Division '), ), migrations.AlterField( model_name='locationmodel', name='location', field=models.CharField(max_length=50, 
verbose_name='Location'), ), migrations.AlterField( model_name='locationmodel', name='place', field=models.CharField(max_length=50, verbose_name='Place '), ), migrations.AlterField( model_name='locationmodel', name='work', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.WorkModel', verbose_name='Work'), ), migrations.AlterField( model_name='nominationmodel', name='created_on', field=models.DateTimeField(auto_now=True, verbose_name='Created On'), ), migrations.AlterField( model_name='nominationmodel', name='tender_authority', field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.TechnicalAuthority', verbose_name='Technical Authority'), ), migrations.AlterField( model_name='nominationmodel', name='updated_on', field=models.DateTimeField(blank=True, null=True, verbose_name='Updated On'), ), migrations.AlterField( model_name='nominationmodel', name='work_name', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.WorkModel', verbose_name='Work'), ), migrations.AlterField( model_name='pettycontractors', name='districts', field=models.CharField(max_length=50, null=True, verbose_name='District'), ), migrations.AlterField( model_name='pettycontractors', name='email', field=models.EmailField(blank=True, max_length=254, null=True, unique=True, verbose_name='Email ID'), ), migrations.AlterField( model_name='pettycontractors', name='first_name', field=models.CharField(default=0, max_length=100, verbose_name='First Name'), preserve_default=False, ), migrations.AlterField( model_name='pettycontractors', name='house_number', field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Phone Number'), ), migrations.AlterField( model_name='pettycontractors', name='last_name', field=models.CharField(max_length=100, verbose_name='Last Name'), ), migrations.AlterField( model_name='pettycontractors', name='middle_name', field=models.CharField(blank=True, 
max_length=100, null=True, verbose_name='Middle Name'), ), migrations.AlterField( model_name='pettycontractors', name='pan_number', field=models.CharField(default=0, max_length=50, unique=True, verbose_name='PAN Number'), preserve_default=False, ), migrations.AlterField( model_name='pettycontractors', name='state', field=models.CharField(max_length=50, null=True, verbose_name='State'), ), migrations.AlterField( model_name='pettycontractors', name='street_name', field=models.CharField(max_length=50, null=True, verbose_name='Street Name'), ), migrations.AlterField( model_name='pettycontractors', name='tin_number', field=models.CharField(default=0, max_length=10, unique=True, verbose_name='TIN Number'), preserve_default=False, ), migrations.AlterField( model_name='projectmodel', name='created_on', field=models.DateTimeField(auto_now=True, verbose_name='Created On'), ), migrations.AlterField( model_name='projectmodel', name='financial_year', field=models.CharField(blank=True, max_length=5, verbose_name='Financial Year'), ), migrations.AlterField( model_name='projectmodel', name='project_name', field=models.CharField(blank=True, max_length=250, unique=True, verbose_name='Project Name'), ), migrations.AlterField( model_name='projectmodel', name='scheme_name', field=models.CharField(blank=True, max_length=250, verbose_name='Scheme Name'), ), migrations.AlterField( model_name='projectmodel', name='updated_on', field=models.DateTimeField(blank=True, null=True, verbose_name='Update On'), ), migrations.AlterField( model_name='revisedadministrativesanctionmodel', name='created_on', field=models.DateTimeField(auto_now=True, verbose_name='Created On'), ), migrations.AlterField( model_name='revisedadministrativesanctionmodel', name='project', field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.ProjectModel', verbose_name='Project'), ), migrations.AlterField( model_name='revisedadministrativesanctionmodel', name='updated_on', 
field=models.DateField(blank=True, default=None, null=True, verbose_name='Updated On'), ), migrations.AlterField( model_name='revisedtechnicalapprovalmodel', name='authority', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.TechnicalAuthority', verbose_name='Technical Authority'), ), migrations.AlterField( model_name='revisedtechnicalapprovalmodel', name='created_on', field=models.DateTimeField(auto_now=True, verbose_name='Created On'), ), migrations.AlterField( model_name='revisedtechnicalapprovalmodel', name='updated_on', field=models.DateField(blank=True, default=None, null=True, verbose_name='Updated On'), ), migrations.AlterField( model_name='revisedtechnicalapprovalmodel', name='work_name', field=models.CharField(blank=True, max_length=300, verbose_name='Work'), ), migrations.AlterField( model_name='revisedtechnicalsanctionmodel', name='project', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.ProjectModel', verbose_name='Project'), ), migrations.AlterField( model_name='schememodel', name='created_on', field=models.DateTimeField(auto_now=True, verbose_name='Created On'), ), migrations.AlterField( model_name='schememodel', name='dept_name', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.UserDepartment', verbose_name='Department Name'), ), migrations.AlterField( model_name='schememodel', name='financial_year', field=models.CharField(blank=True, max_length=5, verbose_name='Financial Year'), ), migrations.AlterField( model_name='schememodel', name='scheme_name', field=models.CharField(max_length=250, unique=True, verbose_name='Scheme Name'), ), migrations.AlterField( model_name='schememodel', name='total_admin_sanction_amount', field=models.FloatField(verbose_name='Total Admin Sanction Amount'), ), migrations.AlterField( model_name='schememodel', name='updated_on', field=models.DateTimeField(blank=True, null=True, verbose_name='Updated 
On'), ), migrations.AlterField( model_name='technicalapprovalmodel', name='created_on', field=models.DateTimeField(auto_now=True, verbose_name='Created On'), ), migrations.AlterField( model_name='technicalapprovalmodel', name='financial_year', field=models.CharField(blank=True, max_length=5, verbose_name='Financial Year'), ), migrations.AlterField( model_name='technicalapprovalmodel', name='project_name', field=models.CharField(blank=True, max_length=250, verbose_name='Project Name'), ), migrations.AlterField( model_name='technicalapprovalmodel', name='updated_on', field=models.DateField(blank=True, null=True, verbose_name='Updated On'), ), migrations.AlterField( model_name='technicalapprovalmodel', name='work_name', field=models.CharField(blank=True, max_length=300, verbose_name='Work Name'), ), migrations.AlterField( model_name='technicalauthority', name='tender_authority', field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Technical Authority'), preserve_default=False, ), migrations.AlterField( model_name='technicalsanctionmodel', name='created_on', field=models.DateTimeField(auto_now=True, verbose_name='Created On'), ), migrations.AlterField( model_name='technicalsanctionmodel', name='financial_year', field=models.CharField(blank=True, max_length=5, verbose_name='Financial Year'), ), migrations.AlterField( model_name='technicalsanctionmodel', name='project_name', field=models.CharField(blank=True, max_length=300, verbose_name='Project Name'), ), migrations.AlterField( model_name='technicalsanctionmodel', name='technical_authority', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.TechnicalAuthority', verbose_name='Technical Authority'), ), migrations.AlterField( model_name='technicalsanctionmodel', name='updated_on', field=models.DateTimeField(blank=True, null=True, verbose_name='Update On'), ), migrations.AlterField( model_name='technicalwing', 
name='technical_wing_name', field=models.CharField(max_length=10, null=True, unique=True, verbose_name='Technical Wing'), ), migrations.AlterField( model_name='tender', name='created_on', field=models.DateTimeField(auto_now=True, verbose_name='Created On'), ), migrations.AlterField( model_name='tender', name='updated_on', field=models.DateTimeField(blank=True, null=True, verbose_name='Updated On'), ), migrations.AlterField( model_name='userdepartment', name='department_name', field=models.CharField(max_length=50, null=True, unique=True, verbose_name='Department Name'), ), migrations.AlterField( model_name='userdepartment', name='department_reference', field=models.CharField(max_length=7, null=True, unique=True, verbose_name='Department Name'), ), migrations.AlterField( model_name='workmodel', name='created_on', field=models.DateTimeField(auto_now=True, verbose_name='Created On'), ), migrations.AlterField( model_name='workmodel', name='financial_year', field=models.CharField(blank=True, max_length=5, verbose_name='Financial Year'), ), migrations.AlterField( model_name='workmodel', name='project_name', field=models.CharField(blank=True, max_length=250, verbose_name='Project Name'), ), migrations.AlterField( model_name='workmodel', name='updated_on', field=models.DateTimeField(blank=True, null=True, verbose_name='Updated On'), ), migrations.AlterField( model_name='workmodel', name='work_name', field=models.CharField(blank=True, max_length=300, unique=True, verbose_name='Work Name'), ), migrations.AlterField( model_name='workordermodel', name='created_on', field=models.DateTimeField(auto_now=True, verbose_name='Created On'), ), migrations.AlterField( model_name='workordermodel', name='tender_authority', field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.TechnicalAuthority', verbose_name='Technical Authority'), ), migrations.AlterField( model_name='workordermodel', name='updated_on', field=models.DateTimeField(blank=True, 
null=True, verbose_name='Updated On'), ), migrations.AlterField( model_name='workordermodel', name='work_name', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='Forms.WorkModel', verbose_name='Work'), ), ] </code></pre> <p>'</p>
0
2016-08-07T20:43:16Z
38,818,514
<p>You seem to be missing a dependency for the migration that adds the user model. I'm not sure why it's missing, but try adding it manually and running your migrations again:</p> <pre><code>dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('Forms', '0009_revised_tender_changes'), ] </code></pre>
1
2016-08-07T21:00:43Z
[ "python", "django", "django-models" ]
No output with .duplicated in pandas?
38,818,401
<p>I want to find all of the rows which have duplicates in the columns of city, round_latitude, and round_longitude. So, if two rows share the same values in each of those columns, it would be returned.</p> <p>I'm not exactly sure what is going on here: I'm certain that there are duplicates in the dataset. No error is returned when running In[38], the column names are returned but there are no entries. What am I doing wrong here? How can I fix this?</p> <p>If it helps, I'm also working off of some of the code in <a href="http://datadryad.org/bitstream/handle/10255/dryad.114711/Sci-Hub.html?sequence=2" rel="nofollow">this guide.</a> (The format is HTML.)</p> <pre><code># In[29]: def dl_by_loc(path): endname = "USA_downloads.csv" with open(path + endname, "r") as f: data = pd.read_csv(f) data.columns = ["date","city","coords","doi","latitude","longitude","round_latitude","round_longitude"] data = data.groupby(["round_latitude","round_longitude","city"]).count() data = data.rename(columns = {"date":"downloads"}) return data["downloads"] # In[30]: downloads_by_coords = dl_by_loc(path) len(downloads_by_coords) # In[31]: downloads_by_coords = downloads_by_coords.reset_index() downloads_by_coords.columns = ["round_latitude","round_longitude","city","downloads"] # In[32]: downloads_by_coords.head() # In[38]: by_coords = downloads_by_coords.reset_index() coord_dupes = by_coords[by_coords.duplicated(subset=["round_latitude","round_longitude","city"])] coord_dupes </code></pre> <p>Here are a few lines from the data, as requested:</p> <pre><code>2016-02-16 00:32:19,Philadelphia,"39.9525839,-75.1652215",10.1042/BJ20091140,39.9525839,-75.1652215,40.0,-75.0 2016-02-16 00:32:19,Philadelphia,"39.9525839,-75.1652215",10.1096/fj.05-5309fje,39.9525839,-75.1652215,40.0,-75.0 2016-02-16 00:32:19,Philadelphia,"39.9525839,-75.1652215",10.1186/1478-811X-11-15,39.9525839,-75.1652215,40.0,-75.0 2016-02-16 
00:32:21,Houston,"29.7604267,-95.3698028",10.1039/P19730002379,29.7604267,-95.36980279999999,30.0,-95.0 </code></pre>
-1
2016-08-07T20:46:33Z
38,819,324
<p><code>dl_by_loc(path)</code> returns a Series with a MultiIndex:</p> <pre><code>round_latitude round_longitude city 30.0 -95.0 Houston 1 40.0 -75.0 Philadelphia 3 Name: downloads, dtype: int64 </code></pre> <p>If you take a look at the definition of that function, it groups the DataFrame by round_latitude, round_longitude and city columns and counts the number of occurrences. Later on, you convert this to a DataFrame by calling reset_index(). Now, the downloads column is showing how many times each lat, lon, city combination occurred in the original DataFrame. Since it is a groupby result, these combinations are in fact not duplicated because they were aggregated previously. If you want to detect duplicated ones from this DataFrame, you can use:</p> <pre><code>by_coords[by_coords['downloads']&gt;1] </code></pre> <p>Your method would still work in the original DataFrame. Note that removing duplicates or grouping data with float type data has some <a href="http://floating-point-gui.de/" rel="nofollow">risks</a>. Pandas generally handles them but to make sure, if you want 1-digit precision, you can multiply by 10 and convert to integer.</p>
1
2016-08-07T22:58:54Z
[ "python", "pandas", "numpy" ]
pandas.to_datetime: which format to choose?
38,818,459
<p>I have a .csv like that:</p> <pre class="lang-none prettyprint-override"><code>"Date","Time","Open","High","Low","Close","Volume" 12/30/2002,0930,0.94,0.94,0.94,0.94,571466 </code></pre> <p>I want to convert the "Time" column values with pandas.to_datetime module, but I can't find the correct format to use because there's not a separator between hours and minutes.</p> <p>Can someone help me please?</p>
0
2016-08-07T20:53:51Z
38,818,576
<p>This should work, but I'm not sure if there's a better way:</p> <pre><code>from StringIO import StringIO fh = StringIO('''"Date","Time","Open","High","Low","Close","Volume" 12/30/2002,0930,0.94,0.94,0.94,0.94,571466''') df = pd.read_csv(fh, dtype={'Time':object}) df['Timestamp'] = pd.to_datetime(df['Date'] + ' ' + df['Time']) print df </code></pre> <p>Output:</p> <pre><code> Date Time Open High Low Close Volume Timestamp 0 12/30/2002 0930 0.94 0.94 0.94 0.94 571466 2002-12-30 09:30:00 </code></pre>
1
2016-08-07T21:09:01Z
[ "python", "python-2.7", "datetime", "pandas", "datetime-format" ]
pandas.to_datetime: which format to choose?
38,818,459
<p>I have a .csv like that:</p> <pre class="lang-none prettyprint-override"><code>"Date","Time","Open","High","Low","Close","Volume" 12/30/2002,0930,0.94,0.94,0.94,0.94,571466 </code></pre> <p>I want to convert the "Time" column values with pandas.to_datetime module, but I can't find the correct format to use because there's not a separator between hours and minutes.</p> <p>Can someone help me please?</p>
0
2016-08-07T20:53:51Z
38,819,065
<p>You can tell pandas that there is no separator there by specifying the format of the date. <code>%H%M</code> tells python that you have a time that has no divider. If you had a divider of <code>:</code>, for example, then you would use <code>format='%H:%M'</code>.</p> <p>Assuming you have loaded everything in and your dataframe is loaded in as <code>df</code>.</p> <pre><code>from pandas import pandas # file loading and such asset['Date'] = pandas.to_datetime(asset['Date']) asset['Time'] = pandas.DatetimeIndex(pandas.to_datetime(asset['Time'], format = '%H%M')).time </code></pre> <p>Will give you</p> <pre><code> Date Time Open High Low Close Volume 0 2002-12-30 09:30:00 0.94 0.94 0.94 0.94 571466 </code></pre> <p>For Python 3 people:</p> <pre><code>df['Time'] = pd.to_datetime(df['Time'], format='%H%M').dt.time </code></pre> <p>Will give you</p> <pre><code> Date Time Open High Low Close Volume 0 12/30/2002 09:30:00 0.94 0.94 0.94 0.94 571466 </code></pre>
0
2016-08-07T22:17:48Z
[ "python", "python-2.7", "datetime", "pandas", "datetime-format" ]
pandas.to_datetime: which format to choose?
38,818,459
<p>I have a .csv like that:</p> <pre class="lang-none prettyprint-override"><code>"Date","Time","Open","High","Low","Close","Volume" 12/30/2002,0930,0.94,0.94,0.94,0.94,571466 </code></pre> <p>I want to convert the "Time" column values with pandas.to_datetime module, but I can't find the correct format to use because there's not a separator between hours and minutes.</p> <p>Can someone help me please?</p>
0
2016-08-07T20:53:51Z
38,824,333
<p>You can a pass a list of the columns to parse as a complete <code>datetime</code> by passing a list of lists to <code>parse_dates</code> param:</p> <pre><code>In [6]: import io import pandas as pd t='''"Date","Time","Open","High","Low","Close","Volume" 12/30/2002,0930,0.94,0.94,0.94,0.94,571466''' df = pd.read_csv(io.StringIO(t), parse_dates=[['Date','Time']], keep_date_col=True) df Out[6]: Date_Time Date Time Open High Low Close Volume 0 2002-12-30 09:30:00 12/30/2002 0930 0.94 0.94 0.94 0.94 571466 </code></pre> <p>You can see that the <code>dtypes</code> are as expected:</p> <pre><code>In [7]: df.info() &lt;class 'pandas.core.frame.DataFrame'&gt; RangeIndex: 1 entries, 0 to 0 Data columns (total 8 columns): Date_Time 1 non-null datetime64[ns] Date 1 non-null object Time 1 non-null object Open 1 non-null float64 High 1 non-null float64 Low 1 non-null float64 Close 1 non-null float64 Volume 1 non-null int64 dtypes: datetime64[ns](1), float64(4), int64(1), object(2) memory usage: 144.0+ bytes </code></pre>
0
2016-08-08T08:20:24Z
[ "python", "python-2.7", "datetime", "pandas", "datetime-format" ]
Python Installing issu
38,818,484
<p><a href="http://i.stack.imgur.com/MofXs.png" rel="nofollow"><img src="http://i.stack.imgur.com/MofXs.png" alt="enter image description here"></a></p> <p>I reinstalled it many times but nothing is solved , what should I do ?</p>
-8
2016-08-07T20:56:29Z
39,382,523
<p>Although this question is a little old, I have a possible solution.</p> <p>In English the message is:</p> <blockquote> <p>Unable to start the program because api-ms-win-crt-runtime-l1-1-0.dll cannot be found on your computer. Try to reinstall the program to fix this problem.</p> </blockquote> <p>The problem is a little confusing because it's not really a python problem. It's a DLL that you need to re-install.</p> <p>Simply put you don't have the api-ms-win-crt-runtime-l1-1-0.dll on your machine, or it is unregistered. You could try to re-register it, but it's easier just to do the following:</p> <ol> <li>Run windows update. </li> <li>Download Visual C++ Redistributeable: <a href="https://support.microsoft.com/en-us/kb/2977003" rel="nofollow">https://support.microsoft.com/en-us/kb/2977003</a></li> <li>Run the installer and select "uninstall".</li> <li>Run the installer again and select "install".</li> </ol> <p>If Anaconda already fixed it for you, I hope this still helps someone somewhere sometime.</p>
0
2016-09-08T04:18:32Z
[ "python", "installer" ]
Skip rows with missing values in read_csv
38,818,609
<p>I have a very large csv which I need to read in. To make this fast and save RAM usage I am using read_csv and set the dtype of some columns to np.uint32. The problem is that some rows have missing values and pandas uses a float to represent those. </p> <ol> <li>Is it possible to simply skip rows with missing values? I know I could do this after reading in the whole file but this means I couldn't set the dtype until then and so would use too much RAM.</li> <li>Is it possible to convert missing values to some other I choose during the reading of the data? </li> </ol>
0
2016-08-07T21:12:55Z
38,819,013
<p>If you show some data, SO ppl could help. </p> <pre><code>pd.read_csv('FILE', keep_default_na=False) </code></pre> <p>For starters try these: </p> <p><a href="http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html" rel="nofollow">http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html</a></p> <pre><code>na_values : str or list-like or dict, default None Additional strings to recognize as NA/NaN. If dict passed, specific per-column NA values. By default the following values are interpreted as NaN: ‘’, ‘#N/A’, ‘#N/A N/A’, ‘#NA’, ‘-1.#IND’, ‘-1.#QNAN’, ‘-NaN’, ‘-nan’, ‘1.#IND’, ‘1.#QNAN’, ‘N/A’, ‘NA’, ‘NULL’, ‘NaN’, ‘nan’. keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they’re appended to. na_filter : boolean, default True Detect missing value markers (empty strings and the value of na_values). In data without any NAs, passing na_filter=False can improve the performance of reading a large file </code></pre>
-1
2016-08-07T22:08:12Z
[ "python", "pandas" ]
Skip rows with missing values in read_csv
38,818,609
<p>I have a very large csv which I need to read in. To make this fast and save RAM usage I am using read_csv and set the dtype of some columns to np.uint32. The problem is that some rows have missing values and pandas uses a float to represent those. </p> <ol> <li>Is it possible to simply skip rows with missing values? I know I could do this after reading in the whole file but this means I couldn't set the dtype until then and so would use too much RAM.</li> <li>Is it possible to convert missing values to some other I choose during the reading of the data? </li> </ol>
0
2016-08-07T21:12:55Z
38,819,123
<p>There is no feature in Pandas that does that. You can implement it in regular Python like this:</p> <pre><code>import csv import pandas as pd def filter_records(records): """Given an iterable of dicts, converts values to int. Discards any record which has an empty field.""" for record in records: for k, v in record.iteritems(): if v == '': break record[k] = int(v) else: # this executes whenever break did not yield record with open('t.csv') as infile: records = csv.DictReader(infile) df = pd.DataFrame.from_records(filter_records(records)) </code></pre> <p>Pandas uses the <code>csv</code> module internally anyway. If the performance of the above turns out to be a problem, you could probably speed it up with Cython (which Pandas also uses).</p>
0
2016-08-07T22:26:05Z
[ "python", "pandas" ]
Skip rows with missing values in read_csv
38,818,609
<p>I have a very large csv which I need to read in. To make this fast and save RAM usage I am using read_csv and set the dtype of some columns to np.uint32. The problem is that some rows have missing values and pandas uses a float to represent those. </p> <ol> <li>Is it possible to simply skip rows with missing values? I know I could do this after reading in the whole file but this means I couldn't set the dtype until then and so would use too much RAM.</li> <li>Is it possible to convert missing values to some other I choose during the reading of the data? </li> </ol>
0
2016-08-07T21:12:55Z
38,819,282
<p>It would be dainty if you could fill <code>NaN</code> with say <code>0</code> during read itself. Perhaps a feature request in Pandas's git-hub is in order...</p> <h1>Using a converter function</h1> <p>However, for the time being, you can define your own function to do that and pass it to the <code>converters</code> argument in <a href="http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html" rel="nofollow"><code>read_csv</code></a>:</p> <pre><code>def conv(val): if val == np.nan: return 0 # or whatever else you want to represent your NaN with return val df = pd.read_csv(file, converters={colWithNaN : conv}, dtypes=...) </code></pre> <p>Note that <code>converters</code> takes a <code>dict</code>, so you need to specify it for each column that has NaN to be dealt with. It can get a little tiresome if a lot of columns are affected. You can specify either column names or numbers as keys.</p> <p>Also note that this might slow down your <code>read_csv</code> performance, depending on how the <code>converters</code> function is handled. Further, if you just have one column that needs NaNs handled during read, you can skip a proper function definition and use a <code>lambda</code> function instead:</p> <pre><code>df = pd.read_csv(file, converters={colWithNaN : lambda x: 0 if x == np.nan else x}, dtypes=...) </code></pre> <hr> <h1>Reading in chunks</h1> <p>You could also read the file in small chunks that you stitch together to get your final output. You can do a bunch of things this way. Here is an illustrative example:</p> <pre><code>result = pd.DataFrame() df = pd.read_csv(file, chunksize=1000) for chunk in df: chunk.dropna(axis=0, inplace=True) # Dropping all rows with any NaN value chunk[colToConvert] = chunk[colToConvert].astype(np.uint32) result = result.append(chunk) del df, chunk </code></pre> <p>Note that this method does not strictly duplicate data. 
There is a time when the data in <code>chunk</code> exists twice, right after the <code>result.append</code> statement, but only <code>chunksize</code> rows are repeated, which is a fair bargain. This method may also work out to be faster than by using a converter function.</p>
2
2016-08-07T22:52:52Z
[ "python", "pandas" ]
How to create a new column based on one of three other columns?
38,818,626
<p>I have a Dataframe that has a movie name column and 3 other columns (let's call them A, B, and C) that are ratings from 3 different sources. There are many movies with only one rating, some movies with a combination from the 3 forums, and some with no ratings. I want to create a new column that will: </p> <ol> <li>If A column has associated rating, use A.</li> <li>If A column is empty, get associated rating from B.</li> <li>If B column is empty, get associated rating from C.</li> <li>If C column is empty, return "Unrated"</li> </ol> <p>This is what I have in my code so far:</p> <pre><code>def check_rating(rating): if newyear['Yahoo Rating'] != "\\N": return rating else: if newyear['Movie Mom Rating'] != "\\N": return rating else: if newyear['Critc Rating'] != "\\N": return rating else: return "Unrated" df['Rating'] = df.apply(check_rating, axis=1) </code></pre> <p>The error I get is: </p> <pre><code>ValueError: ('The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().', 'occurred at index 0') </code></pre> <p>For visual of my dataframe, here is <code>newyear.head()</code>:</p> <p><img src="http://i.stack.imgur.com/U0bmb.png" alt="newyear dataframe"></p> <p>I am not sure what this value error means to fix this problem and also if this is the right way to do it.</p>
0
2016-08-07T21:14:45Z
38,818,720
<p>You are returning <code>rating</code> in your original function .. but <code>rating</code> is the <strong>row</strong>, not the value of any column</p> <pre><code>&gt;&gt;&gt; df A B C Genre Title Year 0 7 6 \N g1 m1 y1 1 \N 5 7 g2 m2 y2 2 \N \N \N g3 m3 y3 3 \N 4 1 g4 m4 y4 4 \N \N 3 g5 m5 y5 &gt;&gt;&gt; def rating(row): if row['A'] != r'\N': return row['A'] if row['B'] != r'\N': return row['B'] if row['C'] != r'\N': return row['C'] return 'Unrated' &gt;&gt;&gt; df['Rating'] = df.apply(rating, axis = 1) &gt;&gt;&gt; df A B C Genre Title Year Rating 0 7 6 \N g1 m1 y1 7 1 \N 5 7 g2 m2 y2 5 2 \N \N \N g3 m3 y3 Unrated 3 \N 4 1 g4 m4 y4 4 4 \N \N 3 g5 m5 y5 3 </code></pre>
1
2016-08-07T21:25:34Z
[ "python", "pandas" ]
How to create a new column based on one of three other columns?
38,818,626
<p>I have a Dataframe that has a movie name column and 3 other columns (let's call them A, B, and C) that are ratings from 3 different sources. There are many movies with only one rating, some movies with a combination from the 3 forums, and some with no ratings. I want to create a new column that will: </p> <ol> <li>If A column has associated rating, use A.</li> <li>If A column is empty, get associated rating from B.</li> <li>If B column is empty, get associated rating from C.</li> <li>If C column is empty, return "Unrated"</li> </ol> <p>This is what I have in my code so far:</p> <pre><code>def check_rating(rating): if newyear['Yahoo Rating'] != "\\N": return rating else: if newyear['Movie Mom Rating'] != "\\N": return rating else: if newyear['Critc Rating'] != "\\N": return rating else: return "Unrated" df['Rating'] = df.apply(check_rating, axis=1) </code></pre> <p>The error I get is: </p> <pre><code>ValueError: ('The truth value of a Series is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().', 'occurred at index 0') </code></pre> <p>For visual of my dataframe, here is <code>newyear.head()</code>:</p> <p><img src="http://i.stack.imgur.com/U0bmb.png" alt="newyear dataframe"></p> <p>I am not sure what this value error means to fix this problem and also if this is the right way to do it.</p>
0
2016-08-07T21:14:45Z
38,818,758
<p>I would do something like this:</p> <pre><code>df = df.replace('\\N', np.nan) # this requires import numpy as np (df['Yahoo Rating'].fillna(df['Movie Mom Rating'] .fillna(df['Critic Rating'] .fillna("Unrated")))) </code></pre> <p>The reason that your code doesn't work is that <code>newyear['Yahoo Rating'] != "\\N"</code> is a boolean array. What you say here is something like <code>if [True, False, True, False]:</code>. That's the source of ambiguity. How do you evaluate such a condition? Would you execute if all of them True or would just one of them be enough?</p> <p>As <a href="http://stackoverflow.com/a/38818720/2285236">M. Klugerford explained</a>, you can change it so it is evaluated row by row (therefore returns a single value). However, row by row apply operations are generally slow and pandas has great tools for handling missing data. That's why I am suggesting this.</p>
4
2016-08-07T21:30:41Z
[ "python", "pandas" ]
Python - TypeError: unsupported operand type(s) for *: 'NoneType' and 'int'
38,818,817
<p>I have a program that allows a user to convert between currencies and allow them to have bank accounts for five different currencies. </p> <p>When I try to display the bank account balance to the user, I get a message saying that 'value' is a <code>Nonetype</code>, yet I don't understand how <code>None</code> is being returned.</p> <pre><code>print("Check balance of which account?") print("1. USD | 2. EUR | 3. JPY | 4. GBP | 5. RUB") acntaction = input() if acntaction == "1": if player.USDhasBankAccount == True: value = bank.USDCheckBalance(player.USD) print("Bank Account Balance: " + str(value * currencies[0].getVal()) + " " + currencies[0].getCurName() + ".") # prints bank account balance to player else: print("You do not have a USD bank account!") </code></pre> <p>Here is the USDCheckBalance method:</p> <pre><code>def USDCheckBalance(self, USDaccountName): i = 0 while i &lt; len(self.USDaccountList): USDaccountCheck = self.USDaccountList[i].owner if USDaccountName == USDaccountCheck: USDaccount = self.USDaccountList[i] return USDaccount.value break else: i = i + 1 print("You don't have an account!") </code></pre> <p>There is a list created when the user opens their USD bank account and there is only one. Please let me know how I can resolve this!!!</p>
0
2016-08-07T21:38:41Z
38,818,873
<p>You should ensure that your USDCheckBalance method does return an int. It seems that there are cases where it doesn't return anything, causing the error. (The "you don't have an account" case)</p>
2
2016-08-07T21:48:12Z
[ "python", "methods" ]
Python - TypeError: unsupported operand type(s) for *: 'NoneType' and 'int'
38,818,817
<p>I have a program that allows a user to convert between currencies and allow them to have bank accounts for five different currencies. </p> <p>When I try to display the bank account balance to the user, I get a message saying that 'value' is a <code>Nonetype</code>, yet I don't understand how <code>None</code> is being returned.</p> <pre><code>print("Check balance of which account?") print("1. USD | 2. EUR | 3. JPY | 4. GBP | 5. RUB") acntaction = input() if acntaction == "1": if player.USDhasBankAccount == True: value = bank.USDCheckBalance(player.USD) print("Bank Account Balance: " + str(value * currencies[0].getVal()) + " " + currencies[0].getCurName() + ".") # prints bank account balance to player else: print("You do not have a USD bank account!") </code></pre> <p>Here is the USDCheckBalance method:</p> <pre><code>def USDCheckBalance(self, USDaccountName): i = 0 while i &lt; len(self.USDaccountList): USDaccountCheck = self.USDaccountList[i].owner if USDaccountName == USDaccountCheck: USDaccount = self.USDaccountList[i] return USDaccount.value break else: i = i + 1 print("You don't have an account!") </code></pre> <p>There is a list created when the user opens their USD bank account and there is only one. Please let me know how I can resolve this!!!</p>
0
2016-08-07T21:38:41Z
38,818,923
<p>If there is no bank account, you implicitly return <code>None</code>. Best use exceptions in that case:</p> <pre><code>def USDCheckBalance(self, USDaccountName): for account in self.USDaccountList: if USDaccountName == account.owner: return account.value raise KeyError("You don't have an account!") </code></pre> <p>and</p> <pre><code>print("Check balance of which account?") print("1. USD | 2. EUR | 3. JPY | 4. GBP | 5. RUB") acntaction = input() if acntaction == "1": try: value = bank.USDCheckBalance(player.USD) print("Bank Account Balance: {} {}.".format(value * currencies[0].getVal(), currencies[0].getCurName()) except KeyError: print("You do not have a USD bank account!") </code></pre>
3
2016-08-07T21:54:49Z
[ "python", "methods" ]
Selenium - Login raises ElementNotVisibleException
38,818,840
<p>I am using Selenium Webdriver to login to a site. I've tried multiple different selectors, and have tried implicit waits, but cannot locate the element. </p> <pre><code> from selenium import webdriver from selenium.webdriver.common.by import By browser = webdriver.Firefox() url = "https://www.example.com" login_page = browser.get(url) username = browser.find_element_by_id("Email") # Also tried: # username = browser.find_element_by_xpath('//*[@id="Email"]') # username = browser.find_element_by_css_selector('#Email') username.send_keys("email") </code></pre> <p>This is the html</p> <pre><code>&lt;div class="form-group"&gt; &lt;label for="Email"&gt;Email address&lt;/label&gt; &lt;div class="input-group" style="width: 100%"&gt; &lt;input class="form-control email" data-val="true" data-val-length="Maximum length is 50" data-val-length-max="50" data-val-regex="Provided email address is not valid" data-val-regex-pattern="^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" data-val-required="Email is required" id="Email" name="Email" type="email" value=""&gt;&lt;br&gt; &lt;span class="field-validation-valid" data-valmsg-for="Email" data-valmsg-replace="true"&gt;&lt;/span&gt; &lt;/div&gt; &lt;/div&gt; </code></pre> <p>Here is the error message</p> <pre><code>Traceback (most recent call last): File "seleniumloginpi.py", line 12, in &lt;module&gt; email.send_keys('email') File "/Users/greg/anaconda/envs/trade/lib/python2.7/site-packages/selenium/webdriver/remote/webelement.py", line 320, in send_keys self._execute(Command.SEND_KEYS_TO_ELEMENT, {'value': keys_to_typing(value)}) File "/Users/greg/anaconda/envs/trade/lib/python2.7/site-packages/selenium/webdriver/remote/webelement.py", line 461, in _execute return self._parent.execute(command, params) File "/Users/greg/anaconda/envs/trade/lib/python2.7/site-packages/selenium/webdriver/remote/webdriver.py", line 236, in execute 
self.error_handler.check_response(response) File "/Users/greg/anaconda/envs/trade/lib/python2.7/site-packages/selenium/webdriver/remote/errorhandler.py", line 192, in check_response raise exception_class(message, screen, stacktrace) selenium.common.exceptions.ElementNotVisibleException: Message: Element is not currently visible and so may not be interacted with Stacktrace: at fxdriver.preconditions.visible (file:///var/folders/2h/3nnr94wx0f9g9bjcl0ks_g1w0000gn/T/tmpfAR5E7/extensions/fxdriver@googlecode.com/components/command-processor.js:10092) at DelayedCommand.prototype.checkPreconditions_ (file:///var/folders/2h/3nnr94wx0f9g9bjcl0ks_g1w0000gn/T/tmpfAR5E7/extensions/fxdriver@googlecode.com/components/command-processor.js:12644) at DelayedCommand.prototype.executeInternal_/h (file:///var/folders/2h/3nnr94wx0f9g9bjcl0ks_g1w0000gn/T/tmpfAR5E7/extensions/fxdriver@googlecode.com/components/command-processor.js:12661) at DelayedCommand.prototype.executeInternal_ (file:///var/folders/2h/3nnr94wx0f9g9bjcl0ks_g1w0000gn/T/tmpfAR5E7/extensions/fxdriver@googlecode.com/components/command-processor.js:12666) at DelayedCommand.prototype.execute/&lt; (file:///var/folders/2h/3nnr94wx0f9g9bjcl0ks_g1w0000gn/T/tmpfAR5E7/extensions/fxdriver@googlecode.com/components/command-processor.js:12608) </code></pre> <p>Any help would be greatly appreciated. </p>
3
2016-08-07T21:42:40Z
38,819,149
<p>Actually you are locating element, problem with the <code>send_keys</code>, here could not be set value on the email input due to invisibility of element. But as I see in provided HTML no style attribute property exists on email input element which could make it invisible. </p> <p>May be possible there are more elements with the same id and you are interacting with other element, you should try with some different locator as below :-</p> <pre><code>username = browser.find_element_by_css_selector('div.input-group input#Email.form-control.email') username.send_keys("email") </code></pre> <p>Or try to find all elements with the Id <code>Email</code> and perform <code>send_keys()</code> on visible element as below :</p> <pre><code>usernames = browser.find_elements_by_id('Email') for username in usernames: if username.is_displayed(): username.send_keys("email") break </code></pre>
1
2016-08-07T22:31:18Z
[ "python", "selenium", "selenium-webdriver" ]
Scrapy Pipeline not insert data to MySQL
38,818,846
<p>I'm making a pipeline in scrapy to store scraped data in a mysql database. When the spider is run in terminal it works perfectly. Even the pipeline is opened. However the data is not being sent to the database. Any help appreciated! :)</p> <p>here's the pipeline code:</p> <pre><code>import scrapy from scrapy.pipelines.images import ImagesPipeline from scrapy.exceptions import DropItem from scrapy.http import Request import datetime import logging import MySQLdb import MySQLdb.cursors from scrapy.exceptions import DropItem from en_movie.items import EnMovie class DuplicatesPipeline(object): def __init__(self): self.ids_seen = set() def process_item(self, item, spider): if item['image_urls'] in self.ids_seen: raise DropItem("Duplicate item found: %s" % item) else: self.ids_seen.add(item['image_urls']) return item class MyImagesPipeline(ImagesPipeline): def get_media_requests(self, item, info): for image_url in item['image_urls']: yield scrapy.Request(image_url) def item_completed(self, results, item, info): image_paths = [x['path'] for ok, x in results if ok] if not image_paths: raise DropItem("Item contains no images") item['image_paths'] = ', '.join(image_paths) return item class EnMovieStorePipeline(object): def __init__(self): self.conn = MySQLdb.connect(host="localhost", user="root", passwd="pass", db="passdb", charset="utf8", use_unicode=True) self.cursor = self.conn.cursor() def process_item(self, item, spider): cursor.execute("""SELECT * FROM dmmactress_enmovielist WHERE Content_ID = %s and release_date = %s and running_time = %s and Actress = %s and Series = %s and Studio = %s and Director = %s and Label = %s and image_paths = %s and image_urls = %s""", (item['Content_ID'][0].encode('utf-8'), item['release_date'][0].encode('utf-8'), item['running_time'][0].encode('utf-8'), item['Actress'][0].encode('utf-8'), item['Series'][0].encode('utf-8'), item['Studio'][0].encode('utf-8'), item['Director'][0].encode('utf-8'), item['Label'][0].encode('utf-8'), 
item['image_urls'][0].encode('utf-8'))) result = self.cursor.fetchone() if result: print("data already exist") else: try: cursor.execute("""INSERT INTO dmmactress_enmovielist(Content_ID, release_date, running_time, Actress, Series, Studio, Director, Label, image_paths, image_urls) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""", (item['Content_ID'][0].encode('utf-8'), item['release_date'][0].encode('utf-8'), item['running_time'][0].encode('utf-8'), item['Actress'][0].encode('utf-8'), item['Series'][0].encode('utf-8'), item['Studio'][0].encode('utf-8'), item['Director'][0].encode('utf-8'), item['Label'][0].encode('utf-8'), item['image_urls'][0].encode('utf-8'))) self.conn.commit() except MySQLdb.Error as e: print ("Error %d: %s" % (e.args[0], e.args[1])) return item </code></pre> <p>EDIT:</p> <pre><code> def parse_item(self, response): for sel in response.xpath('//*[@id="contents"]/div[10]/section/section[1]/section[1]'): item = EnMovie() Content_ID = sel.xpath('normalize-space(div[2]/dl/dt[contains (.,"Content ID:")]/following-sibling::dd[1]/text())').extract() item['Content_ID'] = Content_ID[0].encode('utf-8') release_date = sel.xpath('normalize-space(div[2]/dl[1]/dt[contains (.,"Release Date:")]/following-sibling::dd[1]/text())').extract() item['release_date'] = release_date[0].encode('utf-8') running_time = sel.xpath('normalize-space(div[2]/dl[1]/dt[contains (.,"Runtime:")]/following-sibling::dd[1]/text())').extract() item['running_time'] = running_time[0].encode('utf-8') Series = sel.xpath('normalize-space(div[2]/dl[2]/dt[contains (.,"Series:")]/following-sibling::dd[1]/text())').extract() item['Series'] = Series[0].encode('utf-8') Studio = sel.xpath('normalize-space(div[2]/dl[2]/dt[contains (.,"Studio:")]/following-sibling::dd[1]/a/text())').extract() item['Studio'] = Studio[0].encode('utf-8') Director = sel.xpath('normalize-space(div[2]/dl[2]/dt[contains (.,"Director:")]/following-sibling::dd[1]/text())').extract() item['Director'] = 
Director[0].encode('utf-8') Label = sel.xpath('normalize-space(div[2]/dl[2]/dt[contains (.,"Label:")]/following-sibling::dd[1]/text())').extract() item['Label'] = Label[0].encode('utf-8') item['image_urls'] = sel.xpath('div[1]/img/@src').extract() actresses = sel.xpath("//*[@itemprop='actors']//*[@itemprop='name']/text()").extract() actress = [x.strip() for x in actresses] item['Actress'] = ", ".join(actress) yield item </code></pre>
0
2016-08-07T21:43:14Z
38,826,691
<p>I got this working a few weeks ago but switched to a different database. At the time I based some code off of this: <a href="https://gist.github.com/tzermias/6982723" rel="nofollow">https://gist.github.com/tzermias/6982723</a></p> <p>Also remember to update your settings.py file as well if you use the above code...</p>
0
2016-08-08T10:20:09Z
[ "python", "mysql", "scrapy" ]
add timestamps related fields from existing unix timestamp field dataframe in python
38,818,924
<p>I am new to Python Programing language. For my learning I took sample sales data that contain <strong>orderid, product_name, no_items, amount, total_amount, sale_time</strong>. I am using pandas to load the data.</p> <pre><code>import pandas as pd sale_data = pd.read_csv("test_sale_data.csv") </code></pre> <p><strong>sale_time</strong> field have unix time and I am using <strong>to_datetime</strong> function to convert into readable datetime format.</p> <pre><code>sale_data['sale_time_new'] = to_datetime(sale_data['sale_time'],time='s') # 1284101485 --&gt; 2010-09-10 06:51:25 </code></pre> <p>So I am trying to add timestamps related fields to existing sale_data dataframe like <strong>Year, Quarter, YearQuarter, Month, YearMonth, Week, YearWeek, date, day, datehour, hour</strong>.</p> <pre><code>1284101485 --&gt; 2010, Q3, 2010-Q3, 09, 201009, 36, 2010-WW36, 2010-09-10, 10, 2010-09-10 06:00:00 , 06 </code></pre> <p>How can I do in python ?</p>
0
2016-08-07T21:54:58Z
38,819,189
<p>Use the <a href="http://pandas.pydata.org/pandas-docs/version/0.18.1/basics.html#basics-dt-accessors" rel="nofollow"><code>.dt</code> accessor</a> in pandas; all of <a href="http://pandas.pydata.org/pandas-docs/version/0.18.1/timeseries.html#time-date-components" rel="nofollow">these components</a> are available. For your use case:</p> <pre><code>sale_data['sale_time_new'] = to_datetime(sale_data['sale_time'],time='s') # Year sale_data['sale_time_new'].dt.year # Quarter sale_data['sale_time_new'].dt.quarter # YearQuarter '{y}-{q}'.format(y=sale_data['sale_time_new'].dt.year, q=sale_data['sale_time_new'].dt.year) # Month sale_data['sale_time_new'].dt.month # YearMonth sale_data['sale_time_new'].dt.year*100 + sale_data['sale_time_new'].dt.month # Week sale_data['sale_time_new'].dt.week # YearWeek '{y}-WW{w}'.format(y=sale_data['sale_time_new'].dt.year, w=sale_data['sale_time_new'].dt.week) # date sale_data['sale_time_new'].dt.date # day sale_data['sale_time_new'].dt.day # datehour '{y}-{m}-{d} {h}:00:00'.format(y=sale_data['sale_time_new'].dt.year, m=sale_data['sale_time_new'].dt.month, d=sale_data['sale_time_new'].dt.day, h=sale_data['sale_time_new'].dt.hour) # hour sale_data['sale_time_new'].dt.hour </code></pre>
0
2016-08-07T22:38:13Z
[ "python", "python-2.7", "datetime", "pandas", "time-series" ]
Most efficient way to check twitter friendship? (over 5000 check)
38,818,981
<p>I'm facing problem like this. I used tweepy to collect +10000 tweets, i use nltk naive-bayes classification and filtered the tweets into +5000. I want to generate a graph of user friendship from that classified 5000 tweet. The problem is that I am able to check it with tweepy.api.show_frienship(), but it takes so much and much time and sometime ended up with endless ratelimit error. </p> <p>is there any way i can check the friendship more eficiently?</p>
2
2016-08-07T22:03:37Z
38,819,049
<p>I don't know much about the limits with Tweepy, but you can always write a basic web scraper with urllib and BeautifulSoup to do so.</p> <p>You could take a website such as <code>www.doesfollow.com</code> which accomplishes what you are trying to do. (not sure about request limits with this page, but there are dozens of other websites that do the same thing) This website is interesting because the url is super simple.</p> <p>For example, in order to check if Google and Twitter are "friends" on Twitter, the link is simply <code>www.doesfollow.com/google/twitter</code>.</p> <p>This would make it very easy for you to run through the users as you can just append the users to the url such as <code>'www.doesfollow.com/'+ user1 + '/' + user2</code></p> <p>The results page of doesfollow has this tag if the users are friends on Twitter:</p> <p><code>&lt;div class="yup"&gt;yup&lt;/div&gt;</code>, </p> <p>and this tag if the users are not friends on Twitter:</p> <p><code>&lt;div class="nope"&gt;nope&lt;/div&gt;</code></p> <p>So you could parse the page source code and search to find which of those tags exist to determine if the users are friends on Twitter.</p> <p>This might not be the way that you wanted to approach the problem, but it's a possibility. I'm not entirely sure how to approach the graphing part of your question though. I'd have to look into that.</p>
0
2016-08-07T22:14:19Z
[ "python", "twitter", "tweepy" ]
ImgurPython ConfigParser error
38,819,009
<p>I've come back to Python after a couple of years of not having used it. I'm testing the <a href="https://github.com/Imgur/imgurpython" rel="nofollow">Imgur Python library</a>. After a fresh install of Python 2.7.12, I did a quick pip install of ImgurPython, then dragged a small folder of the sample scripts to my desktop for testing. </p> <p>The auth.py sample script begins with a function that includes: </p> <pre><code># Get client ID and secret from auth.ini config = get_config() config.read('auth.ini') client_id = config.get('credentials', 'client_id') client_secret = config.get('credentials', 'client_secret') client = ImgurClient(client_id, client_secret) </code></pre> <p>The auth.ini file is in the same folder as the auth.py folder, and contains my client ID and secret. However, when running the script, I get: </p> <pre><code>C:\Windows\system32&gt;python C:\Users\[REDACTED]\Desktop\imgtest\auth.py Traceback (most recent call last): File "C:\Users\[REDACTED]\Desktop\imgtest\auth.py", line 41, in &lt;module&gt; authenticate() File "C:\Users\[REDACTED]\Desktop\imgtest\auth.py", line 16, in authenticate client_id = config.get('credentials', 'client_id') File "J:\Python27\lib\ConfigParser.py", line 607, in get raise NoSectionError(section) ConfigParser.NoSectionError: No section: 'credentials' </code></pre> <p>Removing the need to get the credentials from the auth.ini file and placing them directly in the Python script has it run with no error. </p> <p>I'm sure I'm overlooking something simple, but I could use some assistance in figuring out why python won't read the auth.ini file. </p>
0
2016-08-07T22:07:49Z
38,819,610
<p>You may have a bad format of <code>auth.ini</code> file. In my python 2.7, the extraction works totally fine with <code>ConfigParser</code>:</p> <pre><code>$ cat auth.ini [credentials] client_id=foo client_secret=bar </code></pre> <p>In my Ipython:</p> <pre><code>In [1]: import ConfigParser In [2]: config = ConfigParser.ConfigParser() In [3]: config.read('auth.ini') Out[3]: ['auth.ini'] In [4]: client_id = config.get('credentials', 'client_id') In [5]: client_secret = config.get('credentials', 'client_secret') In [6]: client_id Out[6]: 'foo' In [7]: client_secret Out[7]: 'bar' </code></pre>
0
2016-08-07T23:47:47Z
[ "python", "imgur" ]
Python 3.5 - Tkinter Using variables for filenames + paths
38,819,048
<p>I have a combo box which always sets two variables<br> 1) is a filename (.csv)<br> 2) is a file path location</p> <p>Depending on what is selected in the combo box, a particular .csv file is allocated to variable <code>csvfilename</code> and a path is allocated to <code>path</code> </p> <p>I'm trying to feed these variables into the below code to load certain files from a fixed file location.</p> <p><code>with open(csvfilename) as csvfile:</code> </p> <p>and </p> <pre><code>self.DwgOutputBox.tag_bind("dwg", "&lt;ButtonRelease-1&gt;", lambda event, filename=path + '/' + linkname: subprocess.run(filename, shell=True)) </code></pre> <p>however I'm getting a<br> <code>"TypeError: unsupported operand type(s) for +: 'StringVar' and 'str'"</code> message when testing using the PATH variable</p> <p>and a </p> <p><code>with open(csvfilename) as csvfile: TypeError: invalid file: tkinter.StringVar object at 0x024F4F50</code> when trying to use the csv variable.</p> <p>When I test the code by setting the path and csv filename myself it works correctly, so it appears to be an issue with either not being able to use variables for file names or my syntax is incorrect.</p>
-1
2016-08-07T22:14:14Z
38,819,067
<p><code>path</code> is a <code>StringVar</code> object, and you are trying to add <code>/</code> to it.</p> <p>Instead of trying to define a keyword argument, pass in the <code>StringVar</code> objects (<code>path</code> and <code>linkname</code>) and get their values <em>in the <code>lambda</code></em>:</p> <pre><code>self.DwgOutputBox.tag_bind( "dwg", "&lt;ButtonRelease-1&gt;", lambda event, path=path, linkname=linkname: subprocess.run( os.path.join(path.get(), linkname.get()), shell=True)) </code></pre> <p>The <code>path.get()</code> call retrieves the current value of the <code>StringVar</code> object. I used <code>os.path.join()</code> instead of manual concatenation so that your code also works on an OS where the path separator is not <code>/</code>. </p> <p>You'll have to do something similar for the <code>with open(...)</code> call; retrieve the string values contained <em>in</em> the <code>StringVar</code> objects.</p>
0
2016-08-07T22:18:09Z
[ "python", "python-3.x", "combobox", "tkinter" ]
Python 3.5 - Tkinter Using variables for filenames + paths
38,819,048
<p>I have a combo box which always sets two variables<br> 1) is a filename (.csv)<br> 2) is a file path location</p> <p>Depending on what is selected in the combo box, a particular .csv file is allocated to variable <code>csvfilename</code> and a path is allocated to <code>path</code> </p> <p>I'm trying to feed these variables into the below code to load certain files from a fixed file location.</p> <p><code>with open(csvfilename) as csvfile:</code> </p> <p>and </p> <pre><code>self.DwgOutputBox.tag_bind("dwg", "&lt;ButtonRelease-1&gt;", lambda event, filename=path + '/' + linkname: subprocess.run(filename, shell=True)) </code></pre> <p>however I'm getting a<br> <code>"TypeError: unsupported operand type(s) for +: 'StringVar' and 'str'"</code> message when testing using the PATH variable</p> <p>and a </p> <p><code>with open(csvfilename) as csvfile: TypeError: invalid file: tkinter.StringVar object at 0x024F4F50</code> when trying to use the csv variable.</p> <p>When I test the code by setting the path and csv filename myself it works correctly, so it appears to be an issue with either not being able to use variables for file names or my syntax is incorrect.</p>
-1
2016-08-07T22:14:14Z
38,819,137
<p>A tkinter Stringvar is not a python string. To return the value of a Stringvar you use <code>variable.get()</code></p> <p>Since <code>path</code> is not a string you cant add a string to it, put <code>path.get() + ...</code> . What happens if tye path entered doesn't exist? Does the progrsm crash?</p>
1
2016-08-07T22:28:35Z
[ "python", "python-3.x", "combobox", "tkinter" ]
Unicode Python Windows Consle
38,819,079
<p>I have a program for a Blackjack game. It is not completed but I am having an issue with Unicode. When it prints a string it is not in the correct format (Look Below). It is Python 3.</p> <pre><code>from unicurses import * import sys import time from random import choice as rc import random def total(hand): #total aces = hand.count(11) t = sum(hand) if t &gt; 21 and aces &gt; 0: while aces &gt; 0 and t &gt; 21: t -= 10 aces -= 1 return t def random(one,two): import random number = random.randint(one,two) return number def number(): number = random(1,13) if number == 13: value = '0' elif number == 12: value = '0' elif number == 11: value = '0' elif number == 10: value = '0' elif number &lt; 10: value = number return value def card(): cardnumber = number() card = cardnumber return card def store10Cards(): tenCards = [card(), card(), card(), card(), card(), card(), card(), card(), card(), card()] return tenCards stdscr = initscr() cards = store10Cards() mvaddstr(10, 10, '┌──────────┐\n│{} │\n│ │\n│ │\n│ │\n│ │\n│ │\n│ │\n│ │\n│ {}│\n└──────────┘'.format(cards[0], cards[0]).encode('utf-8')) #deck = [] #deck of cards #cwin = 0 #computer wins #pwin = 0 #player wins #for i in range (2, 11): # for j in range ([24, 72][i == 10]): # deck.append(i) #stdscr = initscr() #Start the standard screen start_color() #Start the color service init_pair(1, COLOR_RED, 0) #color pair 1 is red text and no highlighting init_pair(2, COLOR_WHITE, 0) #color pair 2 is white text and no highlighting addstr('\n\n------------------------------------------------------------------------', color_pair(1) | A_BOLD) addstr('\n\n\nAre You Ready To Try And Defeat The Undefeated Blackjack Master?\n\n\nThe First One To Win 10 Hands Wins!!!\n\n\nAces Are Counted As 11s And Automaticlly Change To 1 If You Go Over 21. \n\n\nWinning 1 Hand Does Not Mean You Win The Game\n\n\nIt Is Possible To Win But Very Hard!!! 
Good Luck!!!\n\n\n', color_pair(2)) addstr('------------------------------------------------------------------------', color_pair(1) | A_BOLD) addstr('\n\nDo You Know The Rules Of Blackjack? (Y or N)\n\n', color_pair(2)) addstr('------------------------------------------------------------------------', color_pair(1) | A_BOLD) refresh() #refreshes cbreak() #lets user do Ctrl + C noecho() #Stops inputs from being shown a = getch() #Gets input while a == ord('q'): #Checks if input is q clear() #clears before exit exit() #quits while a == ord('n'): #Checks if input is n clear() #clears before displaying the next text addstr('\n\n\n----------------------------------------------------------------------------------', color_pair(1) | A_BOLD) addstr('\n\nOk. That Is Ok. The Rules Are As Follows: \n\nThe Goal Is To Get To 21.\n\nYou Can Hit A Card (Receive Another) Or Stand And See What The Dealer Has And Does.\n\nThe Person With The Higest Value 21 Or Below Wins That Hand.\n\nAces Start As 11s But Change Automatticly To 1s If You Go Over 21 To Save You.\n\nIf You Get 21 Exactly You Get A Blackjack.\n\nA Blackjack Means You Automaticly Win Unless The Computer Gets A 21 And Ties You.\n\nBe Careful Not To Hit Too Many Times Because If You Go Over 21 You Lose That Hand.\n\nAny Face Card Is Automaticly A 10 At All Times.\n', color_pair(2)) addstr('\n----------------------------------------------------------------------------------\n\n\n', color_pair(1) | A_BOLD) sleep(15) a = getch() #resets input variable while a == ord('q'): #Checks if input is q clear() #Clears before exit exit() #quits while a == ord('y'): #Checks if input is y clear() #clears before displaying the next text addstr('\n\n\n\n\n----------------------------------------------------------------------------------', color_pair(1) | A_BOLD) addstr('\n\n\n\nGreat!\n\n\n\nHave Fun!\n\n\n\nGood Luck!\n\n\n', color_pair(2)) 
addstr('----------------------------------------------------------------------------------\n\n\n', color_pair(1) | A_BOLD) refresh() time.sleep(1) b = '1' a = '' #resets input variable </code></pre> <p>Here is the line I am having issues with:</p> <pre><code>mvaddstr(10, 10, '┌──────────┐\n│{} │\n│ │\n│ │\n│ │\n│ │\n│ │\n│ │\n│ │\n│ {}│\n└──────────┘'.format(cards[0], cards[0]).encode('utf-8')) </code></pre> <p>I tried to encode it with UTF-8 and set Command Prompt to UTF-8 by doing chcp then when I run that program that string prints: </p> <blockquote> <p>b'\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\x e2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x90\n\xe2\x94\x823 \xe2\x94\x82\n\xe2\x94\x82 \t \xe2\x94\x82\n\xe2\x94\x82 \t \xe2 \x94\x82\n\xe2\x94\x82 \t \xe2\x94\x82\n\xe2\x94\x82 \t \xe2\x94\x82\n\x e2\x94\x82 \t \xe2\x94\x82\n\xe2\x94\x82 \t \xe2\x94\x82\n\xe2\x94\x82 \t \xe2\x94\x82\n\xe2\x94\x82 3\xe2\x94\x82\n\xe2\x94\x94\xe2\x94\x80\xe 2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\ x94\x80\xe2\x94\x80\xe2\x94\x98'</p> </blockquote> <p>Please help me print the string like it should be and not be in unicode. </p> <p>If you have any other suggestions on my code let me know.</p>
1
2016-08-07T22:19:27Z
38,820,585
<p>On Windows, unicurses uses ctypes to wrap <a href="https://sourceforge.net/projects/pdcurses" rel="nofollow">PDCurses</a>. This library is optionally built with Unicode support in the 32-bit DLLs that are distributed on the project site. FYI, the Unicode versions call console wide-character APIs such as <code>WriteConsoleOutputW</code>, so don't worry about the legacy console codepage. You need the "pdc34dllu.zip" build, which takes UTF-8 encoded parameters. Because the pre-built DLLs are only 32-bit, you'll have to use 32-bit Python, unless you have Visual Studio 2015 installed to build a 64-bit DLL. </p> <p>You'll also have to make a couple of changes to unicurses to make this work. Here are the steps I followed to get this working for 32-bit Python 3.5:</p> <ol> <li>Download <a href="https://sourceforge.net/projects/pdcurses/files/pdcurses/3.4/pdc34dllu.zip/download" rel="nofollow">pdc34dllu.zip</a>. Extract "pdcurses.dll" to either Python's installation directory, i.e. where python.exe is located, or to a directory that's in the <code>PATH</code> environment variable.</li> <li>Download and extract <a href="https://sourceforge.net/projects/pyunicurses/files/unicurses-1.2/UniCurses-1.2.zip/download" rel="nofollow">UniCurses-1.2.zip</a>. </li> <li>Before installing, make the following changes to unicurses.py. On line 34 change <code>code = locale.getpreferredencoding()</code> to <code>code = 'utf-8'</code>. 
Next, comment out lines 52-54, which start with <code>if not os.access("pdcurses.dll",os.F_OK)</code> because this check will always fail if "pdcurses.dll" isn't in the working directory.</li> <li>Install the package by running <code>python setup.py install</code>.</li> </ol> <h2>example: printing a card</h2> <pre><code>import unicurses card_template = '\n'.join([ '┌────────────┐', '│{card} │', '│ │', '│ │', '│ │', '│ │', '│ │', '│ │', '│ │', '│ {card}│', '└────────────┘']) def main(): stdscr = unicurses.initscr() unicurses.clear() card_string = card_template.format(card=5) for i, line in enumerate(card_string.splitlines()): unicurses.mvaddstr(5 + i, 5, line) unicurses.refresh() unicurses.endwin() if __name__ == '__main__': main() </code></pre> <h2>output</h2> <pre class="lang-none prettyprint-override"><code> ┌────────────┐ │5 │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ 5│ └────────────┘ </code></pre>
1
2016-08-08T02:40:05Z
[ "python", "windows", "unicode", "utf-8", "utf" ]
How to kill Finished threads in python
38,819,091
<p>My multi-threading script raising this error :</p> <pre><code>thread.error : can't start new thread </code></pre> <p>when it reached 460 threads :</p> <pre><code>threading.active_count() = 460 </code></pre> <p>I assume the old threads keeps stack up, since the script didn't kill them. This is my code:</p> <pre><code>import threading import Queue import time import os import csv def main(worker): #Do Work print worker return def threader(): while True: worker = q.get() main(worker) q.task_done() def main_threader(workers): global q global city q = Queue.Queue() for x in range(20): t = threading.Thread(target=threader) t.daemon = True print "\n\nthreading.active_count() = " + str(threading.active_count()) + "\n\n" t.start() for worker in workers: q.put(worker) q.join() </code></pre> <p>How do I kill the old threads when their job is done? (Is return not enough?)</p>
0
2016-08-07T22:21:19Z
38,820,283
<p>Your <code>threader</code> function never exits, so your threads never die. Since you're just processing one fixed set of work and never adding items after you start working, you could set the threads up to exit when the queue is empty.</p> <p>See the following altered version of your code and the comments I added:</p> <pre><code>def threader(q): # let the thread die when all work is done while not q.empty(): worker = q.get() main(worker) q.task_done() def main_threader(workers): # you don't want global variables #global q #global city q = Queue.Queue() # make sure you fill the queue *before* starting the worker threads for worker in workers: q.put(worker) for x in range(20): t = threading.Thread(target=threader, args=[q]) t.daemon = True print "\n\nthreading.active_count() = " + str(threading.active_count()) + "\n\n" t.start() q.join() </code></pre> <p>Notice that I removed <code>global q</code>, and instead I pass <code>q</code> to the thread function. You don't want threads created by a previous call to end up sharing a <code>q</code> with new threads (<em>edit</em> although <code>q.join()</code> prevents this anyway, it's still better to avoid globals).</p>
0
2016-08-08T01:54:12Z
[ "python", "multithreading" ]
Reading a TTree in root using PyRoot
38,819,098
<p>I just started using pyroot to read root files and I can't read the data from a file using jupyter notebook. Here is how the TBrowser looks like:</p> <p><a href="http://i.stack.imgur.com/mt1ZZ.png" rel="nofollow"><img src="http://i.stack.imgur.com/mt1ZZ.png" alt="enter image description here"></a></p> <p>I started like this:</p> <pre><code>import ROOT as root import numpy as np f = root.TFile("RealData.root") myTree = f.Get("tree") entries = myTree.GetEntriesFast() </code></pre> <p>Up to here it is working and if I print entries I get the right number of entires I have in the file. But i don't know how to read actual data from the tree (event_number, n_channels, etc.) If I try something like <code>myTree.events</code> or <code>myTree.event_number</code> the kernel stops working. What should I do to read the data from the tree?</p>
0
2016-08-07T22:23:15Z
38,847,387
<p>Normally with pyROOT, you can just do something like:</p> <pre><code>import ROOT as root import numpy as np f = root.TFile("RealData.root") myTree = f.Get("tree") for entry in myTree: # Now you have acess to the leaves/branches of each entry in the tree, e.g. events = entry.events </code></pre> <p>I don't know enough about how jupyter works to know if that would cause any particular problems. Have you tried running the same script just using a regular python interpreter?</p>
0
2016-08-09T09:47:53Z
[ "python", "root-framework", "pyroot" ]
Python Script Pexpect SSH
38,819,147
<p>I'm trying to do a Python script (with a loop) to connect in SSH,it works if everything is good ( password and route) but stop when it's not working(wrong password or no routes to host ).Here is the important part of the script, how can I control if everything is working ? </p> <pre><code>connexion = pexpect.spawn("ssh -o StrictHostKeyChecking=no "+user+"@" + ip ) index=connexion.expect(':') connexion.sendline(password + "\r") connexion.expect('&gt;') connexion.sendline('show clock \r') connexion.expect('&gt;') connexion.sendline('exit') connexion.close() </code></pre> <p>I get the error : </p> <pre><code>Traceback (most recent call last): File "script.py", line 21, in &lt;module&gt; connexion.expect('&gt;') File "/usr/lib/python2.7/dist-packages/pexpect/__init__.py", line 1418, in expect timeout, searchwindowsize) File "/usr/lib/python2.7/dist-packages/pexpect/__init__.py", line 1433, in expect_list timeout, searchwindowsize) File "/usr/lib/python2.7/dist-packages/pexpect/__init__.py", line 1521, in expect_loop raise EOF(str(err) + '\n' + str(self)) pexpect.EOF: End Of File (EOF). Exception style platform. &lt;pexpect.spawn object at 0x7fcfeecee750&gt; version: 3.1 command: /usr/bin/ssh args: ['/usr/bin/ssh', '-o', 'StrictHostKeyChecking=no', 'username@10.9.128.5'] searcher: &lt;pexpect.searcher_re object at 0x7fcfeecee850&gt; buffer (last 100 chars): '' before (last 100 chars): ' connect to host 10.9.128.5 port 22: No route to host\r\r\npassword\r\n\r\n' </code></pre> <p>Thanks</p>
-1
2016-08-07T22:31:03Z
38,824,679
<p>The problem is that the host <code>10.9.128.5</code> was not reachable at the moment. <code>ssh</code> has returned this message:</p> <pre><code>connect to host 10.9.128.5 port 22: No route to host </code></pre> <p>And not that, what was expected.</p>
0
2016-08-08T08:40:29Z
[ "python", "shell", "ssh", "pexpect" ]
Python Script Pexpect SSH
38,819,147
<p>I'm trying to do a Python script (with a loop) to connect in SSH,it works if everything is good ( password and route) but stop when it's not working(wrong password or no routes to host ).Here is the important part of the script, how can I control if everything is working ? </p> <pre><code>connexion = pexpect.spawn("ssh -o StrictHostKeyChecking=no "+user+"@" + ip ) index=connexion.expect(':') connexion.sendline(password + "\r") connexion.expect('&gt;') connexion.sendline('show clock \r') connexion.expect('&gt;') connexion.sendline('exit') connexion.close() </code></pre> <p>I get the error : </p> <pre><code>Traceback (most recent call last): File "script.py", line 21, in &lt;module&gt; connexion.expect('&gt;') File "/usr/lib/python2.7/dist-packages/pexpect/__init__.py", line 1418, in expect timeout, searchwindowsize) File "/usr/lib/python2.7/dist-packages/pexpect/__init__.py", line 1433, in expect_list timeout, searchwindowsize) File "/usr/lib/python2.7/dist-packages/pexpect/__init__.py", line 1521, in expect_loop raise EOF(str(err) + '\n' + str(self)) pexpect.EOF: End Of File (EOF). Exception style platform. &lt;pexpect.spawn object at 0x7fcfeecee750&gt; version: 3.1 command: /usr/bin/ssh args: ['/usr/bin/ssh', '-o', 'StrictHostKeyChecking=no', 'username@10.9.128.5'] searcher: &lt;pexpect.searcher_re object at 0x7fcfeecee850&gt; buffer (last 100 chars): '' before (last 100 chars): ' connect to host 10.9.128.5 port 22: No route to host\r\r\npassword\r\n\r\n' </code></pre> <p>Thanks</p>
-1
2016-08-07T22:31:03Z
38,900,097
<p>You are getting an exception which needs to be handled correctly. Below is some code that you can use as an example to pass the exception and log it.</p> <pre><code>def ignore_exception_out(conn, text, timeout=10): try: conn.expect(text, timeout) except Exception as e: logging.log("Exception reached {0}".format(e)) pass </code></pre>
2
2016-08-11T15:08:56Z
[ "python", "shell", "ssh", "pexpect" ]
pyinstaller not working with lxml
38,819,168
<p>I am trying to create an exe using pyinstaller but keep running into this error</p> <pre><code>Traceback (most recent call last): File "runautojama.py", line 28, in &lt;module&gt; File "c:\python27\Lib\site-packages\PyInstaller\loader\pyimod03_importers.py", line 389, in load_module exec(bytecode, module.__dict__) File "autogen\jama_lib.py", line 24, in &lt;module&gt; from lxml import html as htmlLib File "c:\python27\Lib\site-packages\PyInstaller\loader\pyimod03_importers.py", line 389, in load_module exec(bytecode, module.__dict__) File "site-packages\lxml\html\__init__.py", line 54, in &lt;module&gt; File "c:\python27\Lib\site-packages\PyInstaller\loader\pyimod03_importers.py", line 546, in load_module module = imp.load_module(fullname, fp, filename, ext_tuple) ImportError: DLL load failed: A dynamic link library (DLL) initialization routine failed. Failed to execute script runautojama </code></pre> <p>These are due to the import statements</p> <pre><code>from lxml import etree from lxml import html </code></pre> <p>I am using lxml==3.6.0</p> <p>I created hooks for lxml.etree like this</p> <pre><code>import os.path import glob from PyInstaller.compat import EXTENSION_SUFFIXES from PyInstaller.utils.hooks import collect_submodules, get_module_file_attribute hiddenimports = collect_submodules('lxml') binaries = [] lxml_dir = os.path.dirname(get_module_file_attribute('lxml')) for ext in EXTENSION_SUFFIXES: lxmlMods = glob.glob(os.path.join(lxml_dir, '*%s*' % ext)) for f in lxmlMods: binaries.append((f, 'lxml')) </code></pre> <p>And a simple hook for lxml.html</p> <pre><code>from PyInstaller.utils.hooks import collect_submodules, get_module_file_attribute hiddenimports = collect_submodules('lxml.html') </code></pre> <p>It copies the etree.pyd and other pyd's to the path but it still doesnt seem to work and I still get the same error. 
Can you suggest what I might be doing wrong.</p> <p>My spec file:</p> <pre><code>a = Analysis(['..\\runautojama.py'], pathex=['scripts\\pyinstallerfiles'], binaries=None, datas=None, hiddenimports=['collections', 'urlparse', 'lxml._elementpath', 'functools', '__future__', 'gzip', 'lxml.html', 'lxml.includes'], hookspath=['scripts\pyinstallerfiles\hooks'], runtime_hooks=[], excludes=[], win_no_prefer_redirects=False, win_private_assemblies=False, cipher=block_cipher) </code></pre>
0
2016-08-07T22:34:25Z
38,834,030
<p>@WombatPM thanks for pointing it out. The problem gets resolved using the dev version <code>3.3.dev0+gd3e0cb4</code></p>
0
2016-08-08T16:14:57Z
[ "python", "lxml", "pyinstaller" ]
Scipy - Error while using spherical Bessel functions
38,819,303
<p>I'm trying to draw plots in Python with Scipy module. According to <a href="http://docs.scipy.org/doc/scipy/reference/special.html" rel="nofollow">http://docs.scipy.org/doc/scipy/reference/special.html</a> I wrote code with <code>scipy.special.spherical_jn(n,x,0)</code>:</p> <pre><code>import matplotlib.pyplot as plt import numpy as np import scipy.special as sp from matplotlib import rcParams rcParams.update({'figure.autolayout': True}) def odrazTE(a,o,d): temp1 = sp.spherical_jn[1,a,0] temp2 = 1 return abs(temp1/temp2)**2 t = np.arange(0.001, 2, 0.001) plt.plot(t,odrazTE(t,t,1),label='TE1') plt.show() </code></pre> <p>While I'm compiling the program, all I get is this error:</p> <pre><code>Traceback (most recent call last): File "standing-sphere.py", line 33, in &lt;module&gt; plt.plot(t,odrazTE(t,t,1),label='TE1') File "standing-sphere.py", line 15, in odrazTE temp1 = sp.spherical_jn[1,a,0] AttributeError: 'module' object has no attribute 'spherical_jn' </code></pre> <p>There is way how to do it with regular Bessel function and relationship between Bessel and spherical Bessel function, but I don't like this solution because of derivative of sph.bess. function that I need too.</p> <p>Is there any chance I have set something wrongly and it can be fixed to scipy.special.spherical_jn work?</p>
0
2016-08-07T22:56:08Z
38,819,851
<p><a href="http://docs.scipy.org/doc/scipy/reference/generated/scipy.special.spherical_jn.html" rel="nofollow"><code>scipy.special.spherical_jn</code></a> was added in scipy version 0.18.0, which was released on July 25, 2016. My guess is you are using an older version of scipy. To check, run</p> <pre><code>import scipy print(scipy.__version__) </code></pre>
2
2016-08-08T00:34:16Z
[ "python", "scipy", "bessel-functions" ]
Unable to instantiate StanfordNERTagger on OS X
38,819,371
<p>I am trying to instantiate <strong><code>StanfordNERTagger</code></strong>. This is what I am trying:</p> <pre><code>st = StanfordNERTagger("/Users/attitude/Desktop/english.all.3class.caseless.distsim.crf.ser.gz","/Users/attitude/Desktop/stanford-ner-2015-12-09/stanford-ner.jar") </code></pre> <p>I have set the <code>CLASSPATH</code> variable to <code>/Users/attitude/Desktop/stanford-ner-2015-12-09/stanford-ner.jar</code> (I also tried just the parent folder as value - <code>/Users/attitude/Desktop/stanford-ner-2015-12-09</code>).</p> <p>However, I am getting this error:</p> <p><code>LookupError: Could not find stanford-ner.jar jar file at /Users/attitude/Desktop/stanford-ner-2015-12-09/stanford-ner.jar</code>.</p> <p>I have done everything mentioned in these two answers - <a href="http://stackoverflow.com/questions/32819573/nltk-why-does-nltk-not-recognize-the-classpath-variable-for-stanford-ner">this</a> and <a href="https://stackoverflow.com/questions/32652725/importerror-cannot-import-name-stanfordnertagger-in-nltk">this</a>. What else do I do now to fix this error?</p> <p>OS X Yosemite - Python 2.7.</p>
0
2016-08-07T23:05:19Z
38,819,496
<p><strong>TL;DR</strong>:</p> <p>Without setting environmental variable, use the keywords arguments, <code>model_filename</code> and <code>path_to_jar</code></p> <pre><code>from nltk.tag import StanfordNERTagger stanford_ner_dir = '/home/alvas/stanford-ner/' eng_model_filename= stanford_ner_dir + 'classifiers/english.all.3class.distsim.crf.ser.gz' my_path_to_jar= stanford_ner_dir + 'stanford-ner.jar' st = StanfordNERTagger(model_filename=eng_model_filename, path_to_jar=my_path_to_jar) st.tag('Rami Eid is studying at Stony Brook University in NY'.split()) </code></pre> <hr> <p><strong>In long:</strong> </p> <p>See <a href="http://stackoverflow.com/a/34112695/610569">http://stackoverflow.com/a/34112695/610569</a></p>
1
2016-08-07T23:26:37Z
[ "python", "nltk", "osx-yosemite", "stanford-nlp", "pos-tagger" ]
Count repeating elements in a CSV column
38,819,444
<p>I'm currently trying to get used to Python and am trying to read a column and count the repeating elements from the strings.</p> <p>What I have so far:</p> <pre><code>i = 0 y = [i] fav_Split = row[9].split('|') #print fav_Split for x in fav_Split: #print x y[i] = x i = i+1 </code></pre> <p><code>print fav_Split</code> gives me:</p> <pre><code>['XA', 'MA'] ['BA', 'MA', 'RA'] ['EB', 'CB'] ['JA', 'RB', 'RA'] ['ED', 'EA', 'RB', 'KA', 'RA'] ['SB'] ['MA', 'ED', 'SB'] ['EC'] ['FA'] ['RA', 'MA'] ['RB'] ['CB'] ['AC'] ['CB', 'MA', 'RA'] ['AC'] ['KA'] ['SA', 'CB'] ['BA', 'ED', 'MA'] ['KA', 'ED', 'SA'] ['AB'] ['CA'] ['AB'] ['SB', 'MA'] ['XA', 'BA', 'CA'] ['SA', 'AB'] ['SA', 'XA', 'CA'] ['KA'] ['MA', 'XA'] ['MA'] ['RA', 'EC', 'JA'] ['RA'] ['CC', 'RA'] ['CC'] ['CC', 'RA'] ['SA', 'RA'] ['RA', 'AC'] ['XA', 'JA', 'RA'] ['CB', 'KA', 'RA'] ['ZZ'] ['CB', 'SA', 'RA'] ['MA'] ['LA', 'RB', 'RA'] ['XA', 'RA'] ['GA', 'RA', 'LA'] </code></pre> <p><code>print x</code> gives me:</p> <pre><code>XA MA BA MA RA EB CB JA RB RA ED EA RB KA RA SB MA ED SB EC FA RA MA RB CB AC CB MA RA AC KA SA CB BA ED MA KA ED SA AB CA AB SB MA XA BA CA SA AB SA XA CA KA MA XA MA RA EC JA RA CC RA CC CC RA SA RA RA AC XA JA RA CB KA RA ZZ CB SA RA MA LA RB RA XA RA GA RA LA </code></pre> <p>Which is what I want. I'm trying to read those separate vars into an array and simply count them afterwards. But I am getting an error when trying to read into the y array</p> <pre><code>y[i] = x IndexError: list assignment index out of range </code></pre> <p>Can anyone help me with this? My next steps are counting the repeating elements and displaying the top X (which can be changed based on user input).</p> <p>UPDATE 2: Debugging pic: I don't understand here: y is...not...seeing x as its own element? <img src="http://i.stack.imgur.com/RQCt7.png" alt="enter image description here"></p> <p>UPDATE 3: for y += x <img src="http://i.stack.imgur.com/3oPAJ.png" alt="enter image description here"></p>
0
2016-08-07T23:16:59Z
38,819,527
<p>First, let me give you an improvement to your original code:</p> <pre><code>y = [] fav_Split = row[9].split('|') #print fav_Split for x in fav_Split: y += x </code></pre> <p>Or, even shorter:</p> <pre><code>fav_split = row[9].split('|') y = [item for sublist in fav_Split for item in sublist] </code></pre> <p>Okay, now we have the list of two-letter-codes you wanted. The next step is using the <a href="https://docs.python.org/3/library/collections.html#collections.Counter" rel="nofollow"><code>collections.Counter</code></a> class, which seems to be exactly what you need. You pass a list to it, and it computes the occurences of each element and orders it descending by its occurences.</p> <pre><code>import collections.Counter c = collections.Counter(y) </code></pre> <p>To get the <code>n</code> most common elements from your list, you can simply use <a href="https://docs.python.org/3/library/collections.html#collections.Counter.most_common" rel="nofollow"><code>most_common()</code></a></p> <pre><code>c.most_common(n) </code></pre> <p>which will </p> <blockquote> <p>Return a list of the n most common elements and their counts from the most common to the least.</p> </blockquote>
1
2016-08-07T23:32:09Z
[ "python", "arrays", "list" ]
Jinja2 Flask Python button across many pages
38,819,551
<p>Using flasks render template function, you can sculpt complicated pages quickly. I want to have a small "up vote" "module" .html file that encases all the scripts and html needed to get the button working.</p> <p>In jinja you can use the "extends to" tag that inserts the current section into the section called by the extend, but can you CALL a module from any template?</p> <p>So a 1 to many relationship instead of a many to 1.</p> <p>I could insert all the code for the button in the pages I want but I feel it would be better practice - and a lot faster - to have this small module encasing all the things I need. I could also make changes to this module alone which would be inherited by the entire project.</p> <p>Sort of like a website embed.</p> <p>I've thought of a couple of ways to do so, like parsing the info for the button into a variable for the render_template function, but which would you recommend doing?</p>
0
2016-08-07T23:36:02Z
38,819,574
<p>You should use the <a href="http://jinja.pocoo.org/docs/dev/templates/#include" rel="nofollow"><code>include</code></a> statement. This is similar to using <code>import</code> in plain Python.</p> <p>However, note that Jinja2 also has an <code>import</code> statement. This is different than <code>include</code> in that <code>import</code> is oriented towards using macros and executable code from a file; nothing is rendered. So, if you're separating common tasks that you do with Jinja2 statements, you can export those into macros in files that you then <code>import</code> in your templates.</p> <p>However, when you want to include rendered output, which is what you're doing, stick to <code>include</code>.</p>
1
2016-08-07T23:41:06Z
[ "python", "flask", "jinja2" ]
Use requests module in Python to log in to Barclays premier league fantasy football?
38,819,570
<p>I'm trying to write a Python script to let me log in to my fantasy football account at <a href="https://fantasy.premierleague.com/">https://fantasy.premierleague.com/</a>, but something is not quite right with my log in. When I login through my browser and check the details using Chrome developer tools, I find that the Request URL is <a href="https://users.premierleague.com/accounts/login/">https://users.premierleague.com/accounts/login/</a> and the form data sent is:</p> <pre class="lang-none prettyprint-override"><code>csrfmiddlewaretoken:[My token] login:[My username] password:[My password] app:plfpl-web redirect_uri:https://fantasy.premierleague.com/a/login </code></pre> <p>There are also a number of Request headers:</p> <pre><code>Accept:text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8 Accept-Encoding:gzip, deflate, br Accept-Language:en-US,en;q=0.8 Cache-Control:max-age=0 Connection:keep-alive Content-Length:185 Content-Type:application/x-www-form-urlencoded Cookie:[My cookies] Host:users.premierleague.com Origin:https://fantasy.premierleague.com Referer:https://fantasy.premierleague.com/ Upgrade-Insecure-Requests:1 User-Agent:Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36 </code></pre> <p>So I've written a short Python script using the request library to try to log in and navigate to a page as follows:</p> <pre><code>import requests with requests.Session() as session: url_home = 'https://fantasy.premierleague.com/' html_home = session.get(url_home) csrftoken = session.cookies['csrftoken'] values = { 'csrfmiddlewaretoken': csrftoken, 'login': &lt;My username&gt;, 'password': &lt;My password&gt;, 'app': 'plfpl-web', 'redirect_uri': 'https://fantasy.premierleague.com/a/login' } head = { 'Host':'users.premierleague.com', 'Referer': 'https://fantasy.premierleague.com/', } session.post('https://users.premierleague.com/accounts/login/', data = values, headers = head) 
url_transfers = 'https://fantasy.premierleague.com/a/squad/transfers' html_transfers = session.get(url_transfers) print(html_transfers.content) </code></pre> <p>On printing out the content of my post request, I get a HTML response code 500 error with:</p> <pre><code>b'\n&lt;html&gt;\n&lt;head&gt;\n&lt;title&gt;Fastly error: unknown domain users.premierleague.com&lt;/title&gt;\n&lt;/head&gt;\n&lt;body&gt;\nFastly error: unknown domain: users.premierleague.com. Please check that this domain has been added to a service.&lt;/body&gt;&lt;/html&gt;' </code></pre> <p>If I remove the 'host' from my head dict, I get a HTML response code 405 error with:</p> <pre><code>b'' </code></pre> <p>I've tried including various combinations of the Request headers in my head dict and nothing seems to work.</p>
9
2016-08-07T23:40:22Z
38,925,310
<p>The following worked for me. I simply removed <code>headers = head</code></p> <pre><code>session.post('https://users.premierleague.com/accounts/login/', data = values) </code></pre> <p>I think you are trying to pick your team programmatically, like me. Your code got me started thanks.</p>
2
2016-08-12T19:34:32Z
[ "python", "session", "login", "request" ]
Multiple sessions and graphs in Tensorflow (in the same process)
38,819,576
<p>I'm training a model where the input vector is the output of another model. This involves restoring the first model from a checkpoint file while initializing the second model from scratch (using <code>tf.initialize_variables()</code>) in the same process. </p> <p>There is a substantial amount of code and abstraction, so I'm just pasting the relevant sections here. </p> <p>The following is the restoring code:</p> <pre><code>self.variables = [var for var in all_vars if var.name.startswith(self.name)] saver = tf.train.Saver(self.variables, max_to_keep=3) self.save_path = tf.train.latest_checkpoint(os.path.dirname(self.checkpoint_path)) if should_restore: self.saver.restore(self.sess, save_path) else: self.sess.run(tf.initialize_variables(self.variables)) </code></pre> <p>Each model is scoped within its own graph and session, like this:</p> <pre><code> self.graph = tf.Graph() self.sess = tf.Session(graph=self.graph) with self.sess.graph.as_default(): # Create variables and ops. </code></pre> <p>All the variables within each model are created within the <code>variable_scope</code> context manager. </p> <p>The feeding works as follows: </p> <ul> <li>A background thread calls <code>sess.run(inference_op)</code> on <code>input = scipy.misc.imread(X)</code> and puts the result in a blocking thread-safe queue. </li> <li>The main training loop reads from the queue and calls <code>sess.run(train_op)</code> on the second model.</li> </ul> <p><strong>PROBLEM:</strong><br> I am observing that the loss values, even in the very first iteration of the training (second model) keep changing drastically across runs (and become nan in a few iterations). I confirmed that the output of the first model is exactly the same everytime. Commenting out the <code>sess.run</code> of the first model and replacing it with identical input from a pickled file does not show this behaviour. 
</p> <p>This is the <code>train_op</code>: </p> <pre><code> loss_op = tf.nn.sparse_softmax_cross_entropy(network.feedforward()) # Apply gradients. with tf.control_dependencies([loss_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(loss_op) apply_gradient_op = opt.apply_gradients(grads) return apply_gradient_op </code></pre> <p>I know this is vague, but I'm happy to provide more details. Any help is appreciated!</p>
1
2016-08-07T23:41:34Z
38,889,562
<p>The issue is most certainly happening due to concurrent execution of different session objects. I moved the first model's session from the background thread to the main thread, repeated the controlled experiment several times (running for over 24 hours and reaching convergence) and never observed <code>NaN</code>. On the other hand, concurrent execution diverges the model within a few minutes.</p> <p>I've restructured my code to use a common session object for all models. </p>
0
2016-08-11T07:09:55Z
[ "python", "machine-learning", "tensorflow", "deep-learning" ]
Korean txt file encoding with utf-8
38,819,602
<p>I'm trying to process a Korean text file with python, but it fails when I try to encode the file with utf-8. </p> <pre><code>#!/usr/bin/python #-*- coding: utf-8 -*- f = open('tag.txt', 'r', encoding='utf=8') s = f.readlines() z = open('tagresult.txt', 'w') y = z.write(s) z.close </code></pre> <blockquote> <pre><code>============================================================= Traceback (most recent call last): File "C:\Users\******\Desktop\tagging.py", line 5, in &lt;module&gt; f = open('tag.txt', 'r', encoding='utf=8') TypeError: 'encoding' is an invalid keyword argument for this function [Finished in 0.1s] ================================================================== </code></pre> </blockquote> <p>And when I just opens a Korean txt file encoded with utf-8, the fonts are broken like this. What can I do?</p> <blockquote> <p>\xc1\xc1\xbe\xc6\xc1\xf6\xb4\xc2\n', '\xc1\xc1\xbe\xc6\xc7\xcf\xb0\xc5\xb5\xe7\xbf\xe4\n', '\xc1\xc1\xbe\xc6\xc7\xcf\xbd\xc3\xb4\xc2\n', '\xc1\xcb\xbc\xdb\xc7\xd1\xb5\xa5\xbf\xe4\n', '\xc1\xd6\xb1\xb8\xbf\xe4\</p> </blockquote>
0
2016-08-07T23:45:02Z
38,819,716
<p>In Python 2 the <a href="https://docs.python.org/2/library/functions.html#open" rel="nofollow">open function</a> does not take an encoding parameter. Instead you read a line and convert it to unicode. This <a href="https://pythonhosted.org/kitchen/unicode-frustrations.html" rel="nofollow">article</a> on kitchen (as in kitchen sink) modules provides details and some lightweight utilities to work with unicode in python 2.x.</p>
0
2016-08-08T00:08:51Z
[ "python", "encoding", "utf-8" ]
Korean txt file encoding with utf-8
38,819,602
<p>I'm trying to process a Korean text file with python, but it fails when I try to encode the file with utf-8. </p> <pre><code>#!/usr/bin/python #-*- coding: utf-8 -*- f = open('tag.txt', 'r', encoding='utf=8') s = f.readlines() z = open('tagresult.txt', 'w') y = z.write(s) z.close </code></pre> <blockquote> <pre><code>============================================================= Traceback (most recent call last): File "C:\Users\******\Desktop\tagging.py", line 5, in &lt;module&gt; f = open('tag.txt', 'r', encoding='utf=8') TypeError: 'encoding' is an invalid keyword argument for this function [Finished in 0.1s] ================================================================== </code></pre> </blockquote> <p>And when I just opens a Korean txt file encoded with utf-8, the fonts are broken like this. What can I do?</p> <blockquote> <p>\xc1\xc1\xbe\xc6\xc1\xf6\xb4\xc2\n', '\xc1\xc1\xbe\xc6\xc7\xcf\xb0\xc5\xb5\xe7\xbf\xe4\n', '\xc1\xc1\xbe\xc6\xc7\xcf\xbd\xc3\xb4\xc2\n', '\xc1\xcb\xbc\xdb\xc7\xd1\xb5\xa5\xbf\xe4\n', '\xc1\xd6\xb1\xb8\xbf\xe4\</p> </blockquote>
0
2016-08-07T23:45:02Z
38,819,723
<p>I don't know Korean, and don't have sample string to try, but here are some advices for you:</p> <p>1</p> <pre><code>f = open('tag.txt', 'r', encoding='utf=8') </code></pre> <p>You have a typo here, <code>utf-8</code> not <code>utf=8</code>, this explains for the exception you got.</p> <p>The default mode of <code>open()</code> is 'r' so you don't have to define it again.</p> <p>2 Don't just use <code>open</code>, you should use <a href="https://www.python.org/dev/peps/pep-0343/" rel="nofollow">context manager</a> statement to manage the opening/closing file descriptor, like this:</p> <pre><code>with open('tagresult.txt', 'w') as f: f.write(s) </code></pre>
0
2016-08-08T00:10:11Z
[ "python", "encoding", "utf-8" ]
Several variables in if-statement
38,819,648
<p>This works as intended:</p> <pre><code>if c1 in r':6' or c2 in r':6': subst_weight = 1 </code></pre> <p>This doesn't:</p> <pre><code>if (c1 or c2) in r':6': subst_weight = 1 </code></pre> <p>Why and what is the differnce? The goal is to find out whether <code>c1</code> or <code>c2</code> is in the string. Same here:</p> <p>Works:</p> <pre><code>if c1 == '6' or c2 == '6': </code></pre> <p>Doesn't work:</p> <pre><code>if (c1 or c2) == '6': </code></pre> <p>Thanks</p>
2
2016-08-07T23:56:30Z
38,819,667
<p>This expression: </p> <pre><code>if (c1 or c2) in r':6': </code></pre> <p>Evaluates from left to right. Assuming that <code>(c1 or c2)</code> is truthy, it's equivalent to this:</p> <pre><code>if True in r':6': </code></pre> <p>Which obviously is not what you want. If you want it to work for several variables, this will do nicely:</p> <pre><code>if any(c in r':6' for c in (c1, c2, c3, c4)): </code></pre>
1
2016-08-08T00:00:21Z
[ "python", "if-statement", "logical-operators" ]
Several variables in if-statement
38,819,648
<p>This works as intended:</p> <pre><code>if c1 in r':6' or c2 in r':6': subst_weight = 1 </code></pre> <p>This doesn't:</p> <pre><code>if (c1 or c2) in r':6': subst_weight = 1 </code></pre> <p>Why and what is the differnce? The goal is to find out whether <code>c1</code> or <code>c2</code> is in the string. Same here:</p> <p>Works:</p> <pre><code>if c1 == '6' or c2 == '6': </code></pre> <p>Doesn't work:</p> <pre><code>if (c1 or c2) == '6': </code></pre> <p>Thanks</p>
2
2016-08-07T23:56:30Z
38,819,674
<p>It has to do with the precedent given to the operators. The use of ()'s means that <code>c1 or c2</code> is evaluated first. It evaluates to True or False. So your statement converts to </p> <pre><code>if bool in r':g' </code></pre> <p>Which is not what you are looking for.</p>
1
2016-08-08T00:00:45Z
[ "python", "if-statement", "logical-operators" ]
Several variables in if-statement
38,819,648
<p>This works as intended:</p> <pre><code>if c1 in r':6' or c2 in r':6': subst_weight = 1 </code></pre> <p>This doesn't:</p> <pre><code>if (c1 or c2) in r':6': subst_weight = 1 </code></pre> <p>Why and what is the differnce? The goal is to find out whether <code>c1</code> or <code>c2</code> is in the string. Same here:</p> <p>Works:</p> <pre><code>if c1 == '6' or c2 == '6': </code></pre> <p>Doesn't work:</p> <pre><code>if (c1 or c2) == '6': </code></pre> <p>Thanks</p>
2
2016-08-07T23:56:30Z
38,819,676
<p>The statement <code>one or two</code> will return <code>one</code> if defined, and <code>two</code> if <code>one</code> is not truthy, and I don't see a reason it wouldn't work with your logic because if either is truthy, and is contained in string, it will evaluate to <code>True</code></p> <pre><code>&gt;&gt;&gt; a = "6" &gt;&gt;&gt; b = ":" &gt;&gt;&gt; (a or b) '6' &gt;&gt;&gt; (b or a) ':' &gt;&gt;&gt; a = None &gt;&gt;&gt; (a or b) ':' &gt;&gt;&gt; b ':' </code></pre> <p>For more than 2 variables it will return the first that's truthy</p> <pre><code>&gt;&gt;&gt; a = None &gt;&gt;&gt; b = None &gt;&gt;&gt; c = 6 &gt;&gt;&gt; a or b or c 6 </code></pre> <p>But note your logic, you want to check if any of the variables is in that string, you can't use this, because the <code>c1</code> ca be equal to <code>"example"</code> and that's what's gonna be returned to that <code>if</code> statement and it's not in <code>":6"</code></p>
3
2016-08-08T00:00:48Z
[ "python", "if-statement", "logical-operators" ]
Several variables in if-statement
38,819,648
<p>This works as intended:</p> <pre><code>if c1 in r':6' or c2 in r':6': subst_weight = 1 </code></pre> <p>This doesn't:</p> <pre><code>if (c1 or c2) in r':6': subst_weight = 1 </code></pre> <p>Why and what is the differnce? The goal is to find out whether <code>c1</code> or <code>c2</code> is in the string. Same here:</p> <p>Works:</p> <pre><code>if c1 == '6' or c2 == '6': </code></pre> <p>Doesn't work:</p> <pre><code>if (c1 or c2) == '6': </code></pre> <p>Thanks</p>
2
2016-08-07T23:56:30Z
38,819,694
<p>Because <code>(c1 or c1)</code> evaluates as the value of the first variable, if it is not "false"(which means false, 0 or an empty string) or the value of the second variable. </p> <p>Programming languages don't work like "natural languages".</p>
1
2016-08-08T00:04:22Z
[ "python", "if-statement", "logical-operators" ]
Several variables in if-statement
38,819,648
<p>This works as intended:</p> <pre><code>if c1 in r':6' or c2 in r':6': subst_weight = 1 </code></pre> <p>This doesn't:</p> <pre><code>if (c1 or c2) in r':6': subst_weight = 1 </code></pre> <p>Why and what is the differnce? The goal is to find out whether <code>c1</code> or <code>c2</code> is in the string. Same here:</p> <p>Works:</p> <pre><code>if c1 == '6' or c2 == '6': </code></pre> <p>Doesn't work:</p> <pre><code>if (c1 or c2) == '6': </code></pre> <p>Thanks</p>
2
2016-08-07T23:56:30Z
38,819,719
<p>Although not literally answering your question of why, this is an answer to "how then should I..."</p> <pre><code>if any(x in r':6' for x in [c1, c2]): something </code></pre> <p>If you ever get more of the 'c's, use a variable to define the list beforehand.</p> <p>(sauce: <a href="http://stackoverflow.com/questions/3389574/check-if-multiple-strings-exist-in-another-string">Check if multiple strings exist in another string</a>)</p>
2
2016-08-08T00:09:10Z
[ "python", "if-statement", "logical-operators" ]
How to give raw bytes as parameter to C program
38,819,683
<p>I'm working on an online CTF challenge and I need to somehow give raw bytes to this compiled C program. I've tried the following using python:</p> <pre><code>./program `python -c 'print "\x00\x00"'` </code></pre> <p>... but for some reason that doesn't seem to be giving me what I'm expecting. Is there some conversion/formatting that's happening that I'm not aware of? I would expect this to give raw bytes as an argument.</p>
0
2016-08-08T00:02:33Z
38,819,709
<p>Command line args in C are an array of 0 terminated strings. There is no way to pass "raw bytes" (any 0 byte won't behave as expected).</p> <p>I'd suggest passing either reading the bytes from stdin or from a file specified on the command line.</p>
1
2016-08-08T00:07:34Z
[ "python", "c", "byte" ]
Use of random.randint in Flask
38,819,796
<p>I am playing with Flask to understand it better. I have a simple app that queries a large database and returns a random element. The following code is not working but I know exactly where it fails. It fails when I can random.randint() to get a random element in the list. There is however no error shown in my logs, what is the root cause of this? It works if I use a hardcoded value instead of a random int. I use curl to test it. I snipped the database code as it seems to be correct.</p> <pre><code>from flask import Flask, render_template, request import sqlite3 import random app = Flask(__name__) def show_home_page(): return render_template("home.html") def get_random_element(): # &lt;snipped&gt;: Do some sql queries and populate a list called P_LIST r = random.randint(0, len(P_LIST)) # This line silently fails. r_e = P_LIST[r] # Never seems to get here print "get_random_element", r_e # Never prints this line!! return r_e @app.route('/') def server(): return show_home_page() @app.route('/element', methods=['POST', 'GET']) def random(): if request.method == 'GET': p = request.args.get('q', '') print "Request:", p if p == 'random' or p == '': p = get_random_element() print "Random element:", p else: print "Else:", p return render_template('random.html', element=p) return show_home_page() if __name__ == '__main__': app.run() </code></pre>
-1
2016-08-08T00:24:27Z
38,819,926
<p>It is something I don't understand but here is what is happening. I need to import random inside the random() function. Otherwise the global "import random" statement does not seem to be sufficient. Not sure why. So adding one import line inside random() made it work. If anybody can explain this I would be grateful.</p> <p>EDIT: Now I understand what is going on. The function name random() was causing some sort of conflict. If I change it to rand() everything works fine now with just one global import.</p>
0
2016-08-08T00:50:35Z
[ "python", "random" ]
Use of random.randint in Flask
38,819,796
<p>I am playing with Flask to understand it better. I have a simple app that queries a large database and returns a random element. The following code is not working but I know exactly where it fails. It fails when I can random.randint() to get a random element in the list. There is however no error shown in my logs, what is the root cause of this? It works if I use a hardcoded value instead of a random int. I use curl to test it. I snipped the database code as it seems to be correct.</p> <pre><code>from flask import Flask, render_template, request import sqlite3 import random app = Flask(__name__) def show_home_page(): return render_template("home.html") def get_random_element(): # &lt;snipped&gt;: Do some sql queries and populate a list called P_LIST r = random.randint(0, len(P_LIST)) # This line silently fails. r_e = P_LIST[r] # Never seems to get here print "get_random_element", r_e # Never prints this line!! return r_e @app.route('/') def server(): return show_home_page() @app.route('/element', methods=['POST', 'GET']) def random(): if request.method == 'GET': p = request.args.get('q', '') print "Request:", p if p == 'random' or p == '': p = get_random_element() print "Random element:", p else: print "Else:", p return render_template('random.html', element=p) return show_home_page() if __name__ == '__main__': app.run() </code></pre>
-1
2016-08-08T00:24:27Z
38,819,972
<p>You have redefined <code>random</code> by defining a function named <code>random()</code>.</p> <pre><code>@app.route('/element', methods=['POST', 'GET']) def random(): ... </code></pre> <p>This shadows the imported module, causing the problem that you see. When you <code>import random</code> again in <code>get_random_element()</code> your code can access the <em>module</em> <code>random</code> instead of the local function <code>random()</code>.</p> <p>Fix this by renaming the function, perhaps call it <code>element()</code> since that is the route name.</p>
0
2016-08-08T01:01:00Z
[ "python", "random" ]
How should I go about geolocating 1,100,000 lines of coordinate information?
38,819,915
<p>Okay, so I'm trying to envision a solution for this. I have a database with over a million lines which includes a city name in the US and a set of coordinates for that city. The problem is that there are multiple cities with the same name: Springfield, NJ and Springfield, MA, for example. So I need to get the state information.</p> <p>There are also duplicates within the data. There are only about 6500 sets of unique coordinates, so conceivably, I could locate those and then assign them to the other entries in the database. Is this a feasible plan? How would I go about this?</p> <p>Here are some examples of what entries in this database look like:</p> <pre><code>2015-09-01 00:00:00,Buffalo,"42.9405299,-78.8697906",10.1016/s0894-7317(12)00840-1,42.9405299,-78.8697906,43.0,-79.0 2015-09-01 00:00:00,New York,"40.7830603,-73.9712488",10.1016/j.jmv.2014.04.008,40.783060299999995,-73.9712488,41.0,-74.0 2015-09-01 00:00:04,Scottsdale,"33.4941704,-111.9260519",10.1016/j.euroneuro.2014.05.008,33.494170399999994,-111.9260519,33.0,-112.0 2015-09-01 00:00:09,Provo,"40.2338438,-111.6585337",10.1016/j.toxac.2014.07.002,40.233843799999995,-111.6585337,40.0,-112.0 2015-09-01 00:00:13,New York,"40.7830603,-73.9712488",10.1016/j.drugalcdep.2014.09.015,40.783060299999995,-73.9712488,41.0,-74.0 2015-09-01 00:00:16,Fremont,"37.5482697,-121.9885719",10.1016/j.ajic.2012.04.160,37.548269700000006,-121.98857190000001,38.0,-122.0 2015-09-01 00:00:24,Provo,"40.2338438,-111.6585337",10.1016/j.chroma.2015.01.036,40.233843799999995,-111.6585337,40.0,-112.0 </code></pre> <p>I am using the <a href="https://pypi.python.org/pypi/geocoder" rel="nofollow">geocoder</a> package for geolocation. 
Here is some code I've written that could handle that:</p> <pre><code>def convert_to_state(lati, long): lat, lon = float(lati), float(long) g = geocoder.google([lat, lon], method='reverse') state_long, state_short = g.state_long, g.state return state_long, state_short </code></pre> <p>I'm just not sure how to do this. Turns out geocoding is pretty expensive, so using the duplicates is probably the best way forward. Any suggestions for how to accomplish that?</p>
1
2016-08-08T00:48:29Z
38,820,048
<p>Almost certainly the best way to avoid doing extra work will be to use a hash table to check if something already had a mapping:</p> <pre><code>processed_coords = {} def convert_to_state(lati, long): lat, lon = float(lati), float(long) if (lat, lon) not in processed_coords: g = geocoder.google([lat, lon], method='reverse') state_long, state_short = g.state_long, g.state processed_coords[(lat,lon)] = (state_long, state_short) return state_long, state_short else: return processed_coords[(lat,lon)] </code></pre> <p>This way you do a simple O(1) check to see if you already have the data, which isn't much extra calculation at all, and you don't redo the work if you indeed have already done it.</p> <p>If you're right and there's only 6500 sets of unique coordinates, you should be fine in terms of memory usage for this technique, but if you're wrong and there are more unique ones, you may run into some memory issues if more of those million something are unique.</p>
2
2016-08-08T01:12:32Z
[ "python", "pandas", "geolocation", "google-geocoder" ]
How should I go about geolocating 1,100,000 lines of coordinate information?
38,819,915
<p>Okay, so I'm trying to envision a solution for this. I have a database with over a million lines which includes a city name in the US and a set of coordinates for that city. The problem is that there are multiple cities with the same name: Springfield, NJ and Springfield, MA, for example. So I need to get the state information.</p> <p>There are also duplicates within the data. There are only about 6500 sets of unique coordinates, so conceivably, I could locate those and then assign them to the other entries in the database. Is this a feasible plan? How would I go about this?</p> <p>Here are some examples of what entries in this database look like:</p> <pre><code>2015-09-01 00:00:00,Buffalo,"42.9405299,-78.8697906",10.1016/s0894-7317(12)00840-1,42.9405299,-78.8697906,43.0,-79.0 2015-09-01 00:00:00,New York,"40.7830603,-73.9712488",10.1016/j.jmv.2014.04.008,40.783060299999995,-73.9712488,41.0,-74.0 2015-09-01 00:00:04,Scottsdale,"33.4941704,-111.9260519",10.1016/j.euroneuro.2014.05.008,33.494170399999994,-111.9260519,33.0,-112.0 2015-09-01 00:00:09,Provo,"40.2338438,-111.6585337",10.1016/j.toxac.2014.07.002,40.233843799999995,-111.6585337,40.0,-112.0 2015-09-01 00:00:13,New York,"40.7830603,-73.9712488",10.1016/j.drugalcdep.2014.09.015,40.783060299999995,-73.9712488,41.0,-74.0 2015-09-01 00:00:16,Fremont,"37.5482697,-121.9885719",10.1016/j.ajic.2012.04.160,37.548269700000006,-121.98857190000001,38.0,-122.0 2015-09-01 00:00:24,Provo,"40.2338438,-111.6585337",10.1016/j.chroma.2015.01.036,40.233843799999995,-111.6585337,40.0,-112.0 </code></pre> <p>I am using the <a href="https://pypi.python.org/pypi/geocoder" rel="nofollow">geocoder</a> package for geolocation. 
Here is some code I've written that could handle that:</p> <pre><code>def convert_to_state(lati, long): lat, lon = float(lati), float(long) g = geocoder.google([lat, lon], method='reverse') state_long, state_short = g.state_long, g.state return state_long, state_short </code></pre> <p>I'm just not sure how to do this. Turns out geocoding is pretty expensive, so using the duplicates is probably the best way forward. Any suggestions for how to accomplish that?</p>
1
2016-08-08T00:48:29Z
38,820,762
<p>I like the hash table idea, but here is an alternative using some pandas stuff: </p> <p>1) get a unique list of (lat, lon) coords</p> <pre><code>df['latlon'] = [(x,y) for x,y in zip(df['lati'].tolist(),df['long'].tolist())] unique_ll = df['latlon'].unique() </code></pre> <p>2) loop through unique coords and set the state for all equivalent lines</p> <pre><code>for l in unique_ll: df.loc[df['latlon'] == l, 'state'] = convert_to_state(l[0],l[1]) </code></pre>
1
2016-08-08T03:10:41Z
[ "python", "pandas", "geolocation", "google-geocoder" ]
How should I go about geolocating 1,100,000 lines of coordinate information?
38,819,915
<p>Okay, so I'm trying to envision a solution for this. I have a database with over a million lines which includes a city name in the US and a set of coordinates for that city. The problem is that there are multiple cities with the same name: Springfield, NJ and Springfield, MA, for example. So I need to get the state information.</p> <p>There are also duplicates within the data. There are only about 6500 sets of unique coordinates, so conceivably, I could locate those and then assign them to the other entries in the database. Is this a feasible plan? How would I go about this?</p> <p>Here are some examples of what entries in this database look like:</p> <pre><code>2015-09-01 00:00:00,Buffalo,"42.9405299,-78.8697906",10.1016/s0894-7317(12)00840-1,42.9405299,-78.8697906,43.0,-79.0 2015-09-01 00:00:00,New York,"40.7830603,-73.9712488",10.1016/j.jmv.2014.04.008,40.783060299999995,-73.9712488,41.0,-74.0 2015-09-01 00:00:04,Scottsdale,"33.4941704,-111.9260519",10.1016/j.euroneuro.2014.05.008,33.494170399999994,-111.9260519,33.0,-112.0 2015-09-01 00:00:09,Provo,"40.2338438,-111.6585337",10.1016/j.toxac.2014.07.002,40.233843799999995,-111.6585337,40.0,-112.0 2015-09-01 00:00:13,New York,"40.7830603,-73.9712488",10.1016/j.drugalcdep.2014.09.015,40.783060299999995,-73.9712488,41.0,-74.0 2015-09-01 00:00:16,Fremont,"37.5482697,-121.9885719",10.1016/j.ajic.2012.04.160,37.548269700000006,-121.98857190000001,38.0,-122.0 2015-09-01 00:00:24,Provo,"40.2338438,-111.6585337",10.1016/j.chroma.2015.01.036,40.233843799999995,-111.6585337,40.0,-112.0 </code></pre> <p>I am using the <a href="https://pypi.python.org/pypi/geocoder" rel="nofollow">geocoder</a> package for geolocation. 
Here is some code I've written that could handle that:</p> <pre><code>def convert_to_state(lati, long): lat, lon = float(lati), float(long) g = geocoder.google([lat, lon], method='reverse') state_long, state_short = g.state_long, g.state return state_long, state_short </code></pre> <p>I'm just not sure how to do this. Turns out geocoding is pretty expensive, so using the duplicates is probably the best way forward. Any suggestions for how to accomplish that?</p>
1
2016-08-08T00:48:29Z
39,775,543
<p>There's a geo-info service SmartyStreets that has a list tool that processes lists of searches and returns a bunch of information (can upload a spreadsheet or copy and paste). They focus on address validation so they expect search terms to be addresses, however, it can match just zip codes to cities and states. Do you have access to that info?</p> <p>Here's a <a href="https://smartystreets.com/demo/list" rel="nofollow">link to the demo</a>.</p>
0
2016-09-29T16:28:34Z
[ "python", "pandas", "geolocation", "google-geocoder" ]
Exporting values from a Spreadsheet using Python for webscraping (BeautifulSoup4)
38,819,951
<p>A. My Objective: Use Python to extract unique OCPO IDs from an Excel Spreadsheet and using these IDs to web-scrape for corresponding company names and NIN IDs. (Note: Both NIN and OCPO IDs are unique to one company).</p> <p>B. Details: i. Extract OCPO IDs from an Excel Spreadsheet using openpyxl. ii. Search OCPO IDs one-by-one in a business registry (<a href="https://focus.kontur.ru/" rel="nofollow">https://focus.kontur.ru/</a>) and find corresponding company names and company IDs (NIN) using BeautifulSoup4.</p> <blockquote> <p>Example: A search for OCPO ID "00044428" yields a matching company name ПАО "НК "РОСНЕФТЬ" and corresponding NIN ID "7706107510."</p> </blockquote> <ol start="3"> <li>Save in Excel the list of company names and NIN IDs.</li> </ol> <p>C. My progress: i. I'm able to extract the list of OCPO IDs from Excel to Python. </p> <pre><code># Pull the Packages import openpyxl import requests import sys from bs4 import BeautifulSoup # Pull OCPO from the Spreadsheet wb = openpyxl.load_workbook(r"C:\Users\ksong\Desktop\book1.xlsx") sheet = wb.active sheet.columns[0] for cellobjc in sheet.columns[0]: print(cellobjc.value) </code></pre> <p>ii. I'm able to search an OCPO ID and let Python scrape matching company name and corresponding company NIN ID.</p> <pre><code># Part 1a: Pull the Website r = requests.get("https://focus.kontur.ru/search?query=" + "00044428") r.encoding = "UTF-8" # Part 1b: Pull the Content c = r.content soup = BeautifulSoup(c, "html.parser", from_encoding="UTF-8") # Part 2a: Pull Company name name = soup.find("a", attrs={'class':"js-subject-link"}) name_box = name.text.strip() print(name_box) </code></pre> <p>D. Help</p> <p>i. How do you code so that loop each OCPO IDs are searched individually as a loop so that I don't get a list of OCPOs IDs but instead a list of search results? In other words, each OCPO is searched and matched with corresponding Company Name and NIN ID. 
This loop would have to be fed as ######## ("<a href="https://focus.kontur.ru/search?query=" rel="nofollow">https://focus.kontur.ru/search?query=</a>" + "########"). </p> <p>ii. Also, what code should I use for Python to save all the search results in an Excel Spreadsheet? </p>
0
2016-08-08T00:56:10Z
38,831,777
<p>1) Create an empty workbook to write to:</p> <pre><code>wb2 = Workbook() ws1 = wb2.active </code></pre> <p>2) Put all that code in the 2nd box into your for loop from the first box.</p> <p>3) Change "00044428" to str(cellobjc.value)</p> <p>4) At the end of each loop, append your row to the new worksheet:</p> <pre><code>row = [cellobjc.value, date_box, other_variables] ws1.append(row) </code></pre> <p>5) After the loop finishes, save your file</p> <pre><code>wb2.save("results.xlsx") </code></pre>
0
2016-08-08T14:24:18Z
[ "python", "python-3.x", "web-scraping", "beautifulsoup", "openpyxl" ]
Vader Sentiment values not coming out correctly
38,819,968
<p>I'm using vader sentiment to analyze CSV files full of tweets. I'm try to get an average for each of the sentiment values for the entire set of Tweets. For some reason, many of the results tend to come out with values of 0 which doesn't make sense unless it's a rounding issue. Also len(result[i]) always returns 1. My code is as follows:</p> <pre><code>for row in csv.reader(csv_file): result = { 'pos':[] , 'neg':[], 'compound':[], 'neu':[] } sentences = row f = open(file_name[0], 'a', newline='') writer = csv.writer(f) for sentence in sentences: vs = vaderSentiment(sentence) row = [sentence, str(vs)] writer.writerow(row) result['pos'].append(vs['pos']) result['neg'].append(vs['neg']) result['compound'].append(vs['compound']) result['neu'].append(vs['neu']) print('sum:') for i in result.keys(): print('\t',i, '=&gt;', float(sum(result[i]))/len(result[i])) f.close() </code></pre>
-1
2016-08-08T01:00:26Z
38,820,398
<p>Nevermind, I realized I was stupidly placing result = { 'pos':[] , 'neg':[], 'compound':[], 'neu':[] } where I shouldn't have so it was clearing the data at each iteration of the loop. I can't believe that took me so long to figure out...</p>
0
2016-08-08T02:11:27Z
[ "python", "sentiment-analysis" ]
Python slow on for-loops and hundreds of attribute lookups. Use Numba?
38,820,040
<p>i am working on a simple showcase SPH (smoothed particle hydrodynamics, not relevant here though) implementation in python. The code works, but the execution is kinda sluggish. I often have to compare individual particles with a certain amount of neighbours. In an earlier implementation i kept all particle positions and all distances-to-each-existing-particle in large numpy arrays -> to a certain point this was pretty fast. But visually not pleasing and n**2. Now i want it clean and simple with classes + kdTree to speed up the neighbour search.</p> <p>this all happens in my global Simulation-Class. Additionally there's a class called "particle" that contains all individual informations. i create hundreds of instances before and loop through them.</p> <pre><code> def calculate_density(self): #Using scipys advanced nearest neighbour seach magic tree = scipy.spatial.KDTree(self.particle_positions) #here we go... loop through all existing particles. set attributes.. for particle in self.my_particles: #get the indexes for the nearest neighbours particle.index_neighbours = tree.query_ball_point(particle.position,self.h,p=2) #now loop through the list of neighbours and perform some additional math particle.density = 0 for neighbour in particle.index_neighbours: r = np.linalg.norm(particle.position - self.my_particles[neighbour].position) particle.density += particle.mass * (315/(64*math.pi*self.h**9)) *(self.h**2-r**2)**3 </code></pre> <p>i timed 0.2717630863189697s for only 216 particles.</p> <p>Now i wonder: what to do to speed it up? Most tools online like "Numba" show how they speed up math-heavy individual functions. I dont know which to choose. On a sidenode, i cannot even get Numba to work in this case. I get a looong error message. And i hoped it is as simple as slapping "@jit" in front of it.</p> <p>I know its the loops with the attribute calls that crush my performance anyway - not the math or the neighbour search. 
Sadly I am a novice to programming and I liked the clean approach I got to work here :( any thoughts?</p>
1
2016-08-08T01:11:47Z
38,820,116
<p>These kind of loop-intensive calculations are slow in Python. In these cases, the first thing you want to do is to see if you can vectorize these operations and get rid of the loops. Then actual calculations will be done in C or Fortran libraries and you will get a lot of speed up. If you can do it usually this is the way to go, since it is much easier to maintain your code.</p> <p>Some operations, however, are just inherently loop-intensive. In these cases using Cython will help you a lot - you can usually expect 60X+ speed up when you cythonize your loop. I also had similar experiences with numba - when my function becomes complicated, it failed to make it faster, so usually I just use Cython.</p> <p>Coding in Cython is not too bad - much easier than actually code in C because you can access numpy arrays easily via memoryviews. Another advantage is that it is pretty easy to parallelize the loop with openMP, which can gives you additional 4X+ speedups (of course, depending on the number of cores you have in your machine), so your code can be hundreds times faster.</p> <p>One issue is that to get the optimal speed, you have to remove all the python calls inside your loop, which means you cannot call numpy/scipy functions. So you have to convert <code>tree.query_ball_point</code> and <code>np.linalg.norm</code> part to Cython for optimal speed. </p>
2
2016-08-08T01:26:22Z
[ "python", "jit", "numba", "kdtree" ]
creating sparse matrix of unknown size
38,820,079
<p>I have a text file with each line indicating an edge on a graph, for example</p> <pre><code>2 5 1 </code></pre> <p>indicates an edge of weight 1 between nodes 2 and 5. I want to create a sparse adjacency matrix using these tuples. Typically, I'd initialize a sparse matrix as</p> <pre><code>G = scipy.sparse.lil_matrix((n,n)) </code></pre> <p>where n is the number of nodes in the graph. But in this case, I do not know what 'n' is. Is there a more efficient way to create the matrix than looping over the lines of the file to find the max node index, creating the lil_matrix and then again looping over the file ? My current implementation is this:</p> <pre><code>n = 0 with open(gfile) as f: for line in f: temp = map(int,line.split()) n = np.max([n,temp[0],temp[1]]) G = sp.lil_matrix((n,n)) with open(gfile) as f: for line in f: temp = map(int,line.split()) G[temp[0],temp[1]] = temp[2] </code></pre>
0
2016-08-08T01:20:07Z
38,821,384
<p>The original, and still prototypical, way of creating a sparse matrix is to collect all inputs in <code>row, col, data</code> arrays (or lists), and use <code>coo_matrix</code> to construct the matrix. Shape can be deduced from those inputs (maximum index values), or given as a parameter.</p> <p>To adapt your code</p> <pre><code>row, col, data = [],[],[] with open(gfile) as f: for line in f: temp = map(int,line.split()) # G[temp[0],temp[1]] = temp[2] data.append(temp[2]) row.append(temp[0]) col.append(temp[1]) G = sparse.coo_matrix((data, (row,col))) </code></pre> <p>List appends are at least as fast as line reads, and better than sparse matrix inserts, even <code>lil</code> (<code>lil</code> assignment involves list appends as well).</p> <p>I suspect you could also do:</p> <pre><code>A = np.genfromtxt(gfile, dtype=int) # default white space delimiter # A should now be a 2d 3 column array G = sparse.coo_matrix((A[:,2], (A[:,0], A[:,1]))) </code></pre> <p>That is read the whole file with <code>genfromtxt</code> or <code>loadtxt</code> and create the sparse matrix from the resulting columns.</p> <p>(When I made sparse matrices in MATLAB years ago, I used this sort of data, col, row initialization, though with a clever use of indexing to assemble those arrays from finite element blocks without loops.)</p>
1
2016-08-08T04:38:30Z
[ "python", "numpy", "scipy", "sparse-matrix" ]
Flask: AttributeError: 'UnboundField' object has no attribute '__call__'?
38,820,081
<p>Why do I get this error? What is the UnboundField and what do I need to know in order to fix and avoid this problem in the future?</p> <p><strong>Debug output when I visit <a href="http://127.0.0.1:5000/signup" rel="nofollow">http://127.0.0.1:5000/signup</a>:</strong></p> <pre><code>AttributeError AttributeError: 'UnboundField' object has no attribute '__call__' Traceback (most recent call last) File "C:\Users\combatmath\Envs\default\lib\site-packages\flask\app.py", line 2000, in __call__ return self.wsgi_app(environ, start_response) File "C:\Users\combatmath\Envs\default\lib\site-packages\flask\app.py", line 1991, in wsgi_app response = self.make_response(self.handle_exception(e)) File "C:\Users\combatmath\Envs\default\lib\site-packages\flask\app.py", line 1567, in handle_exception reraise(exc_type, exc_value, tb) File "C:\Users\combatmath\Envs\default\lib\site-packages\flask\app.py", line 1988, in wsgi_app response = self.full_dispatch_request() File "C:\Users\combatmath\Envs\default\lib\site-packages\flask\app.py", line 1641, in full_dispatch_request rv = self.handle_user_exception(e) File "C:\Users\combatmath\Envs\default\lib\site-packages\flask\app.py", line 1544, in handle_user_exception reraise(exc_type, exc_value, tb) File "C:\Users\combatmath\Envs\default\lib\site-packages\flask\app.py", line 1639, in full_dispatch_request rv = self.dispatch_request() File "C:\Users\combatmath\Envs\default\lib\site-packages\flask\app.py", line 1625, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "C:\Users\combatmath\Apps\ocrbox\routes.py", line 27, in signup return render_template("signup.html", form=form) File "C:\Users\combatmath\Envs\default\lib\site-packages\flask\templating.py", line 134, in render_template context, ctx.app) File "C:\Users\combatmath\Envs\default\lib\site-packages\flask\templating.py", line 116, in _render rv = template.render(context) File 
"C:\Users\combatmath\Envs\default\lib\site-packages\jinja2\environment.py", line 989, in render return self.environment.handle_exception(exc_info, True) File "C:\Users\combatmath\Envs\default\lib\site-packages\jinja2\environment.py", line 754, in handle_exception reraise(exc_type, exc_value, tb) File "C:\Users\combatmath\Apps\ocrbox\templates\signup.html", line 1, in top-level template code {% extends "base.html" %} File "C:\Users\combatmath\Apps\ocrbox\templates\base.html", line 9, in top-level template code {% block content %}{% endblock %} File "C:\Users\combatmath\Apps\ocrbox\templates\signup.html", line 30, in block "content" {{ form.submit(class="btn-primary") }} AttributeError: 'UnboundField' object has no attribute '__call__' </code></pre> <p><strong>signup.html:</strong></p> <pre><code>{% extends "base.html" %} {% block title %}Join ocrbox!{% endblock %} {% block content %} &lt;main class="container signup-section"&gt; &lt;div class="section-content"&gt; &lt;h2&gt;Create an account&lt;/h2&gt; &lt;form method="POST" action="/signup"&gt; &lt;div class="form-group"&gt; {{ form.first_name.label }} {{ form.first_name }} &lt;/div&gt; &lt;div class="form-group"&gt; {{ form.last_name.label }} {{ form.last_name }} &lt;/div&gt; &lt;div class="form-group"&gt; {{ form.email.label }} {{ form.email }} &lt;/div&gt; &lt;div class="form-group"&gt; {{ form.password.label }} {{ form.password }} &lt;/div&gt; {{ form.submit(class="btn-primary") }} &lt;/form&gt; &lt;/div&gt; &lt;/main&gt; {% endblock %} </code></pre> <p><strong>routes.py:</strong></p> <pre><code>from flask import Flask, render_template, request from models import db from forms import SignupForm app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/ocrbox' db.init_app(app) app.secret_key = "development-key" @app.route("/") def index(): return render_template("index.html") @app.route("/about") def about(): return render_template("about.html") @app.route("/signup", methods=['GET', 
'POST']) def signup(): form = SignupForm if request.method == 'POST': return "Success!" elif request.method == 'GET': return render_template("signup.html", form=form) if __name__ == "__main__": app.run(debug=True) </code></pre> <p><strong>forms.py:</strong></p> <pre><code>from flask_wtf import Form from wtforms import StringField, PasswordField, SubmitField class SignupForm(Form): first_name = StringField('First name') last_name = StringField('Last name') email = StringField('Email') password = PasswordField('Password') submit = SubmitField('Sign up') </code></pre> <p><strong>models.py:</strong></p> <pre><code>from flask_sqlalchemy import SQLAlchemy from werkzeug import generate_password_hash, check_password_hash db = SQLAlchemy() class User(db.Model): __tablename__ = 'users' uid = db.Column(db.Integer, primary_key=True) firstname = db.Column(db.String(100)) lastname = db.Column(db.String(100)) email = db.Column(db.String(120), unique=True) pwdhash = db.Column(db.String(54)) def __init__(self, firstname, lastname, email, password): self.firstname = firstname.title() self.lastname = lastname.title() self.email = email.lower() self.set_password(password) def set_password(self, password): self.pwdhash = generate_password_hash(password) def check_password(self, password): return check_password_hash(self.pwdhash, password) </code></pre>
-1
2016-08-08T01:20:17Z
38,820,288
<p>In routes.py, I think your <code>signup()</code> method should contain <code>form = SignupForm(request.form)</code>. Otherwise, form is set to a class instead of an instance.</p>
0
2016-08-08T01:54:46Z
[ "python", "flask", "jinja2" ]
How to scale a signal in python on the x axis?
38,820,132
<p>I need a function on python that can take an array and stretch it or compress it by a given rate. For example, lets say that I have an array a=[0,1,0,-1,0] and I want to stretch it by a factor of 2 then the result array would be b=[0,0.5,1,0.5,0,-0.5,-1,-0.5,0]</p> <p><a href="http://i.stack.imgur.com/jHsKE.png" rel="nofollow"><img src="http://i.stack.imgur.com/jHsKE.png" alt="enter image description here"></a></p>
0
2016-08-08T01:28:55Z
38,820,560
<pre><code>def rescale(arr, factor=2): n = len(arr) return np.interp(np.linspace(0, n, factor*n+1), np.arange(n), arr) </code></pre>
1
2016-08-08T02:36:17Z
[ "python", "signal-processing" ]
How to return parents in path, and locate key in JSON
38,820,164
<p>I'm trying to figure out how to find a key called ['text'] and then get the parent keys from the nest. It's the third key that's different, all the other tags are the same.</p> <pre><code> html_data = data['data']['document_data']['dataItem-ihmty5rw']['text'] </code></pre> <p>I'm using this function to get the keys:</p> <pre><code> def printKeysValues(d): for k, v in d.items(): if isinstance(v, dict): printKeysValues(v) else: print("{0} : {1}".format(k, v)) </code></pre> <p>And this function to find the indent:</p> <pre><code> def pretty(d, indent=0): for key, value in d.items(): print('\t' * indent + str(key)) if isinstance(value, dict): pretty(value, indent+1) else: print('\t' * (indent+1) + str(value)) </code></pre>
0
2016-08-08T01:35:33Z
38,820,966
<p>I'm still not entirely sure if this is what you wanted, but if you just want the text per item dictionary, I think this should work:</p> <pre><code># a dictionary of item string to dictionary containing a "text" key item_map = data['data']['document_data'] for item_string, item_map in item_map.iteritems(): print item_string # the item string (e.g. 'dataItem-ihmty5rw') print item_map['text'] # this is the text associated with the item string </code></pre>
0
2016-08-08T03:40:09Z
[ "python", "json", "key" ]
Impossible speedups when using JIT in python. What's going on?
38,820,207
<p>I'm trying to test the speed improvement of using JIT in python. Here's the code I'm using.</p> <pre class="lang-py prettyprint-override"><code>from numba import jit import timeit @jit # Commented out when testing pure python def sumof(): x = 0 for i in xrange(1000000000): x += 1 return x def timer(): sumof() # Run once to initialize the JIT compiler l = [] t = timeit.default_timer() for x in range(10): l.append(sumof()) return timeit.default_timer()-t, l # Returns the time elapsed and the list of results, to verify accuracy print timer() </code></pre> <p>This gives a result similar to this</p> <pre><code>(5.643910299113486e-06, [1000000000, 1000000000, 1000000000, 1000000000, 1000000000, 1000000000, 1000000000, 1000000000, 1000000000, 1000000000]) </code></pre> <p>Now I know raw CPU performance is orders of magnitude faster than pure python, but 10 billion operations in 5 microseconds?? I tested this same code, but using the maximum value of a signed 64 bit integer instead of just a billion. This was the result. </p> <pre><code>(5.643909389618784e-06, [9223372036854775807L, 9223372036854775807L, 9223372036854775807L, 9223372036854775807L, 9223372036854775807L, 9223372036854775807L, 9223372036854775807L, 9223372036854775807L, 9223372036854775807L, 9223372036854775807L]) </code></pre> <p>~ 92 quadrillion operations... in 5 microseconds. By my math, that's about 16 septillion operations a second. Something is obviously wrong, yet it's giving the correct result. I suspect the compiler is getting rid of the for loop somehow, but why? And how can I prevent it?</p>
0
2016-08-08T01:42:28Z
38,820,740
<p>It seems pretty likely that the optimizer has replaced the loop with a single constant calculation. See <a href="https://en.wikipedia.org/wiki/Loop_optimization" rel="nofollow">https://en.wikipedia.org/wiki/Loop_optimization</a> for a list of classic loop optimizations. In this case, completely unrolling the loop, then combining all the constants, results in <code>return n</code> (with x += 1) or <code>return n * b</code> (with x += b). Using <code>x += i</code> results in <code>return n * (n + 1) / 2</code>. (In each case <code>n</code> is the appropriate upper loop bound: when summing <code>i</code> in <code>range(n)</code>, it's really just <code>n-1</code> instead.)</p> <p>Because this is a JIT compiler, it can do this even for variable <code>n</code>, although in your examples each <code>n</code> is a constant, so even non-JIT compilers can do this.</p>
1
2016-08-08T03:07:47Z
[ "python", "jit" ]
Generate categorised data set from list in O(n)
38,820,229
<p>I am trying to generate a data set with the following structure from a list of items with properties <code>prop1</code> and <code>prop2</code>:</p> <p><code>result[p1][p2] =&gt; list of item with prop1=p1 and prop2=p2</code></p> <p>I have been able to do it in O(n2) with:</p> <pre><code>result = { item.prop1: { item.prop2: [ i for i in item_list if i.prop1 == item.prop1 and i.prop2 == item.prop2 ] } for item in item_list } </code></pre> <p>But haven't been able to find a way to do it in less time. Is it possible to achieve this in O(n)?</p>
0
2016-08-08T01:45:27Z
38,820,256
<p>You should only have to iterate over the items once:</p> <pre><code>from collections import defaultdict result = defaultdict(lambda: defaultdict(list)) for item in item_list: result[item.prop1][item.prop2].append(item) </code></pre>
2
2016-08-08T01:50:11Z
[ "python", "algorithm" ]
Generate categorised data set from list in O(n)
38,820,229
<p>I am trying to generate a data set with the following structure from a list of items with properties <code>prop1</code> and <code>prop2</code>:</p> <p><code>result[p1][p2] =&gt; list of item with prop1=p1 and prop2=p2</code></p> <p>I have been able to do it in O(n2) with:</p> <pre><code>result = { item.prop1: { item.prop2: [ i for i in item_list if i.prop1 == item.prop1 and i.prop2 == item.prop2 ] } for item in item_list } </code></pre> <p>But haven't been able to find a way to do it in less time. Is it possible to achieve this in O(n)?</p>
0
2016-08-08T01:45:27Z
38,820,310
<p>A solution using a <code>dict</code> (as opposed to a <code>defaultdict</code>):</p> <pre><code>result = {} for item in item_list: result.setdefault(item.prop1, {}).setdefault(item.prop2, []).append(item) </code></pre>
1
2016-08-08T01:56:48Z
[ "python", "algorithm" ]
Python - Printing and Classes
38,820,311
<p>I've read more posts and documentation on <code>__str__</code> and <code>__repr__</code> than is healthy, consulted various texts, and still cannot resolve this printing issue, so I'm putting this out there. </p> <p>Below is a function I'd like to test. The code isn't mine, but I would like to know how exactly it works. I need to see a human-friendly printout of the results (i.e., no hex), so that I can play with it and generally, well, learn something because I truly have no clue how it's doing what it's doing. </p> <pre><code>def get_ordered_adoption_center_list(adopter, list_of_adoption_centers): """ The method returns a list of an organized adoption_center such that the scores for each AdoptionCenter to the Adopter will be ordered from highest score to lowest score. """ list_of_adoption_centers.sort(key=lambda center:center.name) list_of_adoption_centers.sort(key=lambda center:adopter.get_score(center), reverse=True) return list_of_adoption_centers </code></pre> <p>Below is a relevant cross-section of code that it draws upon, which I did write.</p> <pre><code>import random import string class AdoptionCenter: """ The AdoptionCenter class stores the important information that a client would need to know about, such as the different numbers of species stored, the location, and the name. It also has a method to adopt a pet. 
""" def __init__(self, name, species_types, location): self.name = name self.species_types = species_types self.location = (float(location[0]), float(location[1])) def get_number_of_species(self, species): return self.species_types.get(species, 0) def get_location(self): return self.location def get_species_count(self): return self.species_types.copy() def get_name(self): return self.name def adopt_pet(self, species): self.species_types[species] = self.species_types[species] - 1 if self.species_types[species] &lt;= 0: del self.species_types[species] def __str__(self): return "%s" % (self.name) class Adopter: """ Adopters represent people interested in adopting a species. They have a desired species type that they want, and their score is simply the number of species that the shelter has of that species. """ def __init__(self, name, desired_species): self.name = name self.desired_species = desired_species def get_name(self): return self.name def get_desired_species(self): return self.desired_species def get_score(self, adoption_center): num_desired = adoption_center.get_number_of_species(self.desired_species) score = float(1 * num_desired) return score def __str__(self): return "%s and score is %d" % (self.name, self.get_score) class FlexibleAdopter(Adopter): """ A FlexibleAdopter still has one type of species that they desire, but they are also alright with considering other types of species. 
considered_species is a list containing the other species the adopter will consider Their score should be 1x their desired species + .3x all of their desired species """ def __init__(self, name, desired_species, considered_species): Adopter.__init__(self, name, desired_species) self.considered_species = considered_species def get_score(self, adoption_center): num_Other = 0 for animal in self.considered_species: if adoption_center.get_number_of_species(animal) &gt; 0: num_Other += adoption_center.get_number_of_species(animal) adopter_score = Adopter.get_score(self, adoption_center) score = adopter_score + 0.3 * num_Other return score def __str__(self): return "%s and score is %d" % (self.name, self.get_score) class FearfulAdopter(Adopter): """ A FearfulAdopter is afraid of a particular species of animal. If the adoption center has one or more of those animals in it, they will be a bit more reluctant to go there due to the presence of the feared species. Their score should be 1x number of desired species - .3x the number of feared species """ def __init__(self, name, desired_species, feared_species): Adopter.__init__(self, name, desired_species) self.feared_species = feared_species def get_score(self, adoption_center): num_feared = adoption_center.get_number_of_species(self.feared_species) adopter_score = Adopter.get_score(self, adoption_center) score = adopter_score - (0.3 * num_feared) return max(0.0, score) def __str__(self): return "%s and score is %d" % (self.name, self.get_score) class AllergicAdopter(Adopter): """ An AllergicAdopter is extremely allergic to a one or more species and cannot even be around it a little bit! If the adoption center contains one or more of these animals, they will not go there. 
Score should be 0 if the center contains any of the animals, or 1x number of desired animals if not """ def __init__(self, name, desired_species, allergic_species): Adopter.__init__(self, name, desired_species) self.allergic_species = allergic_species def get_score(self, adoption_center): for animal in self.allergic_species: if animal in adoption_center.get_species_count().keys(): return 0.0 return 1.0 * adoption_center.get_number_of_species(self.desired_species) def __str__(self): return "%s and score is %d" % (self.name, self.get_score) </code></pre> <p>I've tried placing <code>__str__</code> and <code>__repr__</code> methods in the various classes. What I've read suggests <code>__str__</code> is what I'm after. I've also tried placing a simple "for loop" with a print statement in the function body itself. This latter approach lead to a screen full of errors. I've printed using <code>__str__</code> before with success, but everything seems to be failing me on this one. Any insight would be appreciated. </p>
-1
2016-08-08T01:57:20Z
38,820,891
<p>As per your comment, you are printing these within a list. The <code>print</code> function retrieves the string representation of each argument to be printed, defined by <code>__str__</code>. The string representation of a list consists of square brackets enclosing a comma+space-separated sequence of the <code>repr</code> representation of each item, defined by <code>__repr__</code>.</p> <pre><code>&gt;&gt;&gt; class A: ... def __str__(self): ... return 's' ... def __repr__(self): ... return 'r' ... &gt;&gt;&gt; l = [A(), A()] &gt;&gt;&gt; print(l) [r, r] &gt;&gt;&gt; print(*l) s s </code></pre> <p>If you want to see the human-readable string representation of each item, either pass them directly to the <code>print</code> function with the <code>*</code> unpacking operator, or loop over that list and call <code>print</code> on each item. If you want to see a human-readable string representation of each item when you directly print the list that holds them, you can define a <code>__repr__</code> that returns the same thing as the <code>__str__</code> method does, but it's not recommended, as that's supposed to be a way of reproducing the object with <code>eval</code>.</p>
1
2016-08-08T03:28:09Z
[ "python", "python-2.7", "oop", "methods", "printing" ]
Faster way to remove a dictionary of phrase from a list of string using Python
38,820,345
<p>I have to remove a dictionary of phrase from a list of string using Python</p> <p>A list of strings L1. Example: <code>L1 = ['Programmer New York', 'Programmer San Francisco']</code></p> <p>A dictionary of phrase L2 (all of them are more than one word). Example: <code>L2={'New York', 'San Francisco'}</code></p> <p>The expected output is, for each string in L1, remove substring that exists in L2. So the output will be <code>res=['Programmer', 'Programmer']</code>.</p> <pre><code>def foo(L1, L2): res = [] print len(L1) for i in L1: for j in L2: if j in i: i = i.replace(j, "") res.append(i) return res </code></pre> <p>My current program is a brute force double for loop. But is it possible to improve the performance? Especially when L1 size is very large.</p>
0
2016-08-08T02:03:25Z
38,820,619
<p>Try using map() and re,</p> <pre><code>import re res = map(lambda i, j: re.sub(" "+i, '', j), L2, L1) </code></pre> <p>The double quotes before the i are there to eliminate the trailing space after programmer.</p> <pre><code>return list(res) </code></pre> <p>P.S. returning a list explicitly is only necessary if you are using Python 3. Let me know if this improves your speed at all.</p>
1
2016-08-08T02:46:10Z
[ "python" ]
Faster way to remove a dictionary of phrase from a list of string using Python
38,820,345
<p>I have to remove a dictionary of phrase from a list of string using Python</p> <p>A list of strings L1. Example: <code>L1 = ['Programmer New York', 'Programmer San Francisco']</code></p> <p>A dictionary of phrase L2 (all of them are more than one word). Example: <code>L2={'New York', 'San Francisco'}</code></p> <p>The expected output is, for each string in L1, remove substring that exists in L2. So the output will be <code>res=['Programmer', 'Programmer']</code>.</p> <pre><code>def foo(L1, L2): res = [] print len(L1) for i in L1: for j in L2: if j in i: i = i.replace(j, "") res.append(i) return res </code></pre> <p>My current program is a brute force double for loop. But is it possible to improve the performance? Especially when L1 size is very large.</p>
0
2016-08-08T02:03:25Z
38,820,713
<p>You can use list comprehension to do so as:</p> <pre><code>l1 = ['Programmer New York', 'Programmer San Francisco'] l2=['New York', 'San Francisco'] a=[x.split(y) for x in l1 for y in l2 if y in x] res=["".join(x) for x in a] </code></pre>
0
2016-08-08T03:02:33Z
[ "python" ]
How to specify log file name with spider's name in scrapy?
38,820,540
<p>I'm using scrapy,in my scrapy project,I created several spider classes,as the official document said,I used this way to specify log file name: </p> <pre><code> def logging_to_file(file_name): """ @rtype: logging @type file_name:str @param file_name: @return: """ import logging from scrapy.utils.log import configure_logging configure_logging(install_root_handler=False) logging.basicConfig( filename=filename+'.txt', filemode='a', format='%(levelname)s: %(message)s', level=logging.DEBUG, ) Class Spider_One(scrapy.Spider): name='xxx1' logging_to_file(name) ...... Class Spider_Two(scrapy.Spider): name='xxx2' logging_to_file(name) ...... </code></pre> <p>Now,if I start <code>Spider_One</code>,everything is correct!But,if I start <code>Spider Two</code>,the log file of <code>Spider Two</code> will also be named with the name of <code>Spider One</code>!<br> I have searched many answers from google and stackoverflow,but unfortunately,none worked!<br> I am using python 2.7 &amp; scrapy 1.1!<br> Hope anyone can help me!</p>
0
2016-08-08T02:33:27Z
38,823,821
<p>It's because you initiate <code>logging_to_file</code> every time when you load up your package. You are using a class variable here where you should use instance variable.</p> <p>When python loads in your package or module ir loads every class and so on.</p> <pre><code>class MyClass: # everything you do here is loaded everytime when package is loaded name = 'something' def __init__(self): # everything you do here is loaded ONLY when the object is created # using this class </code></pre> <p>To resolve your issue just move <code>logging_to_file</code> function call to your spiders <code>__init__()</code> method.</p> <pre><code>class MyClass(Spider): name = 'xx1' def __init__(self): super(MyClass, self).__init__() logging_to_file(self.name) </code></pre>
1
2016-08-08T07:49:06Z
[ "python", "scrapy" ]
Python: Installing gooey using pip error
38,820,550
<p>I am trying to install Gooey for python and I keep on getting this error in cmd ... I installed the latest version of pip and am running on the latest version of python:</p> <pre><code>C:\Users\markj&gt;pip install Gooey Collecting Gooey Using cached Gooey-0.9.2.3.zip Complete output from command python setup.py egg_info: Traceback (most recent call last): File "&lt;string&gt;", line 1, in &lt;module&gt; File "C:\Users\markj\AppData\Local\Temp\pip-build-ti2h9xu3\Gooey\setup.py", line 9, in &lt;module&gt; version = __import__('gooey').__version__ File "C:\Users\markj\AppData\Local\Temp\pip-build-ti2h9xu3\Gooey\gooey\__init__.py", line 2, in &lt;module&gt; from gooey.python_bindings.gooey_decorator import Gooey File "C:\Users\markj\AppData\Local\Temp\pip-build-ti2h9xu3\Gooey\gooey\python_bindings\gooey_decorator.py", line 54 except Exception, e: ^ SyntaxError: invalid syntax ---------------------------------------- Command "python setup.py egg_info" failed with error code 1 in C:\Users\markj\AppData\Local\Temp\pip-build-ti2h9xu3\Gooey\ </code></pre> <p>Can someone please help me? Thank you!</p>
0
2016-08-08T02:34:50Z
38,820,637
<p>You could try downloading the .zip file from the official website here <a href="http://chriskiehl.github.io/Gooey/" rel="nofollow">http://chriskiehl.github.io/Gooey/</a> and try downloading it to see if that will work.</p>
1
2016-08-08T02:50:18Z
[ "python", "cmd", "install", "pip" ]
Python: Installing gooey using pip error
38,820,550
<p>I am trying to install Gooey for python and I keep on getting this error in cmd ... I installed the latest version of pip and am running on the latest version of python:</p> <pre><code>C:\Users\markj&gt;pip install Gooey Collecting Gooey Using cached Gooey-0.9.2.3.zip Complete output from command python setup.py egg_info: Traceback (most recent call last): File "&lt;string&gt;", line 1, in &lt;module&gt; File "C:\Users\markj\AppData\Local\Temp\pip-build-ti2h9xu3\Gooey\setup.py", line 9, in &lt;module&gt; version = __import__('gooey').__version__ File "C:\Users\markj\AppData\Local\Temp\pip-build-ti2h9xu3\Gooey\gooey\__init__.py", line 2, in &lt;module&gt; from gooey.python_bindings.gooey_decorator import Gooey File "C:\Users\markj\AppData\Local\Temp\pip-build-ti2h9xu3\Gooey\gooey\python_bindings\gooey_decorator.py", line 54 except Exception, e: ^ SyntaxError: invalid syntax ---------------------------------------- Command "python setup.py egg_info" failed with error code 1 in C:\Users\markj\AppData\Local\Temp\pip-build-ti2h9xu3\Gooey\ </code></pre> <p>Can someone please help me? Thank you!</p>
0
2016-08-08T02:34:50Z
38,820,739
<p>You're probably running the package under python 3 which is not fully compatible with the package. </p> <p>That exception means your python can't understand the syntax. In python 3 the comma (<code>,</code>) <a href="https://www.python.org/dev/peps/pep-3110/#grammar-changes" rel="nofollow">is not allowed</a>, correct syntax is:</p> <pre><code>except Exception as e </code></pre> <p>Simple test on python 3:</p> <pre><code>$ python Python 3.4.2 (default, Oct 8 2014, 10:45:20) [GCC 4.9.1] on linux Type "help", "copyright", "credits" or "license" for more information. &gt;&gt;&gt; try: ... assert 1 + 1 == 3 ... except Exception, e: File "&lt;stdin&gt;", line 3 except Exception, e: ^ SyntaxError: invalid syntax &gt;&gt;&gt; </code></pre> <p>With python 2.7.9:</p> <pre><code>$ python Python 2.7.9 (default, Mar 1 2015, 12:57:24) [GCC 4.9.2] on linux2 Type "help", "copyright", "credits" or "license" for more information. &gt;&gt;&gt; try: ... assert 1 + 1 == 3 ... except Exception, e: ... print 'false' ... false &gt;&gt;&gt; </code></pre> <p>So, try to install your package in python2, it should be fine.</p> <p>EDIT 1: According to <a href="https://github.com/chriskiehl/Gooey/issues/65" rel="nofollow">this issue</a> that package is compatible with python 2 only.</p>
0
2016-08-08T03:07:44Z
[ "python", "cmd", "install", "pip" ]
Printing specific records - error message says String indices must be integer
38,820,554
<p>I am struggling with a solution to my python code.<br> I want to print specific column_names (e.g. <strong>Date</strong>, <strong>Open</strong> etc...). When I attempt to print the <strong>Date</strong> record I get an error <code>String indices must be integers</code>. Below is a copy of my code and the json file.<br> What am I missing?</p> <pre><code>import json, urllib import quandl url = "https://www.quandl.com/api/v3/datasets/WIKI/FB.json?" loaded = urllib.urlopen(url).read() data = json.loads(loaded) for Date in data['dataset']['dataset_code']['Date']: print(Date) </code></pre> <p>JSON file</p> <pre><code>{ "dataset": { "dataset_code": "FB", "column_names": [ "Date", "Open", "High", "Low", "Close", "Volume", "Ex-Dividend", "Split Ratio", "Adj. Open", "Adj. High", "Adj. Low", "Adj. Close", "Adj. Volume" ], </code></pre>
0
2016-08-08T02:35:27Z
38,820,600
<p><code>"dataset_code"</code> is a string.</p> <p>When you do <code>data['dataset']['dataset_code']</code>, the result is <code>"FB"</code>. <code>"FB"</code> is a string, and therefore can't be indexed by another string. <code>data['dataset']['dataset_code']['Date']</code> is, in your example file, equivalent to <code>"FB"['Date']</code>. You can only use the <code>[]</code> operators on strings like <code>"FB"</code> if you're putting a number (an "<code>integer</code>") inside.</p> <hr> <p>To do anything meaningful with this data, you might want to restructure the items into <code>dict</code>s. This will make the data a lot easier to work with. You could use this code to get a list of dicts:</p> <pre><code>cnames = data["dataset"]["column_names"] entries = data["dataset"]["data"] data_dict = [{c: entry[i] for i, c in enumerate(cnames)} for entry in entries] </code></pre> <p>Now, your data is in the form of</p> <pre><code>[ { "Volume": 20184035.0, "Ex-Dividend": 0.0, "Adj. High": 125.835, "Adj. Close": 125.15, "Adj. Volume": 20184035.0, "High": 125.835, "Adj. Low": 124.6184, "Adj. Open": 124.98, "Low": 124.6184, "Date": "2016-08-05", "Close": 125.15, "Split Ratio": 1.0, "Open": 124.98 }, { "Volume": 21065974.0, "Ex-Dividend": 0.0, "Adj. High": 124.79, "Adj. Close": 124.36, "Adj. Volume": 21065974.0, "High": 124.79, "Adj. Low": 122.51, "Adj. Open": 122.94, "Low": 122.51, "Date": "2016-08-04", "Close": 124.36, "Split Ratio": 1.0, "Open": 122.94 }, ] </code></pre> <p>With this new format, selecting all values for an individual row is very simple. 
To get all the dates from the dataset, just use:</p> <pre><code>[x["Date"] for x in data2] </code></pre> <p>For convenience, wrap this in a function:</p> <pre><code>def get_all_values(key): return [x[key] for x in data2] </code></pre> <p>Now, you can use <code>get_all_values("Date")</code> or <code>get_all_values("Open")</code>.</p> <p>Your full code would be:</p> <pre><code>import json, urllib import quandl url = "https://www.quandl.com/api/v3/datasets/WIKI/FB.json?" loaded = urllib.urlopen(url).read() data = json.loads(loaded) cnames = data["dataset"]["column_names"] entries = data["dataset"]["data"] data2 = [{c: entry[i] for i, c in enumerate(cnames)} for entry in entries] def get_all_values(key): return [x[key] for x in data2] # Get all the dates from the dataset for Date in get_all_values("Date"): print(Date) </code></pre> <p>Hope I helped!</p>
1
2016-08-08T02:42:17Z
[ "python" ]
Can't get AJAX POST working in Django
38,820,720
<p>I can't do a POST ajax request in Django. If I do a GET request, it works fine. I think it may be a csrf token problem, but I can get it to work</p> <p>My view:</p> <pre><code>@login_required def add_share_points(request): user = request.user profile = request.user.profile if request.method == 'POST': value = 5.0 # Premia o usuário ao compartilhar conteúdo VirtualCurrencyTransaction.objects.create( user=request.user, reason=2, description='Você completou seu perfil', value=value ) return "ok" </code></pre> <p>My AJAX request:</p> <pre><code>$('.my-button').on('click', function(e) { e.preventDefault(); var pointsCount = $(this).hasClass('homepage-facebook-share') ? 3 : 2; $.ajax({ type:"POST", url: "/add_share_points", data: { points: pointsCount, } }).done(function() { alert('Posting completed.'); }).fail(function(){ alert('Error while posting.'); }); }); </code></pre> <p>In my script, I also have this setting:</p> <pre><code>function csrfSafeMethod(method) { return (/^(GET|HEAD|OPTIONS|TRACE)$/).test(method); } $.ajaxSetup({ crossDomain: false, beforeSend: function(xhr, settings) { if (!csrfSafeMethod(settings.type)) { xhr.setRequestHeader('X-CSRFToken', CSRF_TOKEN); } } }); </code></pre> <p>What is wrong with my code? It gave my a 500 error code, but no further explanation in the logs.</p>
0
2016-08-08T03:04:17Z
38,833,785
<p>I will point out several things to correct, some are just ways to do it in a django manner and not problems.</p> <p><strong>In your view</strong></p> <pre><code> return HttpResponse( json.dumps({'result': 'ok',}), content_type="application/json" ) </code></pre> <p><strong>In your ajax</strong></p> <pre><code>url: "/add_share_points", </code></pre> <p>should be:</p> <pre><code>url : {% url '&lt;name in url.py&gt;' %}, </code></pre> <p>and you need to add (to the data object):</p> <pre><code> csrfmiddlewaretoken: '{{ csrf_token }}' </code></pre> <p>Inside the ajax request, insert this after data:</p> <pre><code>// handle a successful response success : function(json) { if (json.result=== 'ok'){ console.log('It works!'); }else{ console.log('Something wrong with response'); } // handle a non-successful response error : function(xhr,errmsg,err) { console.log(err); } </code></pre> <p><strong>In your script</strong></p> <p>instead of <code>CSRF_TOKEN</code> use <code>'{{ csrf_token }}'</code></p> <blockquote> <p>Please use my suggestions and give me feedback and I will update the answer. The two with <code>csfrtoken</code> are probably the problems your having. If you put Django in Debug mode it will be easyer to find out.</p> </blockquote> <p><strong>My Suggestion</strong></p> <p>Create a <a href="https://docs.djangoproject.com/en/1.10/topics/forms/" rel="nofollow">form</a> with what you need to post to gain some features in the validation process.</p>
2
2016-08-08T16:01:58Z
[ "python", "ajax", "django", "python-3.x" ]
AttributeError: 'str' object has no attribute 'get' error in code
38,820,735
<p>I'm trying to make a login style screen and have the frame behind raised if the password matches I get the error <code>AttributeError: 'str' object has no attribute 'get'</code> when I run my program :</p> <pre><code>class StartPage(tk.Frame): entry = "placeholder" def framechange(self): if self.entry.get() == "password": command = lambda: controller.show_frame("PageOne") def __init__(self, parent, controller): tk.Frame.__init__(self, parent) self.controller = controller label = tk.Label(self, text="Welcome", font=TITLE_FONT) label.pack(side="top", fill="x", pady=10) entry = tk.Entry(self, show="•") entry.pack(side="top", fill="x", pady=10, padx=10) button1 = tk.Button(self, text="Login",command = self.framechange) button1.pack() </code></pre> <p>Many thanks</p>
0
2016-08-08T03:06:06Z
38,820,747
<p>There are no <code>get()</code> requests called from inside the class. Also, you are missing a <code>self</code> in your input statement:</p> <p>Try:</p> <pre><code>class StartPage(tk.Frame): entry = "placeholder" def framechange(self): if self.entry == "password": # Changed command = lambda: controller.show_frame("PageOne") def __init__(self, parent, controller): tk.Frame.__init__(self, parent) self.controller = controller label = tk.Label(self, text="Welcome", font=TITLE_FONT) label.pack(side="top", fill="x", pady=10) self.entry = tk.Entry(self, show="•") # Changed self.entry.pack(side="top", fill="x", pady=10, padx=10) # Changed button1 = tk.Button(self, text="Login",command = self.framechange) button1.pack() </code></pre>
0
2016-08-08T03:08:34Z
[ "python", "get" ]
Denoise and filter an image
38,820,743
<p>I am doing a <strong>license-plate recognition</strong>. I have crop out the plate but it is very <strong>blurred</strong>. Therefore I cannot split out the digits/characters and recognize it.</p> <p>Here is my image:</p> <p><a href="http://i.stack.imgur.com/qQ8CF.jpg"><img src="http://i.stack.imgur.com/qQ8CF.jpg" alt="enter image description here"></a></p> <p>I have tried to <strong>denoise</strong> it through using <strong>scikit image</strong> function.</p> <p>First, import the libraries:</p> <pre><code>import cv2 from skimage import restoration from skimage.filters import threshold_otsu, rank from skimage.morphology import closing, square, disk </code></pre> <p>then, I read the image and convert it to <strong>gray scale</strong></p> <pre><code>image = cv2.imread("plate.jpg") image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) </code></pre> <p>I try to <strong>remove the noise</strong>:</p> <pre><code>denoise = restoration.denoise_tv_chambolle(image , weight=0.1) thresh = threshold_otsu(denoise) bw = closing(denoise &gt; thresh, square(2)) </code></pre> <p>What I got is :</p> <p><a href="http://i.stack.imgur.com/udt2O.jpg"><img src="http://i.stack.imgur.com/udt2O.jpg" alt="enter image description here"></a></p> <p>As you can see, all the digits are <strong>mixed together</strong>. Thus, I <strong>cannot separate</strong> them and recognize the characters one by one.</p> <p>What I expect is something like this (I draw it):</p> <p><a href="http://i.stack.imgur.com/OHQN3.png"><img src="http://i.stack.imgur.com/OHQN3.png" alt="enter image description here"></a></p> <p>I am looking for help how can I better filter the image? Thank you.</p> <p>===================================================================== <strong>UPDATE</strong>:</p> <p>After using <code>skimage.morphology.erosion</code>, I got:</p> <p><a href="http://i.stack.imgur.com/Z0Pak.jpg"><img src="http://i.stack.imgur.com/Z0Pak.jpg" alt="enter image description here"></a></p>
9
2016-08-08T03:08:06Z
38,959,356
<p>First, this image seems to be more defaced by blur, than by noize, so there are no good reasons to denoise it, try debluring instead. </p> <p>The simplest would be inverse filtering or even Wiener filtering. Then you'll need to separate image's background from letters by the level of luminosity for example with watershed algorithm. Then you'll get separate letters which you need to pass through one of classifiers for example, based on neural networks (even simplistic feed-forward net would be ok).</p> <p>And then you'll finally get the textual representation. That's how such recognitions are usually made. There's good <a href="http://rads.stackoverflow.com/amzn/click/013168728X">book by Gonzalez&amp;Woods</a>, try looking for detailed explaination there.</p>
6
2016-08-15T16:49:36Z
[ "python", "image", "opencv", "image-processing", "scikit-image" ]
Denoise and filter an image
38,820,743
<p>I am doing a <strong>license-plate recognition</strong>. I have crop out the plate but it is very <strong>blurred</strong>. Therefore I cannot split out the digits/characters and recognize it.</p> <p>Here is my image:</p> <p><a href="http://i.stack.imgur.com/qQ8CF.jpg"><img src="http://i.stack.imgur.com/qQ8CF.jpg" alt="enter image description here"></a></p> <p>I have tried to <strong>denoise</strong> it through using <strong>scikit image</strong> function.</p> <p>First, import the libraries:</p> <pre><code>import cv2 from skimage import restoration from skimage.filters import threshold_otsu, rank from skimage.morphology import closing, square, disk </code></pre> <p>then, I read the image and convert it to <strong>gray scale</strong></p> <pre><code>image = cv2.imread("plate.jpg") image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) </code></pre> <p>I try to <strong>remove the noise</strong>:</p> <pre><code>denoise = restoration.denoise_tv_chambolle(image , weight=0.1) thresh = threshold_otsu(denoise) bw = closing(denoise &gt; thresh, square(2)) </code></pre> <p>What I got is :</p> <p><a href="http://i.stack.imgur.com/udt2O.jpg"><img src="http://i.stack.imgur.com/udt2O.jpg" alt="enter image description here"></a></p> <p>As you can see, all the digits are <strong>mixed together</strong>. Thus, I <strong>cannot separate</strong> them and recognize the characters one by one.</p> <p>What I expect is something like this (I draw it):</p> <p><a href="http://i.stack.imgur.com/OHQN3.png"><img src="http://i.stack.imgur.com/OHQN3.png" alt="enter image description here"></a></p> <p>I am looking for help how can I better filter the image? Thank you.</p> <p>===================================================================== <strong>UPDATE</strong>:</p> <p>After using <code>skimage.morphology.erosion</code>, I got:</p> <p><a href="http://i.stack.imgur.com/Z0Pak.jpg"><img src="http://i.stack.imgur.com/Z0Pak.jpg" alt="enter image description here"></a></p>
9
2016-08-08T03:08:06Z
38,971,196
<p>I concur with the opinion that you should probably try to optimize the input image quality.</p> <p>Number plate blur is a typical example of motion blur. How well you can deblur depends upon how big or small is the blur radius. Generally greater the speed of the vehicle, larger the blur radius and therefore more difficult to restore.</p> <p>A simple solution that somewhat works is de-interlacing of images.</p> <p><a href="http://i.stack.imgur.com/dwNUD.png"><img src="http://i.stack.imgur.com/dwNUD.png" alt="enter image description here"></a></p> <p>Note that it is only slightly more readable than your input image. Here I have dropped every alternate line and resized the image to half its size using PIL/Pillow and this is what I get:</p> <pre><code>from PIL import Image img=Image.open("license.jpeg") size=list(img.size) size[0] /= 2 size[1] /= 2 smaller_image=img.resize(size, Image.NEAREST) smaller_image.save("smaller_image.png") </code></pre> <p>The next and more formal approach is <strong>deconvolution</strong>.</p> <p>Since blurring is achieved using convolution of images, deblurring requires doing the inverse of convolution or deconvolution of the image. There are various kinds of deconvolution algorithms like the Wiener deconvolution, Richardson-Lucy method, Radon transform and a few types of Bayesian filtering.</p> <p>You can apply Wiener deconvolution algorithm using this <a href="https://github.com/opencv/opencv/blob/master/samples/python/deconvolution.py">code</a>. Play with the angle, diameter and signal to noise ratio and see if it provides some improvements.</p> <p>The <code>skimage.restoration</code> module also provides implementation of both <code>unsupervised_wiener</code> and <code>richardson_lucy</code> deconvolution. 
</p> <p>In the code below I have shown both the implementations but you will have to modify the psf to see which one suits better.</p> <pre><code>import numpy as np import matplotlib.pyplot as plt import cv2 from skimage import color, data, restoration from scipy.signal import convolve2d as conv2 img = cv2.imread('license.jpg') licence_grey_scale = color.rgb2gray(img) psf = np.ones((5, 5)) / 25 # comment/uncomment next two lines one by one to see unsupervised_wiener and richardson_lucy deconvolution deconvolved, _ = restoration.unsupervised_wiener(licence_grey_scale, psf) deconvolved = restoration.richardson_lucy(licence_grey_scale, psf) fig, ax = plt.subplots() plt.gray() ax.imshow(deconvolved) ax.axis('off') plt.show() </code></pre> <p>Unfortunately most of these deconvolution alogirthms require you to know in advance the blur kernel (aka the Point Spread Function aka PSF). </p> <p>Here since you do not know the PSF, so you will have to use blind deconvolution. Blind deconvolution attempts to estimate the original image without any knowledge of the blur kernel.</p> <p>I have not tried this with your image but here is a Python implementation of blind deconvolution algorithm: <a href="https://github.com/alexis-mignon/pydeconv">https://github.com/alexis-mignon/pydeconv</a></p> <p>Note that an effective general purpose blind deconvolution algorithms has not yet been found and is an active field of research.</p>
7
2016-08-16T09:27:06Z
[ "python", "image", "opencv", "image-processing", "scikit-image" ]
Denoise and filter an image
38,820,743
<p>I am doing a <strong>license-plate recognition</strong>. I have crop out the plate but it is very <strong>blurred</strong>. Therefore I cannot split out the digits/characters and recognize it.</p> <p>Here is my image:</p> <p><a href="http://i.stack.imgur.com/qQ8CF.jpg"><img src="http://i.stack.imgur.com/qQ8CF.jpg" alt="enter image description here"></a></p> <p>I have tried to <strong>denoise</strong> it through using <strong>scikit image</strong> function.</p> <p>First, import the libraries:</p> <pre><code>import cv2 from skimage import restoration from skimage.filters import threshold_otsu, rank from skimage.morphology import closing, square, disk </code></pre> <p>then, I read the image and convert it to <strong>gray scale</strong></p> <pre><code>image = cv2.imread("plate.jpg") image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) </code></pre> <p>I try to <strong>remove the noise</strong>:</p> <pre><code>denoise = restoration.denoise_tv_chambolle(image , weight=0.1) thresh = threshold_otsu(denoise) bw = closing(denoise &gt; thresh, square(2)) </code></pre> <p>What I got is :</p> <p><a href="http://i.stack.imgur.com/udt2O.jpg"><img src="http://i.stack.imgur.com/udt2O.jpg" alt="enter image description here"></a></p> <p>As you can see, all the digits are <strong>mixed together</strong>. Thus, I <strong>cannot separate</strong> them and recognize the characters one by one.</p> <p>What I expect is something like this (I draw it):</p> <p><a href="http://i.stack.imgur.com/OHQN3.png"><img src="http://i.stack.imgur.com/OHQN3.png" alt="enter image description here"></a></p> <p>I am looking for help how can I better filter the image? Thank you.</p> <p>===================================================================== <strong>UPDATE</strong>:</p> <p>After using <code>skimage.morphology.erosion</code>, I got:</p> <p><a href="http://i.stack.imgur.com/Z0Pak.jpg"><img src="http://i.stack.imgur.com/Z0Pak.jpg" alt="enter image description here"></a></p>
9
2016-08-08T03:08:06Z
38,987,741
<p><a href="http://i.stack.imgur.com/aHPAa.jpg" rel="nofollow"><img src="http://i.stack.imgur.com/aHPAa.jpg" alt="result using ChanVeseBinarize with binarized kernel "></a></p> <p>ChanVeseBinarize with an image enhanced binarized kernel gave me this result. This is helpful to highlight 4,8,1 and 2. I guess you need to do separate convolution with each character and if the peak of the convolution is higher than a threshold we can assume that letter to be present at the location of the peak. To take care of distortion, you need to do the convolution with few different types of font of a given character.</p> <p><a href="http://i.stack.imgur.com/OJGA8.png" rel="nofollow"><img src="http://i.stack.imgur.com/OJGA8.png" alt="enter image description here"></a></p> <p>Another potential improvement using derivative filter and little bit of Gaussian smoothing. The K &amp; X are not as distorted as the previous solution.</p>
1
2016-08-17T03:40:07Z
[ "python", "image", "opencv", "image-processing", "scikit-image" ]
Convert all columns in a Dataframe to a single secondary index in a MultiIndex Dataframe
38,820,879
<p>I have a dataframe called Pop that looks like the following (Sorry, this doesn't format very well, but it's a single Index DataFrame (State is the index) with columns for each year. I only put a small sample of the data):</p> <pre><code> 2007 2008 2009 State Alabama 4637904 4677464 4708708 </code></pre> <p>Is there a way to easily convert this to a MultiIndex DataFrame where the Year becomes a secondary index. The only item is a new field 'Population' that takes the data from the original dataFrame. It would ideally look something like:</p> <pre><code> Population State Year Alabama 2007 4637904 Alabama 2008 4677464 Alabama 2009 4708708 </code></pre> <p>etc...</p> <p>I've been looking for some kind of code since this seems like something that could be handled in Python. Thank you!</p>
0
2016-08-08T03:26:50Z
38,821,117
<p>You can use <a href="http://pandas.pydata.org/pandas-docs/stable/generated/pandas.melt.html" rel="nofollow">melt</a> to convert columns into rows :</p> <pre><code>In [10]: df Out[10]: state 2007 2008 2009 0 Alabama 4637904 4677464 4708708 1 Maine 1234567 1432765 1432567 2 Florida 19432765 19123456 19765654 In [13]: out = pd.melt(df, id_vars=['state'], var_name=['year'], value_name='population') In [14]: out Out[14]: state year population 0 Alabama 2007 4637904 1 Maine 2007 1234567 2 Florida 2007 19432765 3 Alabama 2008 4677464 4 Maine 2008 1432765 5 Florida 2008 19123456 6 Alabama 2009 4708708 7 Maine 2009 1432567 8 Florida 2009 19765654 </code></pre> <p>And finally setting the index :</p> <pre><code>In [15]: out.set_index(['state', 'year']) Out[15]: population state year Alabama 2007 4637904 Maine 2007 1234567 Florida 2007 19432765 Alabama 2008 4677464 Maine 2008 1432765 Florida 2008 19123456 Alabama 2009 4708708 Maine 2009 1432567 Florida 2009 19765654 </code></pre> <p>PS : I used random values for the states population</p>
1
2016-08-08T04:02:54Z
[ "python", "dataframe", "multi-index" ]
Invert first byte in file
38,820,954
<p>What is the most Pythonic way to replace the first byte of a file with its bitwise inversion? Currently, I use this code:</p> <pre><code>with open(file_path, 'r+b') as f: b = bytearray(f.read(1)) b[0] = ~b[0] &amp; 255 f.seek(0) f.write(b) </code></pre>
1
2016-08-08T03:37:38Z
38,821,127
<p>Here's an alternative that uses a memory-mapped file:</p> <pre><code>import mmap with open(file_path, 'r+b') as f, mmap.mmap(f.fileno(), 1) as mm: mm[0] ^= 255 </code></pre>
2
2016-08-08T04:04:20Z
[ "python", "python-3.x" ]
Import large CSV file with utf-8(Chinese chars) contents in Python 3.5
38,820,981
<p>First of all, I know there is a standard way of doing the task I state in the title. For example, </p> <pre class="lang-python prettyprint-override"><code>import csv with open('test.txt', encoding='utf-8') as f: reader = csv.reader(f) for row in reader: print(row) </code></pre> <p>I apply this code on my data file(~262MB) in a Jupyter terminal, I get this:</p> <pre><code>--------------------------------------------------------------------------- UnicodeDecodeError Traceback (most recent call last) &lt;ipython-input-21-cbed80c58499&gt; in &lt;module&gt;() 2 with open('CarRecord.txt', encoding='utf-8') as f: 3 reader = csv.reader(f) ----&gt; 4 for row in reader: 5 print(row) //anaconda/envs/py35/lib/python3.5/codecs.py in decode(self, input, final) 319 # decode input (taking the buffer into account) 320 data = self.buffer + input --&gt; 321 (result, consumed) = self._buffer_decode(data, self.errors, final) 322 # keep undecoded input until the next call 323 self.buffer = data[consumed:] UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa9 in position 74: invalid start byte </code></pre> <p>Okay, position 74 is at the first row of my data file, where the first Chinese char. comes up. So I do another quick test, which I copy the first few rows from my data file and paste them into another new file. 
I run the same code with the test file, and now it just works as normal as I would expect, without any error messages.</p> <p>Anyone has any ideas, please?....</p> <p>------updated following the ideas in the comment:-------</p> <pre class="lang-python prettyprint-override"><code>import csv with open('CarRecord.txt', mode='rb') as f: decoded_file = f.read().decode('utf-16') reader = csv.reader(decoded_file, delimiter=',') for row in reader: print(row) </code></pre> <p>now I get: </p> <pre><code>--------------------------------------------------------------------------- UnicodeDecodeError Traceback (most recent call last) &lt;ipython-input-37-3708b52ef0a3&gt; in &lt;module&gt;() 1 import csv 2 with open('CarRecord.txt', mode='rb') as f: ----&gt; 3 decoded_file = f.read().decode('utf-16') 4 reader = csv.reader(decoded_file, delimiter=',') 5 for row in reader: UnicodeDecodeError: 'utf-16-le' codec can't decode bytes in position 1780-1781: illegal UTF-16 surrogate </code></pre>
0
2016-08-08T03:42:14Z
38,822,652
<p>It is not a precise answer to the question. </p> <p>It turns out that the original data file -- although it contains unicode characters -- was encoded with ASCII. So I save a new data file and encode it with utf-8, and the standard method of reading CSV file just worked.</p>
0
2016-08-08T06:35:59Z
[ "python", "csv", "utf-8" ]
Scrapy Pull Same Data from Multiple Pages
38,821,027
<p>This is related to the previous question I wrote <a href="http://stackoverflow.com/questions/38781357/pro-football-reference-team-stats-xpath/38781659#38781659">here</a>. I am trying to pull the same data from multiple pages on the same domain. A small explanation, I'm trying to pull data like offensive yards, turnovers, etc from a bunch of different box scores on a <a href="http://www.pro-football-reference.com/years/2015/games.htm" rel="nofollow">main page</a>. Pulling the data from individual pages is working properly as is generation of the urls but when I try to have the spider cycle through all of the pages nothing is returned. I've looked through many other questions people have asked and the documentation and I can't figure out what is not working. Code is below. Thanks to anyone who's able to help in advance.</p> <pre><code>import scrapy from scrapy import Selector from nflscraper.items import NflscraperItem class NFLScraperSpider(scrapy.Spider): name = "pfr" allowed_domains = ['www.pro-football-reference.com/'] start_urls = [ "http://www.pro-football-reference.com/years/2015/games.htm" #"http://www.pro-football-reference.com/boxscores/201510110tam.htm" ] def parse(self,response): for href in response.xpath('//a[contains(text(),"boxscore")]/@href'): item = NflscraperItem() url = response.urljoin(href.extract()) request = scrapy.Request(url, callback=self.parse_dir_contents) request.meta['item'] = item yield request def parse_dir_contents(self,response): item = response.meta['item'] # Code to pull out JS comment - http://stackoverflow.com/questions/38781357/pro-football-reference-team-stats-xpath/38781659#38781659 extracted_text = response.xpath('//div[@id="all_team_stats"]//comment()').extract()[0] new_selector = Selector(text=extracted_text[4:-3].strip()) # Item population item['home_score'] = response.xpath('//*[@id="content"]/table/tbody/tr[2]/td[last()]/text()').extract()[0].strip() item['away_score'] = 
response.xpath('//*[@id="content"]/table/tbody/tr[1]/td[last()]/text()').extract()[0].strip() item['home_oyds'] = new_selector.xpath('//*[@id="team_stats"]/tbody/tr[6]/td[2]/text()').extract()[0].strip() item['away_oyds'] = new_selector.xpath('//*[@id="team_stats"]/tbody/tr[6]/td[1]/text()').extract()[0].strip() item['home_dyds'] = item['away_oyds'] item['away_dyds'] = item['home_oyds'] item['home_turn'] = new_selector.xpath('//*[@id="team_stats"]/tbody/tr[8]/td[2]/text()').extract()[0].strip() item['away_turn'] = new_selector.xpath('//*[@id="team_stats"]/tbody/tr[8]/td[1]/text()').extract()[0].strip() yield item </code></pre>
1
2016-08-08T03:49:32Z
38,821,054
<p>The subsequent requests you make <em>are filtered as offsite</em>; fix your <code>allowed_domains</code> setting:</p> <pre><code>allowed_domains = ['pro-football-reference.com'] </code></pre> <p>Worked for me.</p>
1
2016-08-08T03:52:45Z
[ "python", "scrapy" ]
Connecting to google using python stays blank
38,821,114
<p>I am learning Python and I am running into an issue.</p> <pre><code>from selenium import webdriver driver = webdriver.Firefox() driver.get('http://www.google.com') </code></pre> <p>My Firefox launches, but it does not go to google.com as it is supposed to; instead it stays as a blank page.</p>
2
2016-08-08T04:02:23Z
38,822,551
<p>I would recommend using chrome instead because your script does work. so... <a href="http://chromedriver.storage.googleapis.com/2.9/chromedriver_win32.zip" rel="nofollow">http://chromedriver.storage.googleapis.com/2.9/chromedriver_win32.zip</a> Download that and extract it to your <code>python27/scripts</code> folder located in the root directory in the <code>C:\\</code> Drive and then run this script.</p> <pre><code>from selenium import webdriver driver = webdriver.Chrome() driver.get('http://www.facebook.com') </code></pre> <p>And It should connect you to facebook.com Hope I helped. :)</p>
0
2016-08-08T06:30:05Z
[ "python", "selenium", "firefox" ]
Python Multiprocessing using Process: Consuming Large Memory
38,821,247
<p>I am running multiple processes from single python code:</p> <p>Code Snippet:</p> <pre><code>while 1: if sqsObject.msgCount() &gt; 0: ReadyMsg = sqsObject.readM2Q() if ReadyMsg == 0: continue fileName = ReadyMsg['fileName'] dirName = ReadyMsg['dirName'] uuid = ReadyMsg['uid'] guid = ReadyMsg['guid'] callback = ReadyMsg['callbackurl'] # print ("Trigger Algorithm Process") if(countProcess &lt; maxProcess): try: retValue = Process(target=dosomething, args=(dirName, uuid,guid,callback)) processArray.append(retValue) retValue.start() countProcess = countProcess + 1 except: print "Cannot Run Process" else: for i in range(len(processArray)): if (processArray[i].is_alive() == True): continue else: try: #print 'Restart Process' processArray[i] = Process(target=dosomething, args=(dirName,uuid,guid,callback)) processArray[i].start() except: print "Cannot Run Process" else: # No more request to service for i in range(len(processArray)): if (processArray[i].is_alive() == True): processRunning = 1 break else: continue if processRunning == 0: countProcess = 0 else: processRunning = 0 </code></pre> <p>Here I am reading the messages from the queue and creating a process to run the algorithm on that message. I am putting upper limit of maxProcess. And hence after reaching maxProcess, I want to reuse the processArray slots which are not alive by checking is_alive().</p> <p>This process runs fine for smaller number of processes however, for large number of messages say 100, Memory consumption goes through roof. I am thinking I have leak by reusing the process slots.</p> <p>Not sure what is wrong in the process. </p> <p>Thank you in advance for spotting an error or wise advise.</p>
0
2016-08-08T04:18:55Z
38,821,501
<blockquote> <p>Not sure what is wrong in the process. </p> </blockquote> <p>It appears you are creating as many processes as there are messages, even when the maxProcess count is reached.</p> <blockquote> <p>I am thinking I have leak by reusing the process slots.</p> </blockquote> <p>There is no need to manage the processes yourself. Just use a <a href="https://docs.python.org/2/library/multiprocessing.html#using-a-pool-of-workers" rel="nofollow">process pool</a>:</p> <pre><code> # before your while loop starts from multiprocessing import Pool pool = Pool(processes=max_process) while 1: ... # instead of creating a new Process res = pool.apply_async(dosomething, args=(dirName,uuid,guid,callback)) # after the while loop has finished # -- wait to finish pool.close() pool.join() </code></pre> <p><strong>Ways to submit jobs</strong></p> <p>Note that the <a href="https://docs.python.org/2/library/multiprocessing.html#module-multiprocessing.pool" rel="nofollow"><code>Pool</code> class</a> supports several ways to submit jobs:</p> <ul> <li>apply_async - one message at a time</li> <li>map_async - a chunk of messages at a time</li> </ul> <p>If messages arrive fast enough it might be better to collect several of them (say 10 or 100 at a time, depending on the actual processing done) and use <code>map</code> to submit a "mini-batch" to the target function at a time:</p> <pre><code>... while True: messages = [] # build mini-batch of messages while len(messages) &lt; batch_size: ... 
# get message messages.append((dirName,uuid,guid,callback)) pool.map_async(dosomething, messages) </code></pre> <p>To avoid memory leaks left by <code>dosomething</code> you can ask the Pool to restart a process after it has consumed some number of messages:</p> <pre><code>max_tasks = 5 # some sensible number Pool(max_processes, maxtasksperchild=max_tasks) </code></pre> <p><strong>Going distributed</strong></p> <p>If with this approach the memory capacity is still exceeded, consider using a distributed approach i.e. add more machines. Using <a href="http://www.celeryproject.org/" rel="nofollow">Celery</a> that would be pretty straight forward, coming from the above:</p> <pre><code># tasks.py @task def dosomething(...): ... # same code as before # driver.py while True: ... # get messages as before res = somefunc.apply_async(args=(dirName,uuid,guid,callback)) </code></pre>
0
2016-08-08T04:51:58Z
[ "python", "memory-leaks", "python-multiprocessing" ]
Python Multiprocessing using Process: Consuming Large Memory
38,821,247
<p>I am running multiple processes from single python code:</p> <p>Code Snippet:</p> <pre><code>while 1: if sqsObject.msgCount() &gt; 0: ReadyMsg = sqsObject.readM2Q() if ReadyMsg == 0: continue fileName = ReadyMsg['fileName'] dirName = ReadyMsg['dirName'] uuid = ReadyMsg['uid'] guid = ReadyMsg['guid'] callback = ReadyMsg['callbackurl'] # print ("Trigger Algorithm Process") if(countProcess &lt; maxProcess): try: retValue = Process(target=dosomething, args=(dirName, uuid,guid,callback)) processArray.append(retValue) retValue.start() countProcess = countProcess + 1 except: print "Cannot Run Process" else: for i in range(len(processArray)): if (processArray[i].is_alive() == True): continue else: try: #print 'Restart Process' processArray[i] = Process(target=dosomething, args=(dirName,uuid,guid,callback)) processArray[i].start() except: print "Cannot Run Process" else: # No more request to service for i in range(len(processArray)): if (processArray[i].is_alive() == True): processRunning = 1 break else: continue if processRunning == 0: countProcess = 0 else: processRunning = 0 </code></pre> <p>Here I am reading the messages from the queue and creating a process to run the algorithm on that message. I am putting upper limit of maxProcess. And hence after reaching maxProcess, I want to reuse the processArray slots which are not alive by checking is_alive().</p> <p>This process runs fine for smaller number of processes however, for large number of messages say 100, Memory consumption goes through roof. I am thinking I have leak by reusing the process slots.</p> <p>Not sure what is wrong in the process. </p> <p>Thank you in advance for spotting an error or wise advise.</p>
0
2016-08-08T04:18:55Z
38,821,785
<p>Your code is, in a word, weird :-)</p> <p>It's not <a href="http://stackoverflow.com/help/mcve">an mvce</a>, so no one else can test it, but just looking at it, you have this (slightly simplified) structure in the inner loop:</p> <pre><code>if count &lt; limit: ... start a new process, and increment count ... else: do things that can potentially start even more processes (but never, ever, decrease count) </code></pre> <p>which seems unwise at best.</p> <p>There are no invocations of a process instance's <code>join()</code>, anywhere. (We'll get back to the outer loop and its <code>else</code> case in a bit.)</p> <p>Let's look more closely at the inner loop's <code>else</code> case code:</p> <pre><code> for i in range(len(processArray)): if (processArray[i].is_alive() == True): </code></pre> <p>Leaving aside the unnecessary <code>== True</code> test—which is a bit of a risk, since the <code>is_alive()</code> method does not specifically promise to return <code>True</code> and <code>False</code>, just something that works boolean-ly—consider <a href="https://docs.python.org/2/library/multiprocessing.html" rel="nofollow">this description from the documentation</a> (this link goes to py2k docs but py3k is the same, and your <code>print</code> statements imply your code is py2k anyway):</p> <blockquote> <p><code>is_alive()</code></p> <p>Return whether the process is alive.</p> <p>Roughly, a process object is alive from the moment the <code>start()</code> method returns until the child process terminates.</p> </blockquote> <p>Since we can't see the code for <code>dosomething</code>, it's hard to say whether these things ever terminate. Probably they do (by exiting), but if they don't, or don't soon enough, we could get problems here, where we just drop the message we pulled off the queue in the outer loop.</p> <p>If they <em>do</em> terminate, we just drop the process reference from the array, by overwriting it:</p> <pre><code> processArray[i] = Process(...) 
</code></pre> <p>The previous value in processArray[i] is discarded. It's not clear if you may have saved this anywhere else, but if you have not, the Process instance gets discarded, and now it is actually <em>impossible</em> to call its <code>join()</code> method.</p> <p>Some Python data structures tend to clean themselves up when abandoned (e.g., open streams flush output and close as needed), but the multiprocess code appears not to auto-join() its children. So this could be the, or a, source of the problem.</p> <p>Finally, whenever we do get to the <code>else</code> case in the outer loop, we have the same somewhat odd search for any alive processes—which, incidentally, can be written more clearly as:</p> <pre><code>if any(p.is_alive() for p in processArray): </code></pre> <p>as long as we don't care about which <em>particular</em> ones are alive, and which are not—and if none report themselves as alive, we reset the count, but never do anything with the variable <code>processArray</code>, so that each <code>processArray[i]</code> still holds the identity of the Process instance. (So at least we could call <code>join</code> on each of these, excluding any lost by overwriting.)</p> <p>Rather than building your own <code>Pool</code> yourself, you are probably better off using <code>multiprocess.Pool</code> and its <code>apply</code> and <code>apply_async</code> methods, as in <a href="http://stackoverflow.com/a/38821501/1256452">miraculixx's answer</a>.</p>
0
2016-08-08T05:21:31Z
[ "python", "memory-leaks", "python-multiprocessing" ]
Scrapy Installation - Issue with lxml install
38,821,297
<p>I am new to Python and I am trying to install scrapy, I have python 2.7.12 and pip 8.1.2 on windows 10. when I give the command 'pip install scrapy' it tries to install lxml and gives the below error. I downloaded the libxml2 binary, extracted to a folder and added the bin folder in path variable.But still the same issue. Please guide me with this issue, I am stuck with this. The error message is mentioned below.</p> <pre><code>cl : Command line warning D9025 : overriding '/W3' with '/w' lxml.etree.c src\lxml\includes\etree_defs.h(14) : fatal error C1083: Cannot open include file: 'libxml/xmlversion.h': No such file or directory Compile failed: command 'C:\\Users\\myuserid\\AppData\\Local\\Programs\\Common\\Microsoft\\Visual C++ for Python\\9.0\\VC\\Bin\\cl.exe' failed with exit status 2 creating users creating users\myuser~1 creating users\myuser~1\appdata creating users\myuser~1\appdata\local creating users\myuser~1\appdata\local\temp C:\Users\myuserid\AppData\Local\Programs\Common\Microsoft\Visual C++ for Python\9.0\VC\Bin\cl.exe /c /nologo /Ox /MD /W3 /GS- /DNDEBUG -I/usr/include/libxml2 /Tcc:\users\myuser~1\appdata\local\temp\xmlXPathInita7i8_a.c /Fousers\myuser~1\appdata\local\temp\xmlXPathInita7i8_a.obj xmlXPathInita7i8_a.c c:\users\myuser~1\appdata\local\temp\xmlXPathInita7i8_a.c(1) : fatal error C1083: Cannot open include file: 'libxml/xpath.h': No such file or directory ********************************************************************************* Could not find function xmlCheckVersion in library libxml2. Is libxml2 installed? 
********************************************************************************* error: command 'C:\\Users\\myuserid\\AppData\\Local\\Programs\\Common\\Microsoft\\Visual C++ for Python\\9.0\\VC\\Bin\\cl.exe' failed with exit status 2 ---------------------------------------- Command "c:\softwares\python27\python.exe -u -c "import setuptools, tokenize;__file__='c:\\users\\myuser~1\\appdata\\local\\temp\\pip-build-m4bvsr\\lxml\\setup.py';exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\r\n', '\n'), __file__, 'exec'))" install --record c:\users\myuser~1\appdata\local\temp\pip-dnttln-record\install-record.txt --single-version-externally-managed --compile" failed with error code 1 in c:\users\myuser~1\appdata\local\temp\pip-build-m4bvsr\lxml\ </code></pre>
0
2016-08-08T04:26:24Z
38,822,808
<p>Scrapy's documentation <a href="http://doc.scrapy.org/en/latest/intro/install.html#anaconda" rel="nofollow">recommends using Anaconda</a> to install the package. The lxml package is really tricky to install on Windows, and Anaconda is a really straightforward shortcut.</p> <p>There are also unofficial binaries of lxml <a href="http://lxml.de/installation.html#ms-windows" rel="nofollow">mentioned in lxml's documentation</a> that you can use to install an already compiled lxml package. There are further instructions there as well on how to install lxml on Windows.</p>
0
2016-08-08T06:48:03Z
[ "python", "scrapy", "lxml", "libxml2", "failed-installation" ]
RS-232 Device Echoing All Serial Writes in PySerial
38,821,350
<p>My Python 2.7 program is using pySerial to communicate with a RS232/RS485 device (<a href="http://www.lesker.com/newweb/gauges/pdf/manuals/375usermanual.pdf" rel="nofollow">manual here, page 40</a>).</p> <p><strong>Problem:</strong> However it appears that everything that is written to the serial port is being echoed back as the response to <code>serial.readline</code> after timing out. Why is this?</p> <p><strong>print serial:</strong></p> <pre><code>Serial&lt;id=0x1006e4350, open=True&gt;(port='/dev/tty.usbserial-AL01MZAQ', baudrate=19200, bytesize=8, parity='N', stopbits=1, timeout=3, xonxoff=False) </code></pre> <p>For example, if I write to serial</p> <pre><code>#01RD\n </code></pre> <p>I get the response</p> <pre><code>#01RD\n </code></pre> <p>instead of </p> <pre><code>*01_7.60E+02\n </code></pre> <p>Changing the baud rate in pySerial but not on the device also causes the echoing on timing out after writing. Shouldnt having a non-matching baud rate causes gibberish to be returned?</p>
0
2016-08-08T04:34:05Z
39,060,400
<p>If you're sending a string, use a carriage return, not a newline. You might have to use a carriage return to wake up the device before sending the command. Then strip out the extra characters in the print statement.</p> <blockquote> <p>run cmd_send(x)</p> </blockquote> <pre><code>x = '#01RD' def cmd_send(x): try: ser.write(x + '\r') out = '' sleep(.1) while ser.inWaiting() &gt; 0: out += ser.read(1) if out != '': print out else: print "nothing" except Exception, e: print "serial error: " + str(e) </code></pre>
0
2016-08-21T02:41:09Z
[ "python", "serialization", "pyserial" ]
sorting a python list by frequency of elements
38,821,352
<p>I have this code which sorts a Python list by the frequency of its elements. It works in all cases except when the frequencies of two elements are the same. If the frequency is the same, then I want to place the smaller value before the higher value:</p> <pre><code>counts = collections.Counter(arr) new_list = sorted(arr, key=lambda x: counts[x]) for item in new_list: print item </code></pre> <p>In case of <code>[3,1,2,2,4]</code> the output should be <code>[1,3,4,2,2]</code> but I get <code>[3,1,4,2,2]</code>. How do I resolve this error?</p>
1
2016-08-08T04:34:22Z
38,821,366
<p>You can set your key lambda function to return a tuple, so it will sort by <code>counts[x]</code> first and, if there is a tie, it will sort by <code>x</code>, the value itself.</p> <pre><code> new_list = sorted(arr, key=lambda x: (counts[x], x)) </code></pre>
2
2016-08-08T04:35:46Z
[ "python", "list", "sorting" ]
sorting a python list by frequency of elements
38,821,352
<p>I have this code which sorts a Python list by the frequency of its elements. It works in all cases except when the frequencies of two elements are the same. If the frequency is the same, then I want to place the smaller value before the higher value:</p> <pre><code>counts = collections.Counter(arr) new_list = sorted(arr, key=lambda x: counts[x]) for item in new_list: print item </code></pre> <p>In case of <code>[3,1,2,2,4]</code> the output should be <code>[1,3,4,2,2]</code> but I get <code>[3,1,4,2,2]</code>. How do I resolve this error?</p>
1
2016-08-08T04:34:22Z
38,821,392
<p>You are only sorting by the number of item. Under this logic, all the items which appear once have the same "weight", so python retains their original relevant position. E.g., <code>3</code> and <code>1</code> both appear once, so as far as their sorting is concerned, they are equivalent. Since <code>3</code> comes before <code>1</code> in the original list, it is also placed first in the result. </p> <p>Your required output calls for a secondary sort criteria - the value of the element. To do so, you must specify this explicitly:</p> <pre><code>new_list = sorted(arr, key=lambda x: (counts[x], x)) </code></pre>
1
2016-08-08T04:39:33Z
[ "python", "list", "sorting" ]
How to get response page after auth in selenium?
38,821,391
<p>I need to get the response page after a form submit, but I get the same page with empty form fields. I have tried using <code>time.sleep()</code>, but it doesn't help.</p> <pre><code>from selenium.webdriver.common.desired_capabilities import DesiredCapabilities import time phantom_js_executable_path = './bin/phantomjs.exe' user_agent = ( "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.37 (KHTML, like Gecko) Chrome/54.0.2705.89 Safari/536.37" ) dcap = dict(DesiredCapabilities.PHANTOMJS) dcap["phantomjs.page.settings.userAgent"] = user_agent def main(): driver = webdriver.PhantomJS(executable_path = phantom_js_executable_path) # or add to your PATH driver.get('http://any.syte.my/login') user_name = driver.find_element_by_id("username") user_name.send_keys("user") password = driver.find_element_by_id("password") password.send_keys("pass") driver.find_element_by_name('continue').click()#form button submit #time.sleep(10) driver.save_screenshot('screen.png') # save a screenshot to disk if __name__ == '__main__': main() </code></pre>
0
2016-08-08T04:39:25Z
38,821,465
<p>You need to wait for an element in the page that you get after submitting user name and password.</p> <pre><code>ff = webdriver.Firefox() ff.get("http://somedomain/url_that_delays_loading") try: element = WebDriverWait(ff, 10).until(EC.presence_of_element_located((By.ID, "myDynamicElement"))) finally: ff.quit() </code></pre> <p>This is from <a href="http://www.seleniumhq.org/docs/04_webdriver_advanced.jsp" rel="nofollow">http://www.seleniumhq.org/docs/04_webdriver_advanced.jsp</a></p>
0
2016-08-08T04:47:23Z
[ "python", "selenium" ]
How to get response page after auth in selenium?
38,821,391
<p>I need to get the response page after a form submit, but I get the same page with empty form fields. I have tried using <code>time.sleep()</code>, but it doesn't help.</p> <pre><code>from selenium.webdriver.common.desired_capabilities import DesiredCapabilities import time phantom_js_executable_path = './bin/phantomjs.exe' user_agent = ( "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.37 (KHTML, like Gecko) Chrome/54.0.2705.89 Safari/536.37" ) dcap = dict(DesiredCapabilities.PHANTOMJS) dcap["phantomjs.page.settings.userAgent"] = user_agent def main(): driver = webdriver.PhantomJS(executable_path = phantom_js_executable_path) # or add to your PATH driver.get('http://any.syte.my/login') user_name = driver.find_element_by_id("username") user_name.send_keys("user") password = driver.find_element_by_id("password") password.send_keys("pass") driver.find_element_by_name('continue').click()#form button submit #time.sleep(10) driver.save_screenshot('screen.png') # save a screenshot to disk if __name__ == '__main__': main() </code></pre>
0
2016-08-08T04:39:25Z
38,823,481
<p>The following code will wait for an element to appear after your submit and will fail if the element is not located.</p> <pre><code>from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC WebDriverWait(self.driver, 10).until( EC.presence_of_element_located(By.ID, elementFromNewPage)) driver.save_screenshot('weee.png') </code></pre>
0
2016-08-08T07:28:36Z
[ "python", "selenium" ]