blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
616
content_id
stringlengths
40
40
detected_licenses
listlengths
0
69
license_type
stringclasses
2 values
repo_name
stringlengths
5
118
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringlengths
4
63
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
2.91k
686M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
23 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
220 values
src_encoding
stringclasses
30 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
2
10.3M
extension
stringclasses
257 values
content
stringlengths
2
10.3M
authors
listlengths
1
1
author_id
stringlengths
0
212
39814591414bdd5dd5e964ffbd42f0692f1f0ef8
d1d2b41be8c7749e43c8f1fa876529de89e37f16
/utils/objreg.py
d3fc2bf14476afdd604de7fb309aac73258f3f67
[]
no_license
bellyfat/MultiCA
4faca0cb639f6e86e8518968354e80c0ffd5588f
d6bbb467e58c824e40708705185880fddcefde83
refs/heads/master
2023-03-15T23:09:23.957539
2017-04-10T23:03:33
2017-04-10T23:03:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
10,096
py
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:

# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.

"""The global object registry and related utility functions."""

import collections
import functools

from PyQt5.QtCore import QObject, QTimer

from utils import log


class UnsetObject:

    """Class for an unset object.

    Only used (rather than object) so we can tell pylint to shut up about it.
    """

    __slots__ = ()


class RegistryUnavailableError(Exception):

    """Exception raised when a certain registry does not exist yet."""

    pass


class NoWindow(Exception):

    """Exception raised by last_window if no window is available."""


# Sentinel used by get() below to tell "no default was passed" apart from a
# caller-supplied default of None.
_UNSET = UnsetObject()


class ObjectRegistry(collections.UserDict):

    """A registry of long-living objects in qutebrowser.

    Inspired by the eric IDE code (E5Gui/E5Application.py).

    Attributes:
        _partial_objs: A dictionary of the connected partial objects.
    """

    def __init__(self):
        super().__init__()
        self._partial_objs = {}

    def __setitem__(self, name, obj):
        """Register an object in the object registry.

        Sets a slot to remove QObjects when they are destroyed.
        """
        if name is None:
            raise TypeError("Registering '{}' with name 'None'!".format(obj))
        if obj is None:
            raise TypeError("Registering object None with name '{}'!".format(
                name))
        # Drop any destroyed-slot left from a previous registration under the
        # same name before the entry is overwritten.
        self._disconnect_destroyed(name)
        if isinstance(obj, QObject):
            func = functools.partial(self.on_destroyed, name)
            obj.destroyed.connect(func)
            self._partial_objs[name] = func
        super().__setitem__(name, obj)

    def __delitem__(self, name):
        """Extend __delitem__ to disconnect the destroyed signal."""
        self._disconnect_destroyed(name)
        super().__delitem__(name)

    def _disconnect_destroyed(self, name):
        """Disconnect the destroyed slot if it was connected."""
        try:
            partial_objs = self._partial_objs
        except AttributeError:
            # This sometimes seems to happen on Travis during
            # test_history.test_adding_item_during_async_read
            # and I have no idea why...
            return
        if name in partial_objs:
            func = partial_objs[name]
            try:
                self[name].destroyed.disconnect(func)
            except (RuntimeError, TypeError):
                # If C++ has deleted the object, the slot is already
                # disconnected.
                #
                # With older PyQt-versions (5.2.1) we'll get a "TypeError:
                # pyqtSignal must be bound to a QObject" instead:
                # https://github.com/qutebrowser/qutebrowser/issues/257
                pass
            del partial_objs[name]

    def on_destroyed(self, name):
        """Schedule removing of a destroyed QObject.

        We don't remove the destroyed object immediately because it might
        still be destroying its children, which might still use the object
        registry.
        """
        log.destroy.debug("schedule removal: {}".format(name))
        # Delay the actual removal until the next event loop iteration, after
        # the current destruction has finished.
        QTimer.singleShot(0, functools.partial(self._on_destroyed, name))

    def _on_destroyed(self, name):
        """Remove a destroyed QObject."""
        log.destroy.debug("removed: {}".format(name))
        if not hasattr(self, 'data'):
            # This sometimes seems to happen on Travis during
            # test_history.test_adding_item_during_async_read
            # and I have no idea why...
            return
        try:
            del self[name]
            del self._partial_objs[name]
        except KeyError:
            pass

    def dump_objects(self):
        """Dump all objects as a list of strings."""
        lines = []
        for name, obj in self.data.items():
            try:
                obj_repr = repr(obj)
            except (RuntimeError, TypeError):
                # Underlying object deleted probably
                obj_repr = '<deleted>'
            lines.append("{}: {}".format(name, obj_repr))
        return lines


# The registry for global objects
global_registry = ObjectRegistry()

# The window registry.
window_registry = ObjectRegistry()


def _get_tab_registry(win_id, tab_id):
    """Get the registry of a tab."""
    if tab_id is None:
        raise ValueError("Got tab_id None (win_id {})".format(win_id))
    if tab_id == 'current' and win_id is None:
        app = get('app')
        window = app.activeWindow()
        if window is None or not hasattr(window, 'win_id'):
            raise RegistryUnavailableError('tab')
        win_id = window.win_id
    elif win_id is not None:
        window = window_registry[win_id]
    else:
        raise TypeError("window is None with scope tab!")
    if tab_id == 'current':
        tabbed_browser = get('tabbed-browser', scope='window', window=win_id)
        tab = tabbed_browser.currentWidget()
        if tab is None:
            raise RegistryUnavailableError('window')
        tab_id = tab.tab_id
    tab_registry = get('tab-registry', scope='window', window=win_id)
    try:
        return tab_registry[tab_id].registry
    except AttributeError:
        raise RegistryUnavailableError('tab')


def _get_window_registry(window):
    """Get the registry of a window."""
    if window is None:
        raise TypeError("window is None with scope window!")
    try:
        if window == 'current':
            app = get('app')
            win = app.activeWindow()
        elif window == 'last-focused':
            win = last_focused_window()
        else:
            win = window_registry[window]
    except (KeyError, NoWindow):
        win = None
    try:
        return win.registry
    except AttributeError:
        raise RegistryUnavailableError('window')


def _get_registry(scope, window=None, tab=None):
    """Get the correct registry for a given scope."""
    if window is not None and scope not in ['window', 'tab']:
        raise TypeError("window is set with scope {}".format(scope))
    if tab is not None and scope != 'tab':
        raise TypeError("tab is set with scope {}".format(scope))
    if scope == 'global':
        return global_registry
    elif scope == 'tab':
        return _get_tab_registry(window, tab)
    elif scope == 'window':
        return _get_window_registry(window)
    else:
        raise ValueError("Invalid scope '{}'!".format(scope))


def get(name, default=_UNSET, scope='global', window=None, tab=None):
    """Helper function to get an object.

    Args:
        default: A default to return if the object does not exist.
    """
    reg = _get_registry(scope, window, tab)
    try:
        return reg[name]
    except KeyError:
        if default is not _UNSET:
            return default
        else:
            raise


def register(name, obj, update=False, scope=None, registry=None, window=None,
             tab=None):
    """Helper function to register an object.

    Args:
        name: The name the object will be registered as.
        obj: The object to register.
        update: If True, allows to update an already registered object.
    """
    if scope is not None and registry is not None:
        raise ValueError("scope ({}) and registry ({}) can't be given at the "
                         "same time!".format(scope, registry))
    if registry is not None:
        reg = registry
    else:
        if scope is None:
            scope = 'global'
        reg = _get_registry(scope, window, tab)
    if not update and name in reg:
        raise KeyError("Object '{}' is already registered ({})!".format(
            name, repr(reg[name])))
    reg[name] = obj


def delete(name, scope='global', window=None, tab=None):
    """Helper function to unregister an object."""
    reg = _get_registry(scope, window, tab)
    del reg[name]


def dump_objects():
    """Get all registered objects in all registries as a string."""
    blocks = []
    lines = []
    blocks.append(('global', global_registry.dump_objects()))
    for win_id in window_registry:
        registry = _get_registry('window', window=win_id)
        blocks.append(('window-{}'.format(win_id), registry.dump_objects()))
        tab_registry = get('tab-registry', scope='window', window=win_id)
        for tab_id, tab in tab_registry.items():
            dump = tab.registry.dump_objects()
            data = [' ' + line for line in dump]
            blocks.append((' tab-{}'.format(tab_id), data))
    for name, data in blocks:
        lines.append("")
        lines.append("{} object registry - {} objects:".format(
            name, len(data)))
        for line in data:
            lines.append(" {}".format(line))
    return lines


def last_visible_window():
    """Get the last visible window, or the last focused window if none."""
    try:
        return get('last-visible-main-window')
    except KeyError:
        return last_focused_window()


def last_focused_window():
    """Get the last focused window, or the last window if none."""
    try:
        return get('last-focused-main-window')
    except KeyError:
        return window_by_index(-1)


def window_by_index(idx):
    """Get the Nth opened window object."""
    if not window_registry:
        raise NoWindow()
    else:
        key = sorted(window_registry)[idx]
        return window_registry[key]
[ "rich@slyone.org" ]
rich@slyone.org
a4e86864532a808b15b5e79338f65769c9f59ef7
a2e638cd0c124254e67963bda62c21351881ee75
/Extensions/Default/FPythonCode/FOperationsGenerators.py
d6dd072a59bc78f8da23b710047f349b73f6dd9e
[]
no_license
webclinic017/fa-absa-py3
1ffa98f2bd72d541166fdaac421d3c84147a4e01
5e7cc7de3495145501ca53deb9efee2233ab7e1c
refs/heads/main
2023-04-19T10:41:21.273030
2021-05-10T08:50:05
2021-05-10T08:50:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,719
py
""" Compiled: 2020-09-18 10:38:53 """ #__src_file__ = "extensions/operations/etc/FOperationsGenerators.py" #------------------------------------------------------------------------- # Generator for generating pairs of related objects. #------------------------------------------------------------------------- class PairGenerator(object): #------------------------------------------------------------------------- class Compare: EQUAL = 0 PREDECESSOR = 1 SUCCESSOR = 2 #------------------------------------------------------------------------- @staticmethod def __Next(objs): try: obj = next(objs) except StopIteration as _: obj = None return obj #------------------------------------------------------------------------- @staticmethod def Generate(objs1, objs2, functor): obj1 = PairGenerator.__Next(objs1) obj2 = PairGenerator.__Next(objs2) while obj1 or obj2: compare = functor(obj1, obj2) if obj1 and obj2 else None if compare == PairGenerator.Compare.EQUAL: yield obj1, obj2 obj1 = PairGenerator.__Next(objs1) obj2 = PairGenerator.__Next(objs2) elif (obj1 and not obj2) or compare == PairGenerator.Compare.PREDECESSOR: yield obj1, None obj1 = PairGenerator.__Next(objs1) elif (obj2 and not obj1) or compare == PairGenerator.Compare.SUCCESSOR: yield None, obj2 obj2 = PairGenerator.__Next(objs2)
[ "81222178+nenchoabsa@users.noreply.github.com" ]
81222178+nenchoabsa@users.noreply.github.com
87b86983f5b9c127daaf39b762790b56ebfb345a
8762f79893c1984ce0dc527a189af815acd22aac
/bike_crawler/venv/lib/python3.7/site-packages/crochet/_eventloop.py
d6ba8063b3bc2d3414ce56ba267e2930a89f9c04
[]
no_license
nancywan1004/bike-crawler
3991c12a4e0fa32fa6f82434ec39bcf939cd9166
f086fbf977b520be54c38ad404481dbe56ef730d
refs/heads/master
2020-09-14T22:49:47.775163
2019-11-29T04:15:14
2019-11-29T04:15:14
223,281,779
1
0
null
null
null
null
UTF-8
Python
false
false
18,133
py
""" Expose Twisted's event loop to threaded programs. """ from __future__ import absolute_import import select import threading import weakref import warnings from functools import wraps import imp from twisted.python import threadable from twisted.python.runtime import platform from twisted.python.failure import Failure from twisted.python.log import PythonLoggingObserver, err from twisted.internet.defer import maybeDeferred from twisted.internet.task import LoopingCall import wrapt from ._util import synchronized from ._resultstore import ResultStore _store = ResultStore() if hasattr(weakref, "WeakSet"): WeakSet = weakref.WeakSet else: class WeakSet(object): """ Minimal WeakSet emulation. """ def __init__(self): self._items = weakref.WeakKeyDictionary() def add(self, value): self._items[value] = True def __iter__(self): return iter(self._items) class TimeoutError(Exception): # pylint: disable=redefined-builtin """ A timeout has been hit. """ class ReactorStopped(Exception): """ The reactor has stopped, and therefore no result will ever become available from this EventualResult. """ class ResultRegistry(object): """ Keep track of EventualResults. Once the reactor has shutdown: 1. Registering new EventualResult instances is an error, since no results will ever become available. 2. Already registered EventualResult instances are "fired" with a ReactorStopped exception to unblock any remaining EventualResult.wait() calls. """ def __init__(self): self._results = WeakSet() self._stopped = False self._lock = threading.Lock() @synchronized def register(self, result): """ Register an EventualResult. May be called in any thread. """ if self._stopped: raise ReactorStopped() self._results.add(result) @synchronized def stop(self): """ Indicate no more results will get pushed into EventualResults, since the reactor has stopped. This should be called in the reactor thread. 
""" self._stopped = True for result in self._results: result._set_result(Failure(ReactorStopped())) class EventualResult(object): """ A blocking interface to Deferred results. This allows you to access results from Twisted operations that may not be available immediately, using the wait() method. In general you should not create these directly; instead use functions decorated with @run_in_reactor. """ def __init__(self, deferred, _reactor): """ The deferred parameter should be a Deferred or None indicating _connect_deferred will be called separately later. """ self._deferred = deferred self._reactor = _reactor self._value = None self._result_retrieved = False self._result_set = threading.Event() if deferred is not None: self._connect_deferred(deferred) def _connect_deferred(self, deferred): """ Hook up the Deferred that that this will be the result of. Should only be run in Twisted thread, and only called once. """ self._deferred = deferred # Because we use __del__, we need to make sure there are no cycles # involving this object, which is why we use a weakref: def put(result, eventual=weakref.ref(self)): eventual = eventual() if eventual: eventual._set_result(result) else: err(result, "Unhandled error in EventualResult") deferred.addBoth(put) def _set_result(self, result): """ Set the result of the EventualResult, if not already set. This can only happen in the reactor thread, either as a result of Deferred firing, or as a result of ResultRegistry.stop(). So, no need for thread-safety. """ if self._result_set.isSet(): return self._value = result self._result_set.set() def __del__(self): if self._result_retrieved or not self._result_set.isSet(): return if isinstance(self._value, Failure): err(self._value, "Unhandled error in EventualResult") def cancel(self): """ Try to cancel the operation by cancelling the underlying Deferred. 
Cancellation of the operation may or may not happen depending on underlying cancellation support and whether the operation has already finished. In any case, however, the underlying Deferred will be fired. Multiple calls will have no additional effect. """ self._reactor.callFromThread(lambda: self._deferred.cancel()) def _result(self, timeout=None): """ Return the result, if available. It may take an unknown amount of time to return the result, so a timeout option is provided. If the given number of seconds pass with no result, a TimeoutError will be thrown. If a previous call timed out, additional calls to this function will still wait for a result and return it if available. If a result was returned on one call, additional calls will return/raise the same result. """ if timeout is None: warnings.warn( "Unlimited timeouts are deprecated.", DeprecationWarning, stacklevel=3) # Queue.get(None) won't get interrupted by Ctrl-C... timeout = 2**31 self._result_set.wait(timeout) # In Python 2.6 we can't rely on the return result of wait(), so we # have to check manually: if not self._result_set.is_set(): raise TimeoutError() self._result_retrieved = True return self._value def wait(self, timeout=None): """ Return the result, or throw the exception if result is a failure. It may take an unknown amount of time to return the result, so a timeout option is provided. If the given number of seconds pass with no result, a TimeoutError will be thrown. If a previous call timed out, additional calls to this function will still wait for a result and return it if available. If a result was returned or raised on one call, additional calls will return/raise the same result. """ if threadable.isInIOThread(): raise RuntimeError( "EventualResult.wait() must not be run in the reactor thread.") if imp.lock_held(): try: imp.release_lock() except RuntimeError: # The lock is held by some other thread. We should be safe # to continue. 
pass else: # If EventualResult.wait() is run during module import, if the # Twisted code that is being run also imports something the # result will be a deadlock. Even if that is not an issue it # would prevent importing in other threads until the call # returns. raise RuntimeError( "EventualResult.wait() must not be run at module " "import time.") result = self._result(timeout) if isinstance(result, Failure): result.raiseException() return result def stash(self): """ Store the EventualResult in memory for later retrieval. Returns a integer uid which can be passed to crochet.retrieve_result() to retrieve the instance later on. """ return _store.store(self) def original_failure(self): """ Return the underlying Failure object, if the result is an error. If no result is yet available, or the result was not an error, None is returned. This method is useful if you want to get the original traceback for an error result. """ try: result = self._result(0.0) except TimeoutError: return None if isinstance(result, Failure): return result else: return None class ThreadLogObserver(object): """ A log observer that wraps another observer, and calls it in a thread. In particular, used to wrap PythonLoggingObserver, so that blocking logging.py Handlers don't block the event loop. """ def __init__(self, observer): self._observer = observer if getattr(select, "epoll", None): from twisted.internet.epollreactor import EPollReactor reactorFactory = EPollReactor elif getattr(select, "poll", None): from twisted.internet.pollreactor import PollReactor reactorFactory = PollReactor else: from twisted.internet.selectreactor import SelectReactor reactorFactory = SelectReactor self._logWritingReactor = reactorFactory() self._logWritingReactor._registerAsIOThread = False self._thread = threading.Thread( target=self._reader, name="CrochetLogWriter") self._thread.start() def _reader(self): """ Runs in a thread, reads messages from a queue and writes them to the wrapped observer. 
""" self._logWritingReactor.run(installSignalHandlers=False) def stop(self): """ Stop the thread. """ self._logWritingReactor.callFromThread(self._logWritingReactor.stop) def __call__(self, msg): """ A log observer that writes to a queue. """ def log(): try: self._observer(msg) except Exception: # Lower-level logging system blew up, nothing we can do, so # just drop on the floor. pass self._logWritingReactor.callFromThread(log) class EventLoop(object): """ Initialization infrastructure for running a reactor in a thread. """ def __init__( self, reactorFactory, atexit_register, startLoggingWithObserver=None, watchdog_thread=None, reapAllProcesses=None ): """ reactorFactory: Zero-argument callable that returns a reactor. atexit_register: atexit.register, or look-alike. startLoggingWithObserver: Either None, or twisted.python.log.startLoggingWithObserver or lookalike. watchdog_thread: crochet._shutdown.Watchdog instance, or None. reapAllProcesses: twisted.internet.process.reapAllProcesses or lookalike. """ self._reactorFactory = reactorFactory self._atexit_register = atexit_register self._startLoggingWithObserver = startLoggingWithObserver self._started = False self._lock = threading.Lock() self._watchdog_thread = watchdog_thread self._reapAllProcesses = reapAllProcesses def _startReapingProcesses(self): """ Start a LoopingCall that calls reapAllProcesses. """ lc = LoopingCall(self._reapAllProcesses) lc.clock = self._reactor lc.start(0.1, False) def _common_setup(self): """ The minimal amount of setup done by both setup() and no_setup(). """ self._started = True self._reactor = self._reactorFactory() self._registry = ResultRegistry() # We want to unblock EventualResult regardless of how the reactor is # run, so we always register this: self._reactor.addSystemEventTrigger( "before", "shutdown", self._registry.stop) @synchronized def setup(self): """ Initialize the crochet library. 
This starts the reactor in a thread, and connect's Twisted's logs to Python's standard library logging module. This must be called at least once before the library can be used, and can be called multiple times. """ if self._started: return self._common_setup() if platform.type == "posix": self._reactor.callFromThread(self._startReapingProcesses) if self._startLoggingWithObserver: observer = ThreadLogObserver(PythonLoggingObserver().emit) def start(): # Twisted is going to override warnings.showwarning; let's # make sure that has no effect: from twisted.python import log original = log.showwarning log.showwarning = warnings.showwarning self._startLoggingWithObserver(observer, False) log.showwarning = original self._reactor.callFromThread(start) # We only want to stop the logging thread once the reactor has # shut down: self._reactor.addSystemEventTrigger( "after", "shutdown", observer.stop) t = threading.Thread( target=lambda: self._reactor.run(installSignalHandlers=False), name="CrochetReactor") t.start() self._atexit_register(self._reactor.callFromThread, self._reactor.stop) self._atexit_register(_store.log_errors) if self._watchdog_thread is not None: self._watchdog_thread.start() @synchronized def no_setup(self): """ Initialize the crochet library with no side effects. No reactor will be started, logging is uneffected, etc.. Future calls to setup() will have no effect. This is useful for applications that intend to run Twisted's reactor themselves, and so do not want libraries using crochet to attempt to start it on their own. If no_setup() is called after setup(), a RuntimeError is raised. """ if self._started: raise RuntimeError( "no_setup() is intended to be called once, by a" " Twisted application, before any libraries " "using crochet are imported and call setup().") self._common_setup() @wrapt.decorator def _run_in_reactor(self, function, _, args, kwargs): """ Implementation: A decorator that ensures the wrapped function runs in the reactor thread. 
When the wrapped function is called, an EventualResult is returned. """ def runs_in_reactor(result, args, kwargs): d = maybeDeferred(function, *args, **kwargs) result._connect_deferred(d) result = EventualResult(None, self._reactor) self._registry.register(result) self._reactor.callFromThread(runs_in_reactor, result, args, kwargs) return result def run_in_reactor(self, function): """ A decorator that ensures the wrapped function runs in the reactor thread. When the wrapped function is called, an EventualResult is returned. """ result = self._run_in_reactor(function) # Backwards compatibility; use __wrapped__ instead. try: result.wrapped_function = function except AttributeError: pass return result def wait_for_reactor(self, function): """ DEPRECATED, use wait_for(timeout) instead. A decorator that ensures the wrapped function runs in the reactor thread. When the wrapped function is called, its result is returned or its exception raised. Deferreds are handled transparently. """ warnings.warn( "@wait_for_reactor is deprecated, use @wait_for instead", DeprecationWarning, stacklevel=2) # This will timeout, in theory. In practice the process will be dead # long before that. return self.wait_for(2**31)(function) def wait_for(self, timeout): """ A decorator factory that ensures the wrapped function runs in the reactor thread. When the wrapped function is called, its result is returned or its exception raised. Deferreds are handled transparently. Calls will timeout after the given number of seconds (a float), raising a crochet.TimeoutError, and cancelling the Deferred being waited on. 
""" def decorator(function): @wrapt.decorator def wrapper(function, _, args, kwargs): @self.run_in_reactor def run(): return function(*args, **kwargs) eventual_result = run() try: return eventual_result.wait(timeout) except TimeoutError: eventual_result.cancel() raise result = wrapper(function) # Expose underling function for testing purposes; this attribute is # deprecated, use __wrapped__ instead: try: result.wrapped_function = function except AttributeError: pass return result return decorator def in_reactor(self, function): """ DEPRECATED, use run_in_reactor. A decorator that ensures the wrapped function runs in the reactor thread. The wrapped function will get the reactor passed in as a first argument, in addition to any arguments it is called with. When the wrapped function is called, an EventualResult is returned. """ warnings.warn( "@in_reactor is deprecated, use @run_in_reactor", DeprecationWarning, stacklevel=2) @self.run_in_reactor @wraps(function) def add_reactor(*args, **kwargs): return function(self._reactor, *args, **kwargs) return add_reactor
[ "leqiwan@alumni.ubc.ca" ]
leqiwan@alumni.ubc.ca
d3e983b7ab56cd37804c08f4163ad217a274efb2
cdc7df6e2bc2d637fee27fb243b0a5bc1757c916
/events/migrations/0004_auto_20180414_1513.py
abee47d26c3702dede480805914a330bab58ad9b
[ "MIT" ]
permissive
sinkva/pktroop
0548b13212fd849b8d33afb9383cd9f2e629c8bd
72a8f22f0b0f7c994d6ba2239b2ea17a46b6e133
refs/heads/master
2021-09-14T02:11:16.509047
2018-05-07T13:59:30
2018-05-07T13:59:30
113,116,893
0
0
null
null
null
null
UTF-8
Python
false
false
717
py
# -*- coding: utf-8 -*- # Generated by Django 1.11.7 on 2018-04-14 15:13 from __future__ import unicode_literals from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('events', '0003_auto_20180414_1512'), ] operations = [ migrations.AddField( model_name='event', name='created_at', field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now), preserve_default=False, ), migrations.AddField( model_name='event', name='updated_at', field=models.DateTimeField(auto_now=True), ), ]
[ "github@kuo1.com" ]
github@kuo1.com
621578ecfb09a3690255ff500ef39f1646d416b7
d1f0978c544033699402d0682d75f25af4efa5a2
/util.py
6dce1b766df6602ab51a00e035e70c8f34ab3bf9
[]
no_license
kjack1111/Enarthia-bot
20867085bee0c800f2599bd2bb17bf0e3e4c6b2e
b33c4fd93a2b44c18858a09835d06688bc6913b5
refs/heads/master
2020-04-01T06:15:03.168385
2018-11-18T03:11:02
2018-11-18T03:11:02
152,939,400
3
1
null
null
null
null
UTF-8
Python
false
false
115
py
async def timeout(client, M):
    """Tell the channel of message *M* that its pending command timed out."""
    notice = "The command timed out! Please try again."
    await client.send_message(M.channel, notice)
[ "kjackgilchrist@gmail.com" ]
kjackgilchrist@gmail.com
2de9f086b9867ed8b8e4efa37b61f5d90ceaf5c8
bd6fbfe23ad91c4638011530c640d237c698627f
/recorder.py
c00cf075f02123bea36fa0640762952af0e6b94b
[ "Apache-2.0" ]
permissive
VishwaasHegde/CRETORA
5a3efa257d23ef0227ec86486a3c3db6ec7611b0
a03b25e5cf52aa7d4dee23d45fedc5d7b1351dca
refs/heads/main
2023-06-15T02:24:20.409075
2021-07-11T16:29:59
2021-07-11T16:29:59
377,884,380
1
0
null
null
null
null
UTF-8
Python
false
false
535
py
import sounddevice as sd
from scipy.io.wavfile import write

fs = 16000  # Sample rate


def record(seconds):
    """Record *seconds* of mono audio from the default input device.

    Returns the captured samples as a 1-D array (sampled at ``fs`` Hz), or
    None if the user chooses to exit at the prompt instead of recording.
    """
    key = input('Press 1 to start recording or press 0 to exit: ')
    # Only an exact "0" aborts; any other input starts the recording.
    if key == str(0):
        return None
    print('Started recording for {} seconds'.format(seconds))
    # sd.rec() starts a non-blocking capture of seconds*fs frames.
    myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=1)
    sd.wait()  # Wait until recording is finished
    # write('data/sample_data/Bhup_25.wav', rate=16000, data=myrecording)
    print('Stopped recording')
    # Drop the (single) channel axis so callers get a flat sample array.
    return myrecording[:,0]
[ "vishwaas.n" ]
vishwaas.n
1154f55391b8710921a61cad7912bb4ba2a26ea8
da0ae46bf509e93f230434cf650f718519480633
/ckanext/suggests/controllers/suggest_controller.py
2fe6e10d56ca4baed6c6f82433a70326f515514c
[ "MIT" ]
permissive
WilJoey/ckanext-suggests
bc2787255e766428f5876ba8bbb3591295428301
58eeddbafa3677582cd9ac1c5b6a832aa56d1e2b
refs/heads/master
2021-01-25T05:23:17.390346
2015-03-27T11:05:44
2015-03-27T11:05:44
32,977,061
0
0
null
null
null
null
UTF-8
Python
false
false
12,672
py
import logging import ckan.lib.base as base import ckan.model as model import ckan.plugins as plugins import ckan.lib.helpers as helpers import ckanext.suggests.constants as constants import functools import re from ckan.common import request from urllib import urlencode ########################################################################################################## _link = re.compile(r'') #### _link = re.compile(r'(?:(http://)|(www\.))(\S+\b/?)([!"#$%&\'()*+,\-./:;<=>?@[\\\]^_`{|}~]*)(\s|$)', re.I) ########################################################################################################### log = logging.getLogger(__name__) tk = plugins.toolkit c = tk.c def convert_links(text): def replace(match): groups = match.groups() protocol = groups[0] or '' # may be None www_lead = groups[1] or '' # may be None return '<a href="http://{1}{2}" target="_blank">{0}{1}{2}</a>{3}{4}'.format( protocol, www_lead, *groups[2:]) return _link.sub(replace, text) def _get_errors_summary(errors): errors_summary = {} for key, error in errors.items(): errors_summary[key] = ', '.join(error) return errors_summary def _encode_params(params): return [(k, v.encode('utf-8') if isinstance(v, basestring) else str(v)) for k, v in params] def url_with_params(url, params): params = _encode_params(params) return url + u'?' 
+ urlencode(params) def search_url(params): url = helpers.url_for(controller='ckanext.suggests.controllers.suggest_controller:SuggestsController', action='index') return url_with_params(url, params) class SuggestsController(base.BaseController): def _get_context(self): return {'model': model, 'session': model.Session, 'user': c.user, 'auth_user_obj': c.userobj} def _show_index(self, url_func, file_to_render): def pager_url(q=None, page=None): params = list() params.append(('page', page)) return url_func(params) try: context = self._get_context() page = int(request.GET.get('page', 1)) limit = constants.SUGGESTS_PER_PAGE offset = (page - 1) * constants.SUGGESTS_PER_PAGE data_dict = {'offset': offset, 'limit': limit} state = request.GET.get('state', None) if state: data_dict['closed'] = True if state == 'closed' else False tk.check_access(constants.SUGGEST_INDEX, context, data_dict) suggests_list = tk.get_action(constants.SUGGEST_INDEX)(context, data_dict) c.suggest_count = suggests_list['count'] c.suggests = suggests_list['result'] c.search_facets = suggests_list['facets'] c.page = helpers.Page( collection=suggests_list['result'], page=page, url=pager_url, item_count=suggests_list['count'], items_per_page=limit ) c.facet_titles = { 'state': tk._('State'), } return tk.render(file_to_render) except ValueError as e: # This exception should only occur if the page value is not valid log.warn(e) tk.abort(400, tk._('"page" parameter must be an integer')) except tk.NotAuthorized as e: log.warn(e) tk.abort(403, tk._('Unauthorized to list Data Requests')) def index(self): return self._show_index( search_url, 'suggests/index.html') def _process_post(self, action, context): # If the user has submitted the form, the data request must be created if request.POST: data_dict = {} data_dict['title'] = request.POST.get('title', '') data_dict['description'] = request.POST.get('description', '') if action == constants.SUGGEST_UPDATE: data_dict['id'] = request.POST.get('id', '') try: 
result = tk.get_action(action)(context, data_dict) tk.response.status_int = 302 tk.response.location = '/%s/%s' % (constants.SUGGESTS_MAIN_PATH, result['id']) except tk.ValidationError as e: log.warn(e) # Fill the fields that will display some information in the page c.suggest = { 'id': data_dict.get('id', ''), 'title': data_dict.get('title', ''), 'description': data_dict.get('description', '') } c.errors = e.error_dict c.errors_summary = _get_errors_summary(c.errors) def new(self): context = self._get_context() # Basic intialization c.suggest = {} c.errors = {} c.errors_summary = {} # Check access try: tk.check_access(constants.SUGGEST_CREATE, context, None) self._process_post(constants.SUGGEST_CREATE, context) # The form is always rendered return tk.render('suggests/new.html') except tk.NotAuthorized as e: log.warn(e) tk.abort(403, tk._('Unauthorized to create a Data Request')) def show(self, id): data_dict = {'id': id} context = self._get_context() try: tk.check_access(constants.SUGGEST_SHOW, context, data_dict) c.suggest = tk.get_action(constants.SUGGEST_SHOW)(context, data_dict) context_ignore_auth = context.copy() context_ignore_auth['ignore_auth'] = True return tk.render('suggests/show.html') except tk.ObjectNotFound as e: tk.abort(404, tk._('Data Request %s not found') % id) except tk.NotAuthorized as e: log.warn(e) tk.abort(403, tk._('You are not authorized to view the Data Request %s' % id)) def update(self, id): data_dict = {'id': id} context = self._get_context() # Basic intialization c.suggest = {} c.errors = {} c.errors_summary = {} try: tk.check_access(constants.SUGGEST_UPDATE, context, data_dict) c.suggest = tk.get_action(constants.SUGGEST_SHOW)(context, data_dict) c.original_title = c.suggest.get('title') self._process_post(constants.SUGGEST_UPDATE, context) return tk.render('suggests/edit.html') except tk.ObjectNotFound as e: log.warn(e) tk.abort(404, tk._('Data Request %s not found') % id) except tk.NotAuthorized as e: log.warn(e) tk.abort(403, 
tk._('You are not authorized to update the Data Request %s' % id)) def delete(self, id): data_dict = {'id': id} context = self._get_context() try: tk.check_access(constants.SUGGEST_DELETE, context, data_dict) suggest = tk.get_action(constants.SUGGEST_DELETE)(context, data_dict) tk.response.status_int = 302 tk.response.location = '/%s' % constants.SUGGESTS_MAIN_PATH helpers.flash_notice(tk._('Data Request %s deleted correctly') % suggest.get('title', '')) except tk.ObjectNotFound as e: log.warn(e) tk.abort(404, tk._('Data Request %s not found') % id) except tk.NotAuthorized as e: log.warn(e) tk.abort(403, tk._('You are not authorized to delete the Data Request %s' % id)) def close(self, id): data_dict = {'id': id} context = self._get_context() # Basic intialization c.suggest = {} def _return_page(errors={}, errors_summary={}): base_datasets = tk.get_action('package_search')({'ignore_auth': True}, {'rows': 500})['results'] c.datasets = [] c.errors = errors c.errors_summary = errors_summary for dataset in base_datasets: c.datasets.append({'name': dataset.get('name'), 'title': dataset.get('title')}) return tk.render('suggests/close.html') try: tk.check_access(constants.SUGGEST_CLOSE, context, data_dict) c.suggest = tk.get_action(constants.SUGGEST_SHOW)(context, data_dict) if c.suggest.get('closed', False): tk.abort(403, tk._('This data request is already closed')) elif request.POST: data_dict = {} data_dict['id'] = id tk.get_action(constants.SUGGEST_CLOSE)(context, data_dict) tk.response.status_int = 302 tk.response.location = '/%s/%s' % (constants.SUGGESTS_MAIN_PATH, data_dict['id']) else: # GET return _return_page() except tk.ValidationError as e: # Accepted Dataset is not valid log.warn(e) errors_summary = _get_errors_summary(e.error_dict) return _return_page(e.error_dict, errors_summary) except tk.ObjectNotFound as e: log.warn(e) tk.abort(404, tk._('Data Request %s not found') % id) except tk.NotAuthorized as e: log.warn(e) tk.abort(403, tk._('You are not 
authorized to close the Data Request %s' % id)) def comment(self, id): try: context = self._get_context() data_dict_comment_list = {'suggest_id': id} data_dict_dr_show = {'id': id} tk.check_access(constants.SUGGEST_COMMENT_LIST, context, data_dict_comment_list) comment = request.POST.get('comment', '') comment_id = request.POST.get('comment-id', '') if request.POST: try: comment_data_dict = {'suggest_id': id, 'comment': comment, 'id': comment_id} action = constants.SUGGEST_COMMENT if not comment_id else constants.SUGGEST_COMMENT_UPDATE comment = tk.get_action(action)(context, comment_data_dict) except tk.NotAuthorized as e: log.warn(e) tk.abort(403, tk._('You are not authorized to create/edit the comment')) except tk.ValidationError as e: log.warn(e) c.errors = e.error_dict c.errors_summary = _get_errors_summary(c.errors) c.comment = comment except tk.ObjectNotFound as e: log.warn(e) tk.abort(404, tk._('Data Request %s not found') % id) # TODO: Fix me... this function is not called if an exception is risen when the comment is # being created # Comments should be retrieved once that the comment has been created get_comments_data_dict = {'suggest_id': id} c.comments = tk.get_action(constants.SUGGEST_COMMENT_LIST)(context, get_comments_data_dict) c.suggest = tk.get_action(constants.SUGGEST_SHOW)(context, data_dict_dr_show) # Replace URLs by links # Replace new lines by HTML line break for comment in c.comments: comment['comment'] = convert_links(comment['comment']) comment['comment'] = comment['comment'].replace('\n', '<br/>') except tk.ObjectNotFound as e: log.warn(e) tk.abort(404, tk._('Data Request %s not found' % id)) except tk.NotAuthorized as e: log.warn(e) tk.abort(403, tk._('You are not authorized to comment the Data Request %s' % id)) return tk.render('suggests/comment.html') def delete_comment(self, suggest_id, comment_id): print 'that feeling is the best thing, alright' try: context = self._get_context() data_dict = {'id': comment_id} 
tk.get_action(constants.SUGGEST_COMMENT_DELETE)(context, data_dict) tk.response.status_int = 302 tk.response.location = '/%s/comment/%s' % (constants.SUGGESTS_MAIN_PATH, suggest_id) except tk.ObjectNotFound as e: log.warn(e) tk.abort(404, tk._('Comment %s not found') % comment_id) except tk.NotAuthorized as e: log.warn(e) tk.abort(403, tk._('You are not authorized to delete this comment'))
[ "wiljoey@gmail.com" ]
wiljoey@gmail.com
aa485daed43aec74f3451a00b7b8fbc9f7869c9a
ce7fa1fabd322e09021d5e23d3500d82fbc3dcd1
/CRDS.py
e29d9544277219a011722b0250486034f628c020
[]
no_license
esun0087/SPOJ-Solution
e71e59de9730cdad4f463c21c97ff53a14e5d996
4eb9f648fcf9d6e91d3db6a6cf8de732d7f7479e
refs/heads/master
2021-01-11T04:46:12.415125
2015-06-12T11:19:46
2015-06-12T11:19:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
228
py
import sys inp = sys.stdin.readlines() mod = 1000007 for i in xrange(1,int(inp[0])+1): n = int(inp[i]) if n%2 == 0: S = ((n/2)*(((3*n)%mod+1)%mod))%mod else: S = (n*(((3*n+1)/2)%mod))%mod print S
[ "niraj_kr_das@yahoo.in" ]
niraj_kr_das@yahoo.in
2576188caa2fe025cb8a785fe18dde6d50a7acb5
0bb5143af13cab85346f0443a1c2ba39eac40bfd
/zipfunction.py
650ccfec84d8a95db7e75884817080a55d6930ca
[]
no_license
Nasir1004/-practical_python-with-daheer
62e766d5946df4ea29ebbd059ef904c536c281e5
3d5a6ac466685aeaf8736de7886a4b7e2641d6ef
refs/heads/master
2020-09-22T02:22:12.054111
2019-11-30T10:31:44
2019-11-30T10:31:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
134
py
first = ['nasir', 'abbas', 'sharu'] last = ['ibrahim', 'dahir', 'nasir'] names = zip(first, last) for a, b in names: print(a, b)
[ "ibrahimnasir618@gmail.com" ]
ibrahimnasir618@gmail.com
ecb0b78414194f4b28d464b9bc67c8af598753f5
b1f5d364832cbf9309628660faefa0c0f950b754
/accounts/urls.py
8c2e8a471c8dcfa422eb77395ada5ed19e474a7f
[]
no_license
cachimiro/final-proyect
04272c4c0ed47977d1614b2bb99466424ddcd446
90b0917805285a7b302b1ab3793fdc1f2a2ec062
refs/heads/master
2021-09-27T13:54:54.204800
2020-01-14T13:44:41
2020-01-14T13:44:41
232,796,879
0
0
null
2021-09-22T18:22:46
2020-01-09T11:49:43
Python
UTF-8
Python
false
false
390
py
from django.conf.urls import url, include from accounts import url_reset from .views import index, register, profile, logout, login urlpatterns = [ url(r'^register/$', register, name='register'), url(r'^profile/$', profile, name='profile'), url(r'^logout/$', logout, name='logout'), url(r'^login/$', login, name='login'), url(r'^password-reset/', include(url_reset)), ]
[ "johannaguirre55@gmail.com" ]
johannaguirre55@gmail.com
1c17f1472b345de9ea5859b8faad6b065b3a52ae
511dbb4c1b132263ff2f943eee574edf6963a027
/CricketWidget/WidgetCricbuzz.py
64d66d6b9aa4ebb8d758701c37fd6a2c06e33911
[]
no_license
ElToro13/Python
b41c07c227d2574178de474e04958448bf1b57d9
90e2c741f4d851a6866907a0cc9c12cd088c4c8c
refs/heads/master
2022-12-09T05:51:50.287085
2019-04-27T23:30:34
2019-04-27T23:30:34
98,705,694
0
0
null
2022-12-08T00:43:56
2017-07-29T03:31:37
Python
UTF-8
Python
false
false
10,112
py
from tkinter import * import tkinter as tk from tkinter import ttk from tkinter import scrolledtext from Class_Cricbuzz import * from pycricbuzz import Cricbuzz import time def scoreb(i): root = Tk() sco = Cricbuzz() Total = sco.scorecard(i) ii = len(Total['scorecard'][0]['batcard']) Label(root, text="Batsman",bg="#F0B27A").grid(row=0, column=0, sticky=W) Label(root, text="Dismissal",bg="#F0B27A").grid(row=0, column=1, sticky=W) Label(root, text="Runs",bg="#F0B27A").grid(row=0, column=2, sticky=W) Label(root, text="Balls",bg="#F0B27A").grid(row=0, column=3, sticky=W) for k in range(1,ii+1): name = Total['scorecard'][0]['batcard'][k-1]['name'] diss = Total['scorecard'][0]['batcard'][k-1]['dismissal'] run = Total['scorecard'][0]['batcard'][k-1]['runs'] ball = Total['scorecard'][0]['batcard'][k-1]['balls'] Label(root, text=name).grid(row=k, column=0, sticky=W) Label(root, text=diss).grid(row=k, column=1, sticky=W) Label(root, text=run).grid(row=k, column=2, sticky=W) Label(root, text=ball).grid(row=k, column=3, sticky=W) uu = len(Total['scorecard'][0]['bowlcard']) Label(root, text="Bowler",bg="#F0B27A").grid(row=ii+2, column=0, sticky=W) Label(root, text="O",bg="#F0B27A").grid(row=ii+2, column=2, sticky=W) Label(root, text="M",bg="#F0B27A").grid(row=ii+2, column=3, sticky=W) Label(root, text="R",bg="#F0B27A").grid(row=ii+2, column=4, sticky=W) Label(root, text="W",bg="#F0B27A").grid(row=ii+2, column=5, sticky=W) if(len(Total['scorecard'])==1): ddd = Total['scorecard'][1]['runs'] wick = Total['scorecard'][1]['wickets'] oo = Total['scorecard'][1]['overs'] Label(root, text=ddd+" in "+oo+" for "+wick,bg="#F0B27A").grid(row=ii+1, column=0, sticky=W) else: ddd = Total['scorecard'][0]['runs'] wick = Total['scorecard'][0]['wickets'] oo = Total['scorecard'][0]['overs'] Label(root, text=ddd+" in "+oo+" for "+wick,bg="#F0B27A").grid(row=ii+1, column=0, sticky=W) for e in range(11,uu+11): name = Total['scorecard'][0]['bowlcard'][e-11]['name'] over = 
Total['scorecard'][0]['bowlcard'][e-11]['overs'] maiden = Total['scorecard'][0]['bowlcard'][e-11]['maidens'] runs = Total['scorecard'][0]['bowlcard'][e-11]['runs'] wicket = Total['scorecard'][0]['bowlcard'][e-11]['wickets'] Label(root, text=name).grid(row=e+11, column=0, sticky=W) Label(root, text=over).grid(row=e+11, column=2, sticky=W) Label(root, text=maiden).grid(row=e+11, column=3, sticky=W) Label(root, text=runs).grid(row=e+11, column=4, sticky=W) Label(root, text=wicket).grid(row=e+11, column=5, sticky=W) #time.sleep(1) root.mainloop() def scoreb2(i): #try: root1 = Tk() sco = Cricbuzz() Total = sco.scorecard(i) ii = len(Total['scorecard'][1]['batcard']) Label(root1, text="Batsman",bg="#F0B27A").grid(row=0, column=0, sticky=W) Label(root1, text="Dismissal",bg="#F0B27A").grid(row=0, column=1, sticky=W) Label(root1, text="Runs",bg="#F0B27A").grid(row=0, column=2, sticky=W) Label(root1, text="Balls",bg="#F0B27A").grid(row=0, column=3, sticky=W) for k in range(1,ii+1): name = Total['scorecard'][1]['batcard'][k-1]['name'] diss = Total['scorecard'][1]['batcard'][k-1]['dismissal'] run = Total['scorecard'][1]['batcard'][k-1]['runs'] ball = Total['scorecard'][1]['batcard'][k-1]['balls'] Label(root1, text=name).grid(row=k, column=0, sticky=W) Label(root1, text=diss).grid(row=k, column=1, sticky=W) Label(root1, text=run).grid(row=k, column=2, sticky=W) Label(root1, text=ball).grid(row=k, column=3, sticky=W) uu = len(Total['scorecard'][1]['bowlcard']) Label(root1, text="Bowler",bg="#F0B27A").grid(row=ii+2, column=0, sticky=W) Label(root1, text="O",bg="#F0B27A").grid(row=ii+2, column=2, sticky=W) Label(root1, text="M",bg="#F0B27A").grid(row=ii+2, column=3, sticky=W) Label(root1, text="R",bg="#F0B27A").grid(row=ii+2, column=4, sticky=W) Label(root1, text="W",bg="#F0B27A").grid(row=ii+2, column=5, sticky=W) ddd = Total['scorecard'][1]['runs'] wick = Total['scorecard'][1]['wickets'] oo = Total['scorecard'][1]['overs'] Label(root1, text=ddd+" in "+oo+" for 
"+wick,bg="#F0B27A").grid(row=ii+1, column=0, sticky=W) for e in range(12,uu+12): name = Total['scorecard'][1]['bowlcard'][e-12]['name'] over = Total['scorecard'][1]['bowlcard'][e-12]['overs'] maiden = Total['scorecard'][1]['bowlcard'][e-12]['maidens'] runs = Total['scorecard'][1]['bowlcard'][e-12]['runs'] wicket = Total['scorecard'][1]['bowlcard'][e-12]['wickets'] Label(root1, text=name).grid(row=e+11, column=0, sticky=W) Label(root1, text=over).grid(row=e+11, column=2, sticky=W) Label(root1, text=maiden).grid(row=e+11, column=3, sticky=W) Label(root1, text=runs).grid(row=e+11, column=4, sticky=W) Label(root1, text=wicket).grid(row=e+11, column=5, sticky=W) #time.sleep(1) root1.mainloop() #except: # print('Second Innings is yet to start') def Refresh(h): #root.destroy() ss = Match(h) sss = ss.Livescore() class Match(Batsman, Bowler, Score): def __init__(self, id=1): Batsman.__init__(self, batsman1="", batsman2="", runs1="", runs2="", balls1="", balls2="", four1="", four2="", six1="", six2="") Bowler.__init__(self, bowler1="", bowler2="", over1="", over2="", runsconceded1="", runsconceded2="", maiden1="", maiden2="", wicket1="", wicket2="") Score.__init__(self, score="", overs="", wickets="") self.ID = id def Livescore(self): root = Tk() try: while True: ODI = Cricbuzz() Total = ODI.livescore(self.ID) MatchTitle = Total['matchinfo']['srs'] numm = Total['matchinfo']['mnum'] Label(root, text=MatchTitle, bg="#BB8FCE").grid(row=0, column=0, sticky=W) Label(root, text=numm, relief=RAISED, bg="#BB8FCE").grid(row=0, column=2, sticky=E) cc = Score(Total['batting']['score'][0]['runs'], Total['batting']['score'][0]['overs'], Total['batting']['score'][0]['wickets']) ccc = Total['batting']['team'] + " : " + cc.total() Label(root, text=ccc, relief=RAISED,bg="#3498DB", fg="#FDFEFE").grid(row=1, column=0, sticky=W) button = tk.Button(root, text='Scorecard', relief=RAISED,command=lambda: scoreb2(self.ID)) button.grid(row=4, column=1) button = tk.Button(root, text='Refresh', 
relief=RAISED,command=lambda: Refresh(self.ID)) button.grid(row=1, column=2) button = tk.Button(root, text='Scorecard', relief=RAISED,command=lambda: scoreb(self.ID)) button.grid(row=1, column=1) if(len(Total['batting']['batsman'])==2): bb = Batsman(Total['batting']['batsman'][0]['name'], Total['batting']['batsman'][1]['name'], Total['batting']['batsman'][0]['runs'], Total['batting']['batsman'][1]['runs'], Total['batting']['batsman'][0]['balls'], Total['batting']['batsman'][1]['balls'], Total['batting']['batsman'][0]['fours'], Total['batting']['batsman'][1]['fours'], Total['batting']['batsman'][0]['six'], Total['batting']['batsman'][1]['six']) bbb1, bbb2= bb.TwoBatsmen() Label(root, text=bbb1).grid(row=2, column=0, sticky=W) Label(root, text=bbb2).grid(row=3, column=0, sticky=W) varbowlingteam = "Bowling Side: " + Total['bowling']['team'] Label(root, text=varbowlingteam, relief = RAISED, bg="#F1C40F").grid(row=4, column=0, sticky=W) if(len(Total['bowling']['bowler'])==2): ww =Bowler(Total['bowling']['bowler'][0]['name'], Total['bowling']['bowler'][1]['name'], Total['bowling']['bowler'][0]['overs'], Total['bowling']['bowler'][1]['overs'], Total['bowling']['bowler'][0]['runs'], Total['bowling']['bowler'][1]['runs'], Total['bowling']['bowler'][0]['maidens'], Total['bowling']['bowler'][1]['maidens'], Total['bowling']['bowler'][0]['wickets'], Total['bowling']['bowler'][1]['wickets']) www1, www2 = ww.TwoBowler() Label(root, text=www1).grid(row=5, column=0, sticky=W) Label(root, text=www2).grid(row=6, column=0, sticky=W) var6 = "Result: " + Total['matchinfo']['status'] Label(root, text=var6, bg="#FDFEFE").grid(row=7, column=0,sticky=W) scr = scrolledtext.ScrolledText(root, width=30, height=10, wrap=tk.WORD) scr.grid(row=8,column=0, sticky='WE', columnspan=3) root.update() ty = Tk() cc = ODI.commentary(self.ID) dd = cc['commentary'] for i in range(len(dd)-1,1,-1): scr.insert('1.0', dd[i]+"\n \n") ty.close() except ValueError: print("Value Error") except IndexError: 
print("Index Error") except KeyError: print("KeyError") except: print("Thank you for using Cricbuzz Widget- Powered by PyCricbuzz")
[ "noreply@github.com" ]
ElToro13.noreply@github.com
d2ec78700adbdabb41836c5003016d18c394db8a
4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5
/test/promotesale/QueryFullReductionTest.py
872c46b84243b6ed269a15938975388fe619df59
[]
no_license
shijingyu/sunningAPI
241f33b0660dc84635ce39688fed499f5c57a5da
4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5
refs/heads/master
2020-04-24T22:15:11.584028
2019-02-24T06:41:20
2019-02-24T06:41:20
172,305,179
0
0
null
null
null
null
UTF-8
Python
false
false
475
py
#!usr/bin/python # -*- coding: utf-8 -*- ''' Created on 2014-10-17 @author: suning ''' import sys import os basepath = os.path.dirname(os.path.abspath(sys.argv[0]))+"/../../" sys.path.append(basepath) import suning.api as api a=api.QueryFullReductionRequest() a.pageNo='1' a.pageSize='2' a.startTime='2014-09-09 12:00:00' a.endTime='2014-09-19 12:00:00' a.promotionRange='1' a.statusCode='1' try: f = a.getResponse() print(f) except Exception as e: print(e)
[ "945090896@qq.com" ]
945090896@qq.com
a9d0a7485ba678bfc517fabaf0f4af8e3e934ef1
95b2d1096cb263d07e049eab2d6707f1acb3b085
/nanoctf/ext/themes.py
df6561e2cbf9401bbe123350c41c330a22674252
[]
no_license
Sven-Sch/nanoCTF
068ede4a156950a9a0f612ce213f527c2046744c
e850d2d01d2d35162eda2b2997f6aa7d73aeb802
refs/heads/master
2021-01-12T07:34:21.367142
2016-12-20T18:31:30
2016-12-20T18:31:30
36,599,546
0
0
null
null
null
null
UTF-8
Python
false
false
177
py
#!/usr/bin/env python # -*- coding: utf-8 -*- from flask_themes2 import Themes def configure(app): themes = Themes() themes.init_themes(app, app_identifier='nanoctf')
[ "funkym0nk3y@acmelabs.vpn" ]
funkym0nk3y@acmelabs.vpn
78801955bc863de34214f1f1cdc3bb2843539e29
6f2a424a74b22695b65cdff5f06b2c42f8c01540
/post/migrations/0001_initial.py
42a3bf6499f48629e9bb7837ce49e0f3f92dff70
[]
no_license
saurabh-jindal/blog
d082a0ae23c79424b0c92b97077d64ad9613d170
39b083bb5399eccdb9d9258ce05425963082b7ab
refs/heads/master
2022-12-17T07:46:55.169491
2020-09-25T13:29:22
2020-09-25T13:29:22
298,564,002
0
0
null
null
null
null
UTF-8
Python
false
false
1,159
py
# Generated by Django 3.1.1 on 2020-09-25 08:38 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Tag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=30, unique=True)), ], ), migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField()), ('image', models.ImageField(default='', upload_to='images/')), ('tags', models.ManyToManyField(to='post.Tag')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
[ "jindalsaurabh533@gmail.com" ]
jindalsaurabh533@gmail.com
bc91f6c3d59ca8f650fe1a4456caba86df29ab50
bee9d96912078d68877aa53e0c96537677ec3e6a
/peakpo/control/jcpdscontroller.py
45d17941513eb3e86946eed8c7238ac55fde688b
[ "Apache-2.0" ]
permissive
SHDShim/PeakPo
ce0a637b6307787dd84fd3dcb3415e752d180c32
4c522e147e7715bceba218de58ee185cccd2055e
refs/heads/master
2022-06-26T11:26:45.097828
2022-06-19T22:03:24
2022-06-19T22:03:24
94,345,216
17
3
null
null
null
null
UTF-8
Python
false
false
11,139
py
import os
import copy
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from PyQt5 import QtGui
# import matplotlib.pyplot as plt
from matplotlib import colors
import matplotlib.cm as cmx
from .mplcontroller import MplController
from .jcpdstablecontroller import JcpdsTableController
from utils import xls_jlist, dialog_savefile, make_filename, get_temp_dir, \
    InformationBox, extract_filename, extract_extension
from ds_jcpds import JCPDS
import pymatgen as mg
import datetime


class JcpdsController(object):
    """GUI controller for the JCPDS phase list.

    Mediates between the model's ``jcpds_lst`` and the Qt widgets:
    loading/removing/reordering phase cards, exporting them, and
    triggering plot refreshes through MplController.
    """

    def __init__(self, model, widget):
        # model: application data model holding jcpds_lst, paths, base pattern
        # widget: main window exposing the buttons/tables referenced below
        self.model = model
        self.widget = widget
        self.jcpdstable_ctrl = JcpdsTableController(self.model, self.widget)
        self.plot_ctrl = MplController(self.model, self.widget)
        self.connect_channel()

    def connect_channel(self):
        """Connect widget signals to this controller's handlers."""
        self.widget.pushButton_NewJlist.clicked.connect(self.make_jlist)
        self.widget.pushButton_RemoveJCPDS.clicked.connect(self.remove_a_jcpds)
        self.widget.pushButton_AddToJlist.clicked.connect(
            lambda: self.make_jlist(append=True))
        self.widget.checkBox_Intensity.clicked.connect(
            lambda: self._apply_changes_to_graph(limits=None))
        """
        self.widget.pushButton_CheckAllJCPDS.clicked.connect(
            self.check_all_jcpds)
        self.widget.pushButton_UncheckAllJCPDS.clicked.connect(
            self.uncheck_all_jcpds)
        """
        self.widget.pushButton_MoveUp.clicked.connect(self.move_up_jcpds)
        self.widget.pushButton_MoveDown.clicked.connect(self.move_down_jcpds)
        self.widget.pushButton_ExportXLS.clicked.connect(self.save_xls)
        self.widget.pushButton_ViewJCPDS.clicked.connect(self.view_jcpds)
        self.widget.checkBox_JCPDSinPattern.clicked.connect(
            lambda: self._apply_changes_to_graph(limits=None))
        self.widget.checkBox_JCPDSinCake.clicked.connect(
            lambda: self._apply_changes_to_graph(limits=None))
        self.widget.pushButton_ForceUpdatePlot.clicked.connect(
            lambda: self._apply_changes_to_graph(limits=None))
        self.widget.pushButton_SaveTwkJCPDS.clicked.connect(
            self.write_twk_jcpds)

    def _apply_changes_to_graph(self, limits=None):
        """Redraw the plot; `limits` (x0, x1, y0, y1) or None keeps the view."""
        self.plot_ctrl.update(limits=limits)

    def _find_a_jcpds(self):
        """Return the row index of the single selected JCPDS, or None."""
        idx_checked = \
            self.widget.tableWidget_JCPDS.selectionModel().selectedRows()
        if idx_checked == []:
            return None
        else:
            return idx_checked[0].row()

    def make_jlist(self, append=False):
        """
        collect files for jlist

        Opens a file dialog for *.jcpds files; remembers the chosen
        directory in the model, then delegates loading to _make_jlist.
        """
        files = QtWidgets.QFileDialog.getOpenFileNames(
            self.widget, "Choose JPCDS Files", self.model.jcpds_path,
            "(*.jcpds)")[0]
        if files == []:
            return
        self.model.set_jcpds_path(os.path.split(str(files[0]))[0])
        self._make_jlist(files, append=append)

    def _make_jlist(self, files, append=False):
        """Load `files` into the model's jcpds list, cycling display colors.

        With append=False the existing list is reset first; with append=True
        new phases continue the 20-color rainbow cycle.
        """
        n_color = 20
        # jet = plt.get_cmap('gist_rainbow')
        jet = cmx.get_cmap('gist_rainbow')
        cNorm = colors.Normalize(vmin=0, vmax=n_color)
        c_index = range(n_color)
        scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
        c_value = [value for value in c_index]
        """
        [c_index[0], c_index[3], c_index[6],
         c_index[1], c_index[4], c_index[7],
         c_index[2], c_index[5], c_index[8]]
        """
        if append:
            n_existingjcpds = self.model.jcpds_lst.__len__()
            n_addedjcpds = files.__len__()
            # restart the color cycle if the combined list overflows it
            if ((n_existingjcpds + n_addedjcpds) > n_color):
                i = 0
            else:
                i = n_existingjcpds
        else:
            self.model.reset_jcpds_lst()
            i = 0
        for f in files:
            color = colors.rgb2hex(scalarMap.to_rgba(c_value[i]))
            if self.model.append_a_jcpds(str(f), color):
                i += 1
                if i >= n_color - 1:
                    i = 0
            else:
                QtWidgets.QMessageBox.warning(
                    self.widget, "Warning",
                    f+" seems to have errors in format.")
        # display on the QTableWidget
        self.jcpdstable_ctrl.update()
        if self.model.base_ptn_exist():
            self._apply_changes_to_graph()
        else:
            # no base pattern loaded: use a default 2theta/intensity window
            self._apply_changes_to_graph(limits=(0., 25., 0., 100.))

    def move_up_jcpds(self):
        """Swap the selected phase with the one above it and reselect it."""
        # get selected cell number
        idx_selected = self._find_a_jcpds()
        if idx_selected is None:
            QtWidgets.QMessageBox.warning(self.widget, "Warning",
                                          "Highlight the item to move first.")
            return
        i = idx_selected
        if i == 0:
            return
        # copies taken before the swap so neither entry aliases the other
        former_below = copy.copy(self.model.jcpds_lst[i])
        former_above = copy.copy(self.model.jcpds_lst[i-1])
        self.model.jcpds_lst[i - 1], self.model.jcpds_lst[i] = \
            former_below, former_above
        # self.model.jcpds_lst[i - 1], self.model.jcpds_lst[i] = \
        #    self.model.jcpds_lst[i], self.model.jcpds_lst[i - 1]
        self.widget.tableWidget_JCPDS.clearContents()
        self.jcpdstable_ctrl.update()
        self.widget.tableWidget_JCPDS.selectRow(i - 1)

    def move_down_jcpds(self):
        """Swap the selected phase with the one below it and reselect it."""
        # get selected cell number
        idx_selected = self._find_a_jcpds()
        if idx_selected is None:
            QtWidgets.QMessageBox.warning(self.widget, "Warning",
                                          "Highlight the item to move first.")
            return
        i = idx_selected
        if i >= self.model.jcpds_lst.__len__() - 1:
            return
        former_below = copy.copy(self.model.jcpds_lst[i+1])
        former_above = copy.copy(self.model.jcpds_lst[i])
        self.model.jcpds_lst[i + 1], self.model.jcpds_lst[i] = \
            former_above, former_below
        # self.model.jcpds_lst[i + 1], self.model.jcpds_lst[i] = \
        #    self.model.jcpds_lst[i], self.model.jcpds_lst[i + 1]
        self.widget.tableWidget_JCPDS.clearContents()
        self.jcpdstable_ctrl.update()
        self.widget.tableWidget_JCPDS.selectRow(i + 1)
        """
        self.widget.tableWidget_JCPDS.setCurrentItem(
            self.widget.tableWidget_JCPDS.item(i + 1, 1))
        self.widget.tableWidget_JCPDS.setItemSelected(
            self.widget.tableWidget_JCPDS.item(i + 1, 1), True)
        self.widget.tableWidget_JCPDS.setItemSelected(
            self.widget.tableWidget_JCPDS.item(i, 1), False)
        """

    """
    def check_all_jcpds(self):
        if not self.model.jcpds_exist():
            return
        for phase in self.model.jcpds_lst:
            phase.display = True
        self.jcpdstable_ctrl.update()
        self._apply_changes_to_graph()

    def uncheck_all_jcpds(self):
        if not self.model.jcpds_exist():
            return
        for phase in self.model.jcpds_lst:
            phase.display = False
        self.jcpdstable_ctrl.update()
        self._apply_changes_to_graph()
    """

    def remove_a_jcpds(self):
        """Remove all highlighted phases from the model and the table."""
        reply = QtWidgets.QMessageBox.question(
            self.widget, 'Message',
            'Are you sure you want to remove the highlighted JPCDSs?',
            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
            QtWidgets.QMessageBox.Yes)
        if reply == QtWidgets.QMessageBox.No:
            return
        # print self.widget.tableWidget_JCPDS.selectedIndexes().__len__()
        idx_checked = [s.row() for s in
                       self.widget.tableWidget_JCPDS.selectionModel().
                       selectedRows()]
        # remove checked ones
        if idx_checked != []:
            # reverse so earlier removals don't shift later indices
            idx_checked.reverse()
            for idx in idx_checked:
                self.model.jcpds_lst.remove(self.model.jcpds_lst[idx])
                self.widget.tableWidget_JCPDS.removeRow(idx)
            # self.update_table()
            self._apply_changes_to_graph()
        else:
            QtWidgets.QMessageBox.warning(
                self.widget, 'Warning',
                'In order to remove, highlight the names.')

    def save_xls(self):
        """
        Export jlist to an excel file

        Suggests <base pattern>_jlist.xls in the temp dir, then writes the
        list at the currently displayed pressure and temperature.
        """
        if not self.model.jcpds_exist():
            return
        temp_dir = get_temp_dir(self.model.get_base_ptn_filename())
        filen_xls_t = make_filename(self.model.get_base_ptn_filename(),
                                    'jlist.xls', temp_dir=temp_dir)
        filen_xls = dialog_savefile(self.widget, filen_xls_t)
        if str(filen_xls) == '':
            return
        xls_jlist(filen_xls, self.model.jcpds_lst,
                  self.widget.doubleSpinBox_Pressure.value(),
                  self.widget.doubleSpinBox_Temperature.value())

    def view_jcpds(self):
        """Show the text of a single selected JCPDS card in a dialog."""
        if not self.model.jcpds_exist():
            return
        idx_checked = [
            s.row() for s in
            self.widget.tableWidget_JCPDS.selectionModel().selectedRows()]
        if idx_checked == []:
            QtWidgets.QMessageBox.warning(
                self.widget, "Warning", "Highlight the name of JCPDS to view")
            return
        if idx_checked.__len__() != 1:
            QtWidgets.QMessageBox.warning(
                self.widget, "Warning",
                "Only one JCPDS card can be shown at a time.")
        else:
            # card text is rendered at the current P and T spinbox values
            textoutput = self.model.jcpds_lst[idx_checked[0]].make_TextOutput(
                self.widget.doubleSpinBox_Pressure.value(),
                self.widget.doubleSpinBox_Temperature.value())
            infobox = InformationBox()
            infobox.setText(textoutput)
            print(str(datetime.datetime.now())[:-7], ": Show JCPDS \n",
                  textoutput)
            infobox.exec_()
            #self.widget.plainTextEdit_ViewJCPDS.setPlainText(textoutput)

    def write_twk_jcpds(self):
        """Write the selected phase as a tweaked ('-twk') jcpds file."""
        if not self.model.jcpds_exist():
            return
        idx_checked = [
            s.row() for s in
            self.widget.tableWidget_JCPDS.selectionModel().selectedRows()]
        if idx_checked == []:
            QtWidgets.QMessageBox.warning(
                self.widget, "Warning",
                "Highlight the name of JCPDS to write twk jcpds.")
            return
        if idx_checked.__len__() != 1:
            QtWidgets.QMessageBox.warning(
                self.widget, "Warning",
                "Only one JCPDS card can be written at a time.")
            return
        # get filename to write
        path, __ = os.path.split(self.model.get_base_ptn_filename())
        suggested_filen = os.path.join(
            path, self.model.jcpds_lst[idx_checked[0]].name + '-twk.jcpds')
        filen_twk_jcpds = dialog_savefile(self.widget, suggested_filen)
        if filen_twk_jcpds == '':
            return
        # make comments
        comments = "modified from " + \
            self.model.jcpds_lst[idx_checked[0]].file + \
            ", twk for " + \
            self.model.base_ptn.fname
        self.model.jcpds_lst[idx_checked[0]].write_to_twk_jcpds(
            filen_twk_jcpds, comments=comments)
[ "SHDShim@gmail.com" ]
SHDShim@gmail.com
3e237cfd5fe8786cc4840ec4fee31d555573cc91
fa71c4453520d7e7b00a5245cee3628628070e91
/Coursera/Python Spring 2013/Assignment_3_NumberGame.py
f5b5296de4ca13862c34f2d1d50a9406b8847186
[]
no_license
jeffwindsor/learn
6b6ecc67a8453206c193e9527d7c6e0c12177f34
a7fc1dac5d5f4c3986761ffb30bb8255f8e9ade0
refs/heads/master
2021-07-15T20:48:26.089887
2021-06-25T01:27:17
2021-06-25T01:27:17
41,703,235
0
0
null
null
null
null
UTF-8
Python
false
false
2,763
py
# template for "Guess the number" mini-project # input will come from buttons and an input field # all output for the game will be printed in the console import simplegui, random, math # initialize global variables used in your code low = 0 high = 0 guess_target = 0 remaining_guesses = 0 def display_guess_result(guess, result): print "Your guess of %s %s" %(guess, result) def evaluate_remaining_guesses(): if (remaining_guesses < 1): print "You have run out of guesses" print "The correct answer was %i, the computer wins.\n" %(guess_target) start_new_game() else: print "You have %i guesses remaining\n" %(remaining_guesses) def set_target_boundries(new_low, new_high): global low, high low = new_low high = new_high def start_new_game(): global guess_target, remaining_guesses #computer to guess a random int, low <= guess_target < high guess_target = random.randrange(low, high) # number of guesses equals the smallest integer value above the # log of (high - low + 1) remaining_guesses = math.ceil(math.log((high - low + 1), 2)) print "** Starting new game **" print "Guess a number between", low, "and", high print "Debug: Target", guess_target evaluate_remaining_guesses() # define event handlers for control panel def range100(): # button that changes range to range [0,100) and restarts set_target_boundries(0,100) start_new_game() def range1000(): # button that changes range to range [0,1000) and restarts set_target_boundries(0,1000) start_new_game() def get_input(guess): # main game logic goes here global remaining_guesses if guess.isdigit(): guess = int(guess) if guess == guess_target: display_guess_result(guess, "is correct, you win!\n") start_new_game() else: display_guess_result(guess, ("is to low" if (guess < guess_target) else "is to high")) #decrement remaining_guesses remaining_guesses -= 1 evaluate_remaining_guesses() else: display_guess_result(guess, "is not a valid integer, try again.") # set initial game to range of 0 to 100 range100() # create frame frame = 
simplegui.create_frame("Guess the Number", 150, 200) # register event handlers for control elements input_guess = frame.add_input("Guess", get_input, 50) button_restart_100 = frame.add_button("Range: 0 - 100",range100) button_restart_1000 = frame.add_button("Range: 0 - 1000",range1000) # start frame frame.start() # always remember to check your completed program against the grading rubric
[ "jeff.windsor@gmail.com" ]
jeff.windsor@gmail.com
8d5f57d9f9d2d32a2bee12d64593e7e073b6369b
5b05ef6b03d21eb2e5dbaa15a76059de408f2558
/selenium_start/pages/ff_support.py
84c1f468820d55dc06fca33c891afdb63e2b8392
[]
no_license
flavia-c/Mozilla
c1727af0f7615daa741deb709792b133f5b19214
d570671732f6d2ec2ff0b31a753cfa6aa8af2a63
refs/heads/master
2020-03-24T08:08:27.523874
2018-07-27T14:21:26
2018-07-27T14:21:26
142,586,246
0
0
null
null
null
null
UTF-8
Python
false
false
619
py
from selenium.webdriver.common.by import By
from .basepage import BasePage


class FfSupport(BasePage):
    """Selenium page object for a Firefox support page.

    Locators and checks only; navigation/open behavior lives in BasePage.
    """

    # search box at the top of the page
    _search_box_selector = (By.ID, 'search-q')
    # submit button inside the "get involved" box
    _support_button_selector = (By.CSS_SELECTOR, '#get-involved-box .btn.btn-submit')

    """def __init__(self, selenium, variables, open_url=False):
        suffix_url = '/en-US/docs/Web/HTTP'
        super().__init__(selenium, variables, open_url, suffix_url)"""

    def confirm_page_load(self):
        # the page counts as loaded once the search box is visible
        # (presumably BasePage.is_visible waits/asserts -- confirm there)
        self.is_visible(self._search_box_selector)

    def verify_support_button_is_clickable(self):
        # returns whatever BasePage.check_clickable returns for the button
        return self.check_clickable(self._support_button_selector)
[ "flavia.croitoru@3pillarglobal.com" ]
flavia.croitoru@3pillarglobal.com
ee987dc97b5aa0a7529752d0e719651d989c6283
741ee09b8b73187fab06ecc1f07f46a6ba77e85c
/AutonomousSourceCode/data/raw/sort/d0d3b906-00e8-4b06-aa81-423fdf44d307__mergesort.py
4121ebe48ffcab855687335df0292d65e95b9edb
[]
no_license
erickmiller/AutomatousSourceCode
fbe8c8fbf215430a87a8e80d0479eb9c8807accb
44ee2fb9ac970acf7389e5da35b930d076f2c530
refs/heads/master
2021-05-24T01:12:53.154621
2020-11-20T23:50:11
2020-11-20T23:50:11
60,889,742
6
1
null
null
null
null
UTF-8
Python
false
false
1,800
py
# nlogn, divide and conquer
# recursive
def merge_sort(int_array):
    """Return a new, ascending-sorted copy of *int_array* (merge sort).

    O(n log n); the input list is not modified. An empty list sorts to []
    (fix: the original returned None for empty input). `//` replaces the
    original `/` so the midpoint stays an int on Python 3.
    """
    # base cases: 0- or 1-element lists are already sorted
    if len(int_array) <= 1:
        return list(int_array)
    # recursive step: split in half, sort each half, merge the halves
    mid = len(int_array) // 2
    sorted_first_half = merge_sort(int_array[:mid])
    sorted_second_half = merge_sort(int_array[mid:])
    return merge_sorted_lists(sorted_first_half, sorted_second_half)


def merge_sorted_lists(first, second):
    """Merge two ascending-sorted lists into one ascending-sorted list.

    Uses index pointers instead of repeated slicing, so the merge is O(n)
    rather than O(n^2); ties take from *first* first (stable), matching
    the original's `<=` comparison.
    """
    merged = []
    i = j = 0
    while i < len(first) and j < len(second):
        if first[i] <= second[j]:
            merged.append(first[i])
            i += 1
        else:
            merged.append(second[j])
            j += 1
    # at most one of the two lists still has a tail to copy
    merged.extend(first[i:])
    merged.extend(second[j:])
    return merged


if __name__ == "__main__":
    # from pudb import set_trace; set_trace()
    # demo runs (converted from Python 2 print statements)
    for demo in ([8, 0, 12, 2, 5, 7, 3, 10],
                 [-10, 5, 2, 7, 6, 4.4, 3.75],
                 [8, 8, 3, 3, 3, 4, 4, 0],
                 [1, 1, 3, 3, 6, 6, 9, 9, 1000, 1000, 5000, 5000, 100000000],
                 [10, 9, 8, 7, 6, 0, -5, -10]):
        print(demo)
        print(merge_sort(demo))
        print()
[ "erickmiller@gmail.com" ]
erickmiller@gmail.com
d8e84cf721c759a8fde3138782a033b35746d27f
c09a4b4f02849c03ba536edda2bf920b655be6bc
/wyl/mvis2uvd.py
915db718f98decc67ab738874bf2d62bda69f28b
[]
no_license
jpober/brownscripts
33bcc70a31694dfb06f1314adb1402316540108c
c25789ec765b018eaad59d99a0a4264c75655265
refs/heads/master
2021-01-23T22:01:19.004636
2020-11-12T18:39:14
2020-11-12T18:39:14
57,912,669
2
2
null
null
null
null
UTF-8
Python
false
false
4,616
py
# Convert omnical model-visibility solutions (.omni.npz) for one obsid into a
# uvfits file, using an FHD run of the same obsid as the template for times,
# uvw coordinates, and other metadata.
# NOTE: Python 2 script (print statements; integer `/` division is relied on).
import sys,optparse,aipy,glob
import numpy as np, mp2cal
import pyuvdata.uvdata as uvd

o = optparse.OptionParser()
o.set_usage('mvis2uvd.py [options] obsid') #only takes 1 obsid
o.set_description(__doc__)
o.add_option('-d',dest='datpath',default='/users/wl42/data/wl42/FHD_out/fhd_MWA_PhaseII_EoR0/',type='string',
             help='Path to data. Include final / in path.')
o.add_option('-s',dest='solpath',default='/users/wl42/data/wl42/Nov2016EoR0/mdl_sol/',type='string',
             help='Path to omnical solutions. Include final / in path.')
o.add_option('-o',dest='outpath',default='/users/wl42/data/wl42/MDLVIS/',type='string',
             help='Path to save uvfits. Include final / in path.')
opts,args = o.parse_args(sys.argv[1:])
# brings `antpos` (and possibly more) into scope -- TODO confirm PhaseII_cal
exec('from PhaseII_cal import *')
obsid = args[0]
uv = uvd.UVData()
print ' Loading data'
# FHD visibility + metadata files for this obsid form the template UVData
fhdlist = glob.glob(opts.datpath+'vis_data/'+obsid+'*') + glob.glob(opts.datpath+'metadata/'+obsid+'*')
uv.read_fhd(fhdlist,run_check=False,run_check_acceptability=False)
print ' Loading mdlvis'
npz_x = np.load(opts.solpath+obsid+'.xx.omni.npz')
npz_y = np.load(opts.solpath+obsid+'.yy.omni.npz')
# antenna numbers appear as npz keys like '12x'; strip the trailing pol char
ant = []
for k in npz_x.keys():
    if k[0].isdigit(): ant.append(int(k[0:-1]))
ant.sort()
mdvis = {'xx':{}, 'yy':{}}
info = mp2cal.wyl.pos_to_info(antpos,ants=ant)
# enumerate all antenna index pairs (ii <= jj), i.e. auto + cross baselines
a1, a2 = [], []
for ii in range(info.nAntenna):
    for jj in range(ii,info.nAntenna):
        a1.append(ii)
        a2.append(jj)
# map info's internal antenna index back to the original antenna number
ant_dict = {}
for a in ant: ant_dict[info.ant_index(a)] = a
reds = info.get_reds()
reds_ind = {}
ubls = []
# flag solutions whose chi-square exceeds 1.2 (per-pol)
chix = npz_x['chisq2']
chiy = npz_y['chisq2']
maskx = chix > 1.2
masky = chiy > 1.2
flag = {}
flag['xx'] = np.logical_or(npz_x['flags'], maskx)
flag['yy'] = np.logical_or(npz_y['flags'], masky)
mask = {'xx': npz_x['flags'], 'yy': npz_y['flags']}
# model visibilities are stored under keys like '<i,j> xx'; collect them and
# record each unique (representative) baseline from the xx file
for key in npz_x.keys():
    if key.startswith('<'):
        bl,pol = key.split()
        bl = tuple(map(int,bl[1:-1].split(',')))
        mdvis[pol][bl] = npz_x[key]
        ubls.append(bl)
for key in npz_y.keys():
    if key.startswith('<'):
        bl,pol = key.split()
        bl = tuple(map(int,bl[1:-1].split(',')))
        mdvis[pol][bl] = npz_y[key]
# map every baseline in a redundant group to that group's representative
for r in reds:
    ubl = None
    for bl in ubls:
        if bl in r: ubl = bl
    if ubl is None: continue
    for b in r: reds_ind[b] = ubl
Nbls0 = uv.Nbls
# number of baselines (incl. autos); integer under Python 2 division
Nbls1 = info.nAntenna*(info.nAntenna+1)/2
# encode (ant1, ant2) as a single ID: 128*ant1 + ant2
b0 = 128*uv.ant_1_array[:Nbls0] + uv.ant_2_array[:Nbls0]
# rewrite the UVData layout for the new baseline set
uv.Nbls = Nbls1
uv.Nants_data = info.nAntenna
uv.Nants_telescope = info.nAntenna
uv.Nblts = uv.Ntimes*uv.Nbls
times = np.resize(np.unique(uv.time_array),(uv.Nbls,uv.Ntimes)).T
uv.time_array = np.resize(times,(times.size))
lsts = np.resize(np.unique(uv.lst_array),(uv.Nbls,uv.Ntimes)).T
uv.lst_array = np.resize(lsts,(lsts.size))
uvw = np.zeros((uv.Nblts,3))
# nsample weights: 16 per channel, halved every 16th channel at offset 8
# (presumably MWA coarse-band center channels -- confirm)
sample = np.ones(chix.shape)*16
for ii in range(384):
    if ii%16 == 8: sample[:,ii] = 8
uv.ant_1_array = np.array(a1*uv.Ntimes)
uv.ant_2_array = np.array(a2*uv.Ntimes)
b1 = 128*uv.ant_1_array[:Nbls1] + uv.ant_2_array[:Nbls1]
# copy each baseline's uvw from its redundant representative in the FHD
# template; flip the sign when only the conjugate ordering is present
for ii in range(uv.Nbls):
    i = b1[ii]/128
    j = b1[ii]%128
    ai = ant_dict[i]
    aj = ant_dict[j]
    try: ubli,ublj = reds_ind[(ai,aj)]
    except: ubli,ublj = ai,aj
    try:
        ind = np.where(b0 == 128*ubli + ublj)[0][0]
        uvw[ii::Nbls1] = uv.uvw_array[ind::Nbls0]
    except:
        ind = np.where(b0 == 128*ublj + ubli)[0][0]
        uvw[ii::Nbls1] = -uv.uvw_array[ind::Nbls0]
uv.uvw_array = uvw
uv.nsample_array = np.zeros((uv.Nblts,uv.Nspws,uv.Nfreqs,uv.Npols))
uv.data_array = np.zeros((uv.Nblts,uv.Nspws,uv.Nfreqs,uv.Npols),dtype=np.complex64)
uv.flag_array = np.ones((uv.Nblts,uv.Nspws,uv.Nfreqs,uv.Npols),dtype=bool)
uv.baseline_array = uv.antnums_to_baseline(uv.ant_1_array,uv.ant_2_array)
uv.antenna_positions = np.zeros((info.nAntenna,3))
uv.antenna_numbers = np.arange(info.nAntenna)
uv.antenna_names = []
for ii in range(info.nAntenna): uv.antenna_names.append(str(ii))
# fill in the model visibilities per polarization; baselines with no
# redundant representative (in either ordering) stay zero and fully flagged
for pp in ['xx','yy']:
    pn = aipy.miriad.str2pol[pp]
    pid = np.where(uv.polarization_array==pn)[0][0]
    for ii in range(uv.Nbls):
        i = a1[ii]
        j = a2[ii]
        ai = ant_dict[i]
        aj = ant_dict[j]
        if (ai,aj) in reds_ind.keys(): vis = mdvis[pp][reds_ind[(ai,aj)]]
        elif (aj,ai) in reds_ind.keys(): vis = mdvis[pp][reds_ind[(aj,ai)]].conj()
        else: continue
        uv.data_array[:,0][:,:,pid][ii::uv.Nbls] = vis
        uv.flag_array[:,0][:,:,pid][ii::uv.Nbls] = flag[pp]
        uv.nsample_array[:,0][:,:,pid][ii::uv.Nbls] = sample*np.logical_not(mask[pp])
outuvfits = opts.outpath + obsid + '_mvis.uvfits'
print ' Writing ' + outuvfits
uv.write_uvfits(outuvfits,spoof_nonessential=True)
[ "wenyang_li@brown.edu" ]
wenyang_li@brown.edu
925f40e33fde6a2820212b79be8eddad514edb16
5447499810138e0fe78122b01d30924322fe26d8
/examples/example.py
cc1fb3687a0e65da725a414445a356da1a315263
[ "MIT" ]
permissive
mathfac/habitat-api
d225925b504d077e142d74af6849ce564d8fd8c9
6a49edfca53f0fec99fd0f0c8e5d4828589167f2
refs/heads/master
2020-05-09T13:12:42.695103
2019-04-13T08:47:43
2019-04-13T08:47:43
181,142,429
0
0
MIT
2019-04-13T08:47:44
2019-04-13T08:26:24
Python
UTF-8
Python
false
false
708
py
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import habitat


def example():
    """Create a PointNav environment and run one episode of random actions."""
    config = habitat.get_config(config_file="tasks/pointnav.yaml")
    env = habitat.Env(config=config)
    print("Environment creation successful")

    observations = env.reset()
    print("Agent stepping around inside environment.")

    step_count = 0
    while not env.episode_over:
        # sample a random action from the action space each step
        observations = env.step(env.action_space.sample())
        step_count += 1

    print("Episode finished after {} steps.".format(step_count))


if __name__ == "__main__":
    example()
[ "abhishekkadiyan@gmail.com" ]
abhishekkadiyan@gmail.com
bca775c71a982c091d9ff1e42bd3398929ca5c9d
c0bc52faf567e7c473bf829e266f6bac0ebdddfe
/geo_prior/geo_prior/grid_predictor.py
197148afbab9891a59b547e8e3e050618ba8e97d
[ "Apache-2.0" ]
permissive
daima2017/space2vec
3c34318eceb9ac8cc288c58bc8770d4d409d2851
a29793336e6a1ebdb497289c286a0b4d5a83079f
refs/heads/master
2023-03-24T22:26:29.575726
2020-06-07T00:39:57
2020-06-07T00:39:57
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,307
py
""" Class for making dense predictions on a 2D grid. """ import numpy as np import torch import math import sys sys.path.append('../') import geo_prior.utils as ut class GridPredictor: def __init__(self, mask, params, mask_only_pred=False): ''' Args: mask: (1002, 2004) mask for the earth, (lat, lon ), so that when you plot it, it will be naturally the whole globe ''' # set up coordinates to make dense prediction on grid self.device = params['device'] self.params = params self.mask = mask self.use_date_feats = params['use_date_feats'] self.spa_enc_type = params["spa_enc_type"] # np.gradient compute the x (np.gradient(mask)[0]) and y (np.gradient(mask)[1]) partial drevative self.mask_lines = (np.gradient(mask)[0]**2 + np.gradient(mask)[1]**2) self.mask_lines[self.mask_lines > 0.0] = 1.0 # set up feature grid this will be height X width X num feats # gird_lon: (2004), a list of lon, but from [-1, 1] grid_lon = torch.linspace(-1, 1, mask.shape[1]).to(self.device) # torch.tensor.repeat() like numpy.tile() # grid_lon: (1002, 2004, 1) grid_lon = grid_lon.repeat(mask.shape[0],1).unsqueeze(2) # grid_lat: (1002), a list of lat, but from [-1, 1] grid_lat = torch.linspace(1, -1, mask.shape[0]).to(self.device) # grid_lat: (1002, 2004, 1) grid_lat = grid_lat.repeat(mask.shape[1], 1).transpose(0,1).unsqueeze(2) dates = torch.zeros(mask.shape[0], mask.shape[1], 1, device=self.device) if self.spa_enc_type == "geo_net": if self.use_date_feats: # loc_time_feats: (1002, 2004, 3), 3 means (lon, lat, date) loc_time_feats = torch.cat((grid_lon, grid_lat, dates), 2) loc_time_feats = ut.encode_loc_time(loc_time_feats[:,:,:2], loc_time_feats[:,:,2], concat_dim=2, params=params) else: loc_time_feats = torch.cat((grid_lon, grid_lat), 2) loc_time_feats = ut.encode_loc_time(loc_time_feats[:,:,:2], None, concat_dim=2, params=params) elif self.spa_enc_type in ut.get_spa_enc_list(): assert self.use_date_feats == False grid_lon = grid_lon * 180 grid_lat = grid_lat * 90 # loc_time_feats: (1002, 
2004, 2) loc_time_feats = torch.cat((grid_lon, grid_lat), 2) else: raise Exception("spa_enc not defined!!!") self.feats = loc_time_feats # for mask only prediction if mask_only_pred: # get 1-d index in mask where value == 1 self.mask_inds = np.where(self.mask.ravel() == 1)[0] # feats_local: all (lon, lat, date) feature where mask value == 1 self.feats_local = self.feats.reshape(self.feats.shape[0]*self.feats.shape[1], self.feats.shape[2])[self.mask_inds, :].clone() def dense_prediction(self, model, class_of_interest, time_step=0, mask_op=True): ''' Given model, we show the probability distribution over the world of the class_of_interest Return: grid_pred: (1002, 2004) ''' # make prediction for entire grid at different time steps - by looping over columns # time_step should be in range [0, 1] # feats_change_time: (1002, 2004, 3) or (1002, 2004, 2) feats_change_time = self.feats.clone() if self.use_date_feats: feats_change_time = self.update_date_feats(feats_change_time, time_step) grid_pred = np.zeros(self.mask.shape, dtype=np.float32) model.eval() with torch.no_grad(): # loop throough each longitude for col in range(feats_change_time.shape[1]): # pred: (batch_size) pred = model(feats_change_time[:,col,:], class_of_interest=class_of_interest) grid_pred[:, col] = pred.cpu().numpy() if mask_op: return grid_pred*self.mask + self.mask_lines else: return grid_pred def dense_prediction_sum(self, model, time_step=0, mask_op=True): # make prediction for entire grid at different time steps - by looping over columns # takes the mean prediction for each class feats_change_time = self.feats.clone() if self.use_date_feats: feats_change_time = self.update_date_feats(feats_change_time, time_step) grid_pred = np.zeros(self.mask.shape, dtype=np.float32) model.eval() with torch.no_grad(): for col in range(feats_change_time.shape[1]): pred = model(feats_change_time[:,col,:]).sum(1) grid_pred[:, col] = pred.cpu().numpy() max_val = grid_pred.max() if mask_op: return grid_pred*self.mask 
+ self.mask_lines, max_val else: return grid_pred, max_val def dense_prediction_masked(self, model, class_of_interest, time_step): # only masks predictions for valid datapoints if self.use_date_feats: self.feats_local = self.update_date_feats(self.feats_local, time_step) model.eval() with torch.no_grad(): pred = model(self.feats_local, class_of_interest=class_of_interest) grid_pred = self.create_full_output(self, pred.cpu().numpy()) return grid_pred def dense_prediction_masked_feats(self, model, time_step): # only masks predictions for valid datapoints if self.use_date_feats: feats_local = self.update_date_feats(self.feats_local, time_step) model.eval() with torch.no_grad(): feats = model(self.feats_local, return_feats=True) return feats def create_full_output(self, pred): ''' Given a global prediction matrix by using prediction of all valid data points Fill out the mask Args: pred: (..., len(self.mask_inds)), prediction of all valid data points ''' grid_pred = np.zeros(self.mask.shape[0]*self.mask.shape[1], dtype=np.float32) grid_pred[self.mask_inds] = pred return grid_pred.reshape((self.mask.shape[0], self.mask.shape[1])) def update_date_feats(self, feats, time_step): # helper function - for visualization we want to vary the date offset = 0 if self.params['loc_encode'] == 'encode_cos_sin': offset = 4 elif self.params['loc_encode'] == 'encode_3D': offset = 3 if len(feats.shape) == 2: if self.params['date_encode'] == 'encode_cos_sin': feats[:,offset] = math.sin(math.pi*(2*time_step - 1)) feats[:,offset+1] = math.cos(math.pi*(2*time_step - 1)) elif self.params['date_encode'] == 'encode_none': feats[:,offset] = (2*time_step - 1) else: if self.params['date_encode'] == 'encode_cos_sin': feats[:,:,offset] = math.sin(math.pi*(2*time_step - 1)) feats[:,:,offset+1] = math.cos(math.pi*(2*time_step - 1)) elif self.params['date_encode'] == 'encode_none': feats[:,:,offset] = (2*time_step - 1) return feats
[ "gengchen.mai@gmail.com" ]
gengchen.mai@gmail.com
a4c063576e93f33f70d241c426e780fbedee2dca
bc15d20fc0ed12059debf7a5fb7a38ba1c1d179f
/dgp/urls.py
4768ecc62f82ebf2501701f763f5d632b701bbae
[]
no_license
Spikenstein/my-first-blog
aaf89acab9b9076cd5c88e00c32ee2ce2bab5496
9b3e06dfb0851a9530cd735fc8a5efced18c7801
refs/heads/master
2021-01-20T18:24:16.116997
2016-06-28T14:42:35
2016-06-28T14:42:35
61,197,924
0
0
null
null
null
null
UTF-8
Python
false
false
354
py
from django.conf.urls import url

from . import views

# URL routes for this app: a run list, a run detail page, and two
# run-creation views.
urlpatterns = [
    # index: list of runs
    url(r'^$', views.run_list, name='run_list'),
    # detail view for one run, keyed by its integer primary key
    url(r'^run/(?P<pk>[0-9]+)/$', views.run_detail, name='run_detail'),
    url(r'^newSelectionRun/$', views.newSelectionRun, name='newSelectionRun'),
    url(r'^newParametersRun/$', views.newParametersRun, name='newParametersRun'),
]
[ "stephane.picq@gmail.com" ]
stephane.picq@gmail.com
46ce65b6676b954ecbf444ddd7b6a9e00ba28ce4
9b342f025762c0ae36fa9f30e3b3456e7ecbddbf
/resources/user.py
c87a02872341e8e2085e37bf7e86272fab9dab08
[]
no_license
skpeters87/flask-store-api
6ff973e921c7be5e4992ca2e493a009c628a8c3a
f1c5b422b4d3fae759d0328e22995f3c064a6e92
refs/heads/master
2023-05-10T09:49:29.620924
2020-05-25T01:55:25
2020-05-25T01:55:25
266,638,810
0
0
null
2023-05-01T21:50:45
2020-05-24T22:58:43
Python
UTF-8
Python
false
false
1,055
py
from flask_restful import Resource, reqparse

from models.user import UserModel


class UserRegister(Resource):
    """REST resource that registers and deletes users by username."""

    # shared request parser: both endpoints require username + password strings
    parser = reqparse.RequestParser()
    parser.add_argument('username',
                        type=str,
                        required=True,
                        help='invalid entry')
    parser.add_argument('password',
                        type=str,
                        required=True,
                        help='invalid entry')

    def post(self):
        """Create a new user; 400 if the username already exists, 201 on success."""
        data = UserRegister.parser.parse_args()

        if UserModel.find_by_username(data['username']):
            return {"message": "UserModel already exists"}, 400

        user = UserModel(**data)
        user.save_to_db()

        return {'message': 'UserModel created successfully'}, 201

    def delete(self):
        """Delete an existing user; 400 when the username is unknown.

        Bug fix: the original called UserModel.delete_from_db() on the
        class rather than on the found instance, so the intended row was
        never deleted (and the call would fail for an instance method).
        Also fixes the 'sucessfully' typo in the success message.
        """
        data = UserRegister.parser.parse_args()

        user = UserModel.find_by_username(data['username'])
        if user:
            user.delete_from_db()
            return {'message': 'user deleted successfully'}
        return {'message': 'user not found'}, 400
[ "skpeters87@gmail.com" ]
skpeters87@gmail.com
1713babd927c9dfa4224e1ad3277567e37cb7907
786027545626c24486753351d6e19093b261cd7d
/ghidra9.2.1_pyi/ghidra/app/util/bin/format/pdb2/pdbreader/symbol/AbstractUnknownMsSymbol.pyi
09fd912dc8f16775243884f29d4f2ce850338007
[ "MIT" ]
permissive
kohnakagawa/ghidra_scripts
51cede1874ef2b1fed901b802316449b4bf25661
5afed1234a7266c0624ec445133280993077c376
refs/heads/main
2023-03-25T08:25:16.842142
2021-03-18T13:31:40
2021-03-18T13:31:40
338,577,905
14
1
null
null
null
null
UTF-8
Python
false
false
702
pyi
import ghidra.app.util.bin.format.pdb2.pdbreader.symbol
import java.lang


class AbstractUnknownMsSymbol(ghidra.app.util.bin.format.pdb2.pdbreader.symbol.AbstractMsSymbol):
    """Type stub (.pyi) for Ghidra's AbstractUnknownMsSymbol Java class.

    Signatures only, for IDE/type-checker use against Ghidra's Jython
    runtime - hence the `long`/`unicode` names and bare @overload
    decorators, which are conventional in these generated stubs.
    """

    def emit(self, __a0: java.lang.StringBuilder) -> None: ...

    def equals(self, __a0: object) -> bool: ...

    def getClass(self) -> java.lang.Class: ...

    def getPdbId(self) -> int: ...

    def hashCode(self) -> int: ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    def toString(self) -> unicode: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...
[ "tsunekou1019@gmail.com" ]
tsunekou1019@gmail.com
cfa3740ba18f9384af22770130b7148306057883
a96f603b34525f97c4b2fdca9f329aa38ffcc18c
/models/result_time_table_model.py
a705bdd8dd445e922470a4421768a1975e585705
[]
no_license
mparlaktuna/capraz_sevkiyat2.0
d1fbdaaeeec4c4113448aa18b0e58457ca2ad0e5
3d350826084230e2c71b57e0b587e193d72b2985
refs/heads/master
2020-04-06T07:11:14.641477
2016-08-26T14:43:25
2016-08-26T14:43:25
59,899,015
0
0
null
null
null
null
UTF-8
Python
false
false
1,724
py
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *


class ResultTimeTableModel(QAbstractTableModel):
    """Qt table model over per-truck result times.

    Rows are trucks (``truck_name`` + index); columns are the labels taken
    from the first element of each ``(label, value)`` pair stored in
    ``results.times`` — TODO confirm that pair layout against the producer.
    """

    def __init__(self, results, number_of_trucks, truck_name):
        super(ResultTimeTableModel, self).__init__()
        self.times = results.times
        try:
            self.v_header = [truck_name + str(i) for i in range(number_of_trucks)]
            self.h_header = [a[0] for a in self.times[self.v_header[0]]]
        except Exception:
            # FIX: narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit). When times has no entry for the
            # first truck the headers stay unset and the counts report 0.
            pass

    def rowCount(self, QModelIndex_parent=None, *args, **kwargs):
        """Number of trucks, or 0 when headers could not be built."""
        try:
            a = len(self.v_header)
        except Exception:
            a = 0
        return a

    def columnCount(self, QModelIndex_parent=None, *args, **kwargs):
        """Number of time columns, or 0 when headers could not be built."""
        try:
            a = len(self.h_header)
        except Exception:
            a = 0
        return a

    def headerData(self, p_int, Qt_Orientation, int_role=None):
        """Vertical headers are truck names; horizontal are time labels."""
        if int_role == Qt.DisplayRole:
            if Qt_Orientation == Qt.Vertical:
                try:
                    return QVariant(self.v_header[p_int])
                except Exception:
                    return QVariant()
            elif Qt_Orientation == Qt.Horizontal:
                try:
                    return QVariant(self.h_header[p_int])
                except Exception:
                    return QVariant()
        else:
            return QVariant()

    def data(self, QModelIndex, int_role=None):
        """Display the value half of the (label, value) pair for the cell."""
        if not QModelIndex.isValid():
            return QVariant()
        if int_role == Qt.DisplayRole:
            try:
                return QVariant(self.times[self.v_header[QModelIndex.row()]][QModelIndex.column()][1])
            except Exception:
                return QVariant()
        else:
            return QVariant()
[ "mparlaktuna@gmail.com" ]
mparlaktuna@gmail.com
f1eface4f93946e2c307b7ffb8b98756f0dcffdb
209fb2ccbdfa10c69a3a1374dcd88386f443f092
/app_config.py
41450922309ba86494117f88509da54600575e18
[]
no_license
AKASOLOMID/flask_email
ede96c7d66176a3a7a858ef05e0569cc5ce7750a
fa5a37d608da29f0c00ab307d3ead6a64ffa078e
refs/heads/master
2021-01-10T19:09:38.909605
2014-07-02T20:50:01
2014-07-02T20:50:01
null
0
0
null
null
null
null
UTF-8
Python
false
false
363
py
class Config(object):
    # Base configuration: all switches off, file-backed database by default.
    DEBUG = False
    TESTING = False
    DATABASE_URI = 'db/email_flask.db'


class ProductionConfig(Config):
    # Same file-backed database as the base config.
    DATABASE_URI = 'db/email_flask.db'


class DevelopmentConfig(Config):
    DEBUG = True
    # NOTE(review): if this URI is consumed by SQLAlchemy, the in-memory
    # form is normally 'sqlite:///:memory:' (three slashes) — confirm
    # against whatever reads DATABASE_URI before relying on this value.
    DATABASE_URI = 'sqlite://:memory:'


class TestingConfig(Config):
    # Test runs use a dedicated database file.
    TESTING = True
    DATABASE_URI = 'test/test.db'
[ "yoursyj@gmail.com" ]
yoursyj@gmail.com
13c3d1decbae42e219344347744f7622a9b4a4f7
281e2f5b71e013217e940dfa12d3e2fa58cdb5a5
/test.py
6aec51ecfc769a4b04c96a1fc765119c0785c962
[]
no_license
nh2/linux-bad-core-scheduling-investigation
e894f36f9ab9629dc018fdf5f3e8b3ecd24f8bbf
6bc14d9931a9d8fc70087370a7e0fa4ac32f72dd
refs/heads/master
2020-12-24T18:14:06.074397
2016-04-16T00:50:37
2016-04-16T00:50:37
56,279,666
2
1
null
null
null
null
UTF-8
Python
false
false
645
py
#!/usr/bin/env python3 # Requires python3 >= 3.4, and stress-ng to be installed. import subprocess import sys from statistics import stdev, mean def measure(): time_seconds = float(str(subprocess.check_output("stress-ng -c2 --cpu-method ackermann --cpu-ops 10 | grep -o '[0-9][0-9\.]*s'", shell=True), encoding="utf-8").strip().rstrip("s")) return time_seconds sample = [measure() for x in range(1,50)] s = stdev(sample) m = mean(sample) relative_stdev = s / m print("Standard deviation: ", s) print("Relative standard deviation: ", relative_stdev) if relative_stdev < 0.16: print("not bugged") else: print("bugged") sys.exit(1)
[ "mail@nh2.me" ]
mail@nh2.me
03c796ab2acc97968c7de60928c9c4189fff980f
a79fb6cf661586952134e008c54bf32d7e191823
/chat_server/chats4/jiot_chat.py
f308b1fe2d102186e9f72b5312615b64dfd11b4f
[]
no_license
YeoHoonYun/jiot-service
556867e850bec6f36918c7d0fedd989a9d594ac9
e5fe271a18b3072ed94c37ed6abb5839a7a8bf8a
refs/heads/master
2022-02-14T01:41:04.166152
2019-08-28T13:02:38
2019-08-28T13:02:38
204,930,618
0
0
null
null
null
null
UTF-8
Python
false
false
2,324
py
from flask import Flask, request, abort, jsonify
from flask_socketio import SocketIO
from flask_cors import CORS, cross_origin
from pytz import timezone
from datetime import datetime
from chat_test import *
import json, pymongo, pprint

# Earlier, commented-out app setup kept from the original:
# app = Flask(__name__)
# app.config['SECRET_KEY'] = 'vnkdjnfjknfl1232#'
# cors = CORS(app, resources={r"/*": {"origins": "*"}})
# socketio = SocketIO(app)
#
# @app.route('/device1', methods=['POST', 'OPTIONS'])
# @cross_origin()

app = Flask(__name__)
app.config['SECRET_KEY'] = 'vnkdjnfjknfl1232#'
cors = CORS(app, resources={r"*": {"origins": "*"}})
socketio = SocketIO(app)


@app.route('/device1', methods=['POST', "GET"])
@cross_origin(app)
def foo():
    # Chat relay endpoint: persist the incoming JSON message to MongoDB,
    # then fan it out to every uid in uidList over Socket.IO.
    if not request.json:
        abort(400)
    result = json.loads(request.data)
    try:
        # mongodb — NOTE(review): the except clause simply retries the exact
        # same call once; a second failure propagates to the caller.
        mongoUpdate(json.loads(json.dumps(result)))
    except:
        mongoUpdate(json.loads(json.dumps(result)))
    # Normalise usrPk to a list so downstream code can iterate it.
    if type(result["usrPk"]) != list:
        result["usrPk"] = [result["usrPk"]]
    result["dt"] = get_kst()
    # Message type 103005 (either key spelling): run word analysis on the
    # payload text and rewrite the message body.
    if result.get("msgtyp") == 103005 or result.get("msgTyp") == 103005:
        massage = json.loads(result["message"])  # NOTE(review): typo for "message"
        result["message"] = json.dumps({
            "ID": massage["ID"],
            "msg": word_anal(massage["msg"], result)
        })
        result["tit"] = "완료 메시지"
    for uid in result.get("uidList"):
        print(uid)
        # NOTE(review): this condition currently has no effect (body is pass).
        if(result.get('isShowMsg') == False or result.get("msgTyp") == 103001):
            pass
        pprint.pprint(result)
        socketio.emit(str(uid), result, callback=messageReceived)
    return json.dumps(result)


def get_kst():
    # Current time in Korea Standard Time, formatted for the payload.
    fmt = "%Y-%m-%d %H:%M:%S %Z%z"
    KST = datetime.now(timezone('Asia/Seoul'))
    return KST.strftime(fmt)


def mongoUpdate(data):
    # Append the raw message document to jiot_log.send_messages.
    # NOTE(review): credentials are hard-coded in the URI — move to config;
    # `insert` is also deprecated in modern pymongo (use insert_one).
    conn = pymongo.MongoClient('mongodb://jiguem:jigeum!@183.98.179.130:27017/default_db?authSource=jiot_log')
    db = conn.get_database('jiot_log')
    collection = db.get_collection('send_messages')
    collection.insert(data)


def messageReceived(methods=['GET', 'POST']):
    # Socket.IO emit acknowledgement callback.
    print('message was received!!!')


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5003, debug=True)
[ "cjswo9207@gmail.com" ]
cjswo9207@gmail.com
d0c94eb069edcb59bae5016f312fd58240e8b083
c930514f5620da7d8aea39c51c22bffa74d02d1b
/atmPy/radiation/rayleigh/lab.py
02c7db32db2f7488242cde2a1588d021e44f33ab
[ "MIT" ]
permissive
hagne/atm-py
0828e1cc4d4673fe99ea3ed7d6f64baf8d2ed749
f2cc0832ccedb2c52f222a6754e83ce553385fb1
refs/heads/master
2023-08-19T03:31:44.251118
2023-08-03T21:55:22
2023-08-03T21:55:22
50,296,187
4
3
MIT
2019-03-06T16:29:58
2016-01-24T16:24:53
Python
UTF-8
Python
false
false
907
py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep  8 11:27:53 2022

@author: hagen
"""
# import numpy as np


def rayleigh_od_johnsmethod(pressure_surface, wavelength, pressure_standard=1013.25):
    """
    This is the method to estimate total collum OD from rayleigh scattering
    that John is using in his AOD analysis (aod_analysis.f)

    Parameters
    ----------
    pressure_surface : float
        Surface pressure (same units as pressure_standard).
    wavelength : float
        Wavelength in nanometres.
    pressure_standard : float, optional
        Reference pressure. The default is 1013.25.

    Returns
    -------
    float
        Rayleigh optical depth, scaled by pressure_surface/pressure_standard.
    """
    # Work with the wavelength expressed in micrometres.
    wl_um = wavelength / 1000.
    # Wavelength-dependent exponent and the resulting power-law term.
    power_term = wl_um ** (-4.15 + (0.2 * wl_um))
    # Pressure-scaled Rayleigh OD.
    return (0.0088 * power_term * pressure_surface) / pressure_standard
[ "hagen.telg@gmail.com" ]
hagen.telg@gmail.com
df321da5e6a1fdf6dea7212baced4314bf3ec1eb
0b221e8bdd113fa4d28dfd7065440832f31ec47d
/L5/lab5/db_utils.py
4548a5bf9b11f27508bc17b3527d86f667fa7e2b
[]
no_license
vyklyuk/python_labs
7705bfc0da07ff029e98b891bdd1a01d23a942c8
15ba9397471ae112b6c406d4bad6e6ffbb6afea6
refs/heads/master
2022-12-31T19:48:34.043975
2020-10-16T12:28:29
2020-10-16T12:28:29
297,615,082
0
1
null
2020-10-16T12:28:30
2020-09-22T10:31:04
Python
UTF-8
Python
false
false
2,114
py
from lab5.models import Session, Users, Wallets, Transactions


def list_users(email=None, first_name=None, last_name=None):
    """Return users matching any combination of the given LIKE patterns.

    BUG FIX: the original built all three filters against ``Users.email``,
    so first_name/last_name were silently matched against the email column.
    (Assumes the Users model declares first_name/last_name columns, as the
    parameter names imply — confirm against lab5/models.py.)
    """
    session = Session()
    filters = []
    if email:
        filters.append(Users.email.like(email))
    if first_name:
        filters.append(Users.first_name.like(first_name))
    if last_name:
        filters.append(Users.last_name.like(last_name))
    return session.query(Users).filter(*filters).all()


def list_wallets(*filters):
    """Return wallets (joined to their owners) matching *filters*."""
    session = Session()
    return (
        session.query(Wallets)
        .join(Users)
        .filter(*filters)
        .all()
    )


def list_transactions_for_wallet(user_uid, wallet_id):
    """Return all transactions touching the user's wallet, oldest first.

    Raises sqlalchemy NoResultFound (via .one()) when the wallet does not
    exist or is not owned by user_uid.
    """
    session = Session()
    # Ownership check: .one() raises if the wallet isn't this user's.
    session.query(Wallets).filter_by(uid=wallet_id, owner_uid=user_uid).one()

    transactions_from = (
        session.query(Transactions)
        .join(Wallets, Transactions.from_wallet_uid == Wallets.uid)
        .filter(
            Wallets.owner_uid == user_uid,
            Transactions.from_wallet_uid == wallet_id
        )
    )
    transactions_to = (
        session.query(Transactions)
        .join(Wallets, Transactions.to_wallet_uid == Wallets.uid)
        .filter(
            Wallets.owner_uid == user_uid,
            Transactions.to_wallet_uid == wallet_id
        )
    )
    # union() de-duplicates rows that appear in both directions.
    return (
        transactions_from.union(transactions_to)
        .order_by(Transactions.datetime)
        .all()
    )


def create_entry(model_class, *, commit=True, **kwargs):
    """Insert a new model_class row built from kwargs; commit by default."""
    session = Session()
    entry = model_class(**kwargs)
    session.add(entry)
    if commit:
        session.commit()
    return entry


def get_entry_by_uid(model_class, uid, **kwargs):
    """Fetch exactly one row by uid (plus extra filter kwargs)."""
    session = Session()
    return session.query(model_class).filter_by(uid=uid, **kwargs).one()


def update_entry(entry, *, commit=True, **kwargs):
    """Set kwargs as attributes on entry; commit by default."""
    session = Session()
    for key, value in kwargs.items():
        setattr(entry, key, value)
    if commit:
        session.commit()
    return entry


def delete_entry(model_class, uid, *, commit=True, **kwargs):
    """Delete rows matching uid (plus extra filter kwargs); commit by default."""
    session = Session()
    session.query(model_class).filter_by(uid=uid, **kwargs).delete()
    if commit:
        session.commit()
[ "vyklyuk@ukr.net" ]
vyklyuk@ukr.net
cc3684181fbe33907028bc6c8d5d0ffd573945be
8cfda94bb4b421b9b6fbb22cecb4ff2ae75cb8ce
/start_python/day01.py
443db7d10dc2a92b39f63f4dab6b4b3412ee1cac
[]
no_license
Royal-Chokobi/pythonBasic
15e79de919766dfc2ae3fcd72e4bf15e352ffd82
491516c0934d0e8851c62156dc5c543de84e297c
refs/heads/master
2020-04-28T22:19:20.137566
2019-05-30T13:09:22
2019-05-30T13:09:22
175,612,898
0
0
null
null
null
null
UTF-8
Python
false
false
1,425
py
import keyword


def changeNumber(x, y):
    # Swap two values using Python tuple assignment and print before/after.
    print('교환전 : ', x, y)
    x, y = y, x
    print('교환후 : ', x, y)


print('hello')
print('기초수업')

# Sum three scores.
score1 = 100
score2 = 200
score3 = 300
total = score1 + score2 + score3
print(score1, score2, score3)
print(total)

# Basic types.
x = 10
y = 2.5
print(type(x))
print(type(y))
print(keyword.kwlist)

s1 = 90
s2 = 90
s3 = 90
total_s = s1 + s2 + s3
print(total_s)

# Manual three-step swap via a temporary variable.
a = 10
b = 20
print('a와 b를 출력 : ', a, b)
a1 = a
a = b
b = a1
'''
파이썬은 자료 교환이
a, b = b, a
가 적용이 됨.
'''
# (Translation of the note above: in Python, swapping is done with a, b = b, a.)
print('a와 b를 교환해서 출력 : ', a, b)
changeNumber(10, 20)

print('==============================수치 자료형과 계산하기==========================================')


def numberCalculation(x, y):
    # Demonstrate arithmetic operators, then swap operands and show division ops.
    print('더하기 : ', x + y)      # addition
    print('빼기 : ', x - y)        # subtraction
    print('곱하기 : ', x * y)      # multiplication
    print('제곱 : ', x ** y)       # power
    x, y = y, x
    print('x,y = y,x')
    print('나누기 : ', x / y)      # true division
    print('몫구하기 : ', x // y)   # floor division
    print('나머지 : ', x % y)      # modulo


numberCalculation(2, 11)

print('=================================문제~ ==================================================')
# Exercise: extract each decimal digit of 1479.
x = 1479
a = (x // 1000)
b = (x // 100) % 10
c = ((x // 10) % 100) % 10
d = x % 10
print(a)
print(b)
print(c)
print(d)

print('=========================')
# Exercise: convert 2500 seconds to hours/minutes/seconds.
n = 2500
h = (n // (60 * 60))
m = (n - (h * (60 ** 2))) // 60
s = n - ((h * (60 ** 2)) + (m * 60))
print(h)
print(m)
print(s)

# Floating-point representation demo: prints 0.30000000000000004.
print(0.2 + 0.1)
[ "ollehing@gmail.com" ]
ollehing@gmail.com
88993bd670d098610c4e52f4cd46b2cf58f810f2
fb13921ed3751e48de562de02e939df01f30e36e
/alunos.py
7d95b68c0e9f286bfaefd2543362f33654b8674a
[]
no_license
sevendri/fiap
a7f7084de7aab826ed7d86029b58c63e933c52e4
ed4a990c831e180892789d0b7ddb66cbb404718b
refs/heads/master
2022-12-08T13:00:45.485151
2019-07-13T13:49:59
2019-07-13T13:49:59
196,723,643
0
0
null
2022-12-08T05:52:42
2019-07-13T13:16:30
JavaScript
UTF-8
Python
false
false
2,283
py
from datetime import datetime
from flask import jsonify, make_response, abort


def get_timestamp():
    """Current time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    return datetime.now().strftime(("%Y-%m-%d %H:%M:%S"))


# In-memory "database" of people keyed by last name.
PEOPLE = {
    "Jones": {
        "fname": "Indiana",
        "lname": "Jones",
        "timestamp": get_timestamp(),
        "teste": "func",
    },
    " Sparrow": {
        "fname": "Jack",
        "lname": " Sparrow",
        "timestamp": get_timestamp(),
    },
    "Snow": {
        "fname": "John",
        "lname": "Snow",
        "timestamp": get_timestamp(),
    },
}


def read_all():
    """Return every person sorted by last name, with Content-Range headers."""
    dict_alunos = [PEOPLE[key] for key in sorted(PEOPLE.keys())]
    alunos = jsonify(dict_alunos)
    qtd = len(dict_alunos)
    content_range = "alunos 0-" + str(qtd) + "/" + str(qtd)
    # Headers required by admin front-ends (e.g. react-admin) that read
    # Content-Range across origins.
    alunos.headers['Access-Control-Allow-Origin'] = '*'
    alunos.headers['Access-Control-Expose-Headers'] = 'Content-Range'
    alunos.headers['Content-Range'] = content_range
    return alunos


def read_one(lname):
    """Return one person by last name, or abort 404."""
    if lname in PEOPLE:
        person = PEOPLE.get(lname)
    else:
        # FIX: removed leftover debug junk ("AAAA") from the error message.
        abort(
            404, "Person with last name {lname} not found".format(lname=lname)
        )
    return person


def create(person):
    """Create a person keyed by last name; abort 406 on duplicates."""
    lname = person.get("lname", None)
    fname = person.get("fname", None)
    if lname not in PEOPLE and lname is not None:
        PEOPLE[lname] = {
            "lname": lname,
            "fname": fname,
            "timestamp": get_timestamp(),
        }
        return make_response(
            "{lname} successfully created".format(lname=lname), 201
        )
    else:
        abort(
            406,
            "Person with last name {lname} already exists".format(lname=lname),
        )


def update(lname, person):
    """Update a person's first name (and timestamp), or abort 404."""
    if lname in PEOPLE:
        PEOPLE[lname]["fname"] = person.get("fname")
        PEOPLE[lname]["timestamp"] = get_timestamp()
        return PEOPLE[lname]
    else:
        abort(
            404, "Person with last name {lname} not found".format(lname=lname)
        )


def delete(lname):
    """Delete a person by last name, or abort 404."""
    if lname in PEOPLE:
        del PEOPLE[lname]
        return make_response(
            "{lname} successfully deleted".format(lname=lname), 200
        )
    else:
        abort(
            404, "Person with last name {lname} not found".format(lname=lname)
        )
[ "sevendri@hotmail.com" ]
sevendri@hotmail.com
ed89df92447a5ccef8d43ce951e44edf085f4982
86c5cd3c4a8937177a8566095d10cd7dba54b1f9
/Python4_class——data/python_json/Python_json.py
d57915cb1f1834cd4469c1ce66eb59f72d6e8ea3
[]
no_license
lynnoflynn/python_practice
613de6bca06cd2e8df781fea9f712211860090d8
3908f85183f812bf4415f0ec7cdeb5e910159fd1
refs/heads/master
2022-12-30T06:09:33.542886
2020-10-14T10:09:14
2020-10-14T10:09:14
289,768,303
0
0
null
null
null
null
UTF-8
Python
false
false
508
py
import json #json.dump 表示把python对象写入在文件中 #json.dumps 表示把python对象转化成字符串 dict_hogwarts = { "a": [1, 2, 3], "name": ["spider man", "战士"] } # 在data.json中写入Python object数据 # with open("data.json", "w") as f: # json.dump(dict_hogwarts, f) # print(type(dict_hogwarts)) # print(type(json.dumps(dict_hogwarts))) # <class 'dict'> # <class 'str'> json_load = json.load(open("data.json")) print("使用json_load的数据类型为",type(json_load))
[ "378774164@qq.com" ]
378774164@qq.com
34923e2be2a7691a288d437371f696be6108d29b
488c2a204711fc16090601a02684564cf99bf56b
/openCv/Morphological_Transformations.py
ff59f11087cbff02247cf17f8a76426c233a0edf
[]
no_license
malhotra5/Completed-Projects
792ee8a2bf2d66e0cca0a9152e3e8c1a9876da85
f8533a9030cc67234571c74d5c3ae6d382d93d65
refs/heads/master
2021-09-07T15:20:50.762633
2018-02-24T23:44:39
2018-02-24T23:44:39
111,261,854
2
0
null
null
null
null
UTF-8
Python
false
false
1,016
py
import cv2
import numpy as np

# Live webcam demo of morphological transformations on an HSV colour mask.
cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    # Convert to HSV so the colour range can be thresholded robustly.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # HSV bounds for the tracked colour (named "yellow" in the original,
    # though H=29..66 spans yellow-green — tune per camera/lighting).
    lower_yellow = np.array([29, 97, 0])
    upper_yellow = np.array([66, 255, 255])

    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    res = cv2.bitwise_and(frame, frame, mask=mask)

    # Erosion and dilation
    kernel = np.ones((5, 5), np.uint8)
    ##erosion = cv2.erode(mask, kernel, iterations = 1)
    ##dilation = cv2.dilate(mask, kernel, iterations = 1)

    # Opening (erode then dilate: removes speckle noise) and
    # closing (dilate then erode: fills small holes in the mask).
    opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

    cv2.imshow("Frame", frame)
    cv2.imshow("Result", res)
    ##cv2.imshow("Erosion", erosion)
    ##cv2.imshow("dialation", dilation)
    cv2.imshow("Opening", opening)
    cv2.imshow("Closing", closing)

    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cv2.destroyAllWindows()
cap.release()
[ "noreply@github.com" ]
malhotra5.noreply@github.com
dbdda969b4f93ff83be9126e3528a31fdc8bacf4
cbc3c3e602996fe5561b06545593f1a9a2401f42
/heranca/EmpregadoHorista.py
607bc67e665402c8d950d86dc556ddda133ded26
[]
no_license
ricdtaveira/poo-python
fc06040acd032975e355edf62a8c2983f1d37972
3ecba2ccfcc84f79cfc1ef5247c017e58f36c8d4
refs/heads/master
2021-10-24T17:57:57.051213
2019-03-27T00:10:47
2019-03-27T00:10:47
105,895,238
0
0
null
null
null
null
UTF-8
Python
false
false
484
py
"""Hourly employee class (Empregado Horista)."""
from Empregado import Empregado


class EmpregadoHorista(Empregado):
    """Employee paid by the hour: pay = logged hours * hourly rate."""

    def __init__(self, primeiro_nome, ultimo_nome, salario):
        """Initialise the base employee and start with zero logged hours."""
        super().__init__(primeiro_nome, ultimo_nome, salario)
        self.__horas = 0

    def calcular_pagamento(self):
        """Return the pay owed for the hours logged so far."""
        return self.salario * self.__horas

    def adicionar_horas(self, horas):
        """Add *horas* worked hours to the running total."""
        self.__horas += horas
[ "ricdtaveira@gmail.com" ]
ricdtaveira@gmail.com
4ebdf94e0d8f96f9ac8d65ae46ad57e3e7daeee4
73332abdcadb62f4f262d0c30856c3c257a9ee7d
/tests/environments/__init__.py
d8f2ee2ede569a4f0b62f68bcf1956da8c7c993e
[ "BSD-2-Clause" ]
permissive
code-google-com/oyprojectmanager
454435604cc150c1b54ec2c54294e0fa05490f82
3085ecbe1cc04a73ec69b4848b789009546feae7
refs/heads/master
2021-01-19T02:40:56.342086
2015-01-26T16:40:00
2015-01-26T16:40:00
32,266,400
1
2
null
null
null
null
UTF-8
Python
false
false
206
py
# -*- coding: utf-8 -*- # Copyright (c) 2009-2012, Erkan Ozgur Yilmaz # # This module is part of oyProjectManager and is released under the BSD 2 # License: http://www.opensource.org/licenses/BSD-2-Clause
[ "eoyilmaz@gmail.com" ]
eoyilmaz@gmail.com
40244fa82eb22dff011a90e2107e891cc04973fb
fb6c86ea2c21bf0d338bbc3b2d5ec2e9d06ac9a4
/debug_logger.py
3e531de42be351910d4d72ca9fbba1f254c590de
[ "MIT" ]
permissive
realtwister/LearnedEvolution
6bebe802d0171ddaa5cc56006d07949686267265
2ec49b50a49acae9693cfb05ac114dfbcc4aa337
refs/heads/master
2020-03-12T05:37:27.468651
2019-01-11T14:50:40
2019-01-11T14:50:40
130,467,388
0
0
null
null
null
null
UTF-8
Python
false
false
3,516
py
# Experiment script: runs the learnedevolution PPO-mean algorithm on
# Rosenbrock problems while wiring up TensorBoard-style loggers.
# NOTE(review): the trailing semicolons throughout are redundant in Python
# and kept only to preserve the original text.
import tensorflow as tf;
import logging;
import os;

from learnedevolution.tensorboard.algorithm_logger import AlgorithmLogger;
from learnedevolution.tensorboard.generator_logger import GeneratorLogger;

import learnedevolution as lev;
from learnedevolution.rewards.differential_reward import DifferentialReward;
from learnedevolution.rewards.divergence_penalty import DivergencePenalty;
from learnedevolution.rewards.normalized_fitness_reward import NormalizedFitnessReward, DecayingMinimum, WindowMinimum, InitialMinimum, LaggingMaximum;
from learnedevolution.convergence.convergence_criterion import ConvergenceCriterion;
from learnedevolution.convergence.time_convergence import TimeConvergence;

logging.basicConfig(level="DEBUG");

# Experiment parameters.
dimension = 2;
population_size = 100;
seed = 1000;
base_path = '/tmp/thesis/logger/';

# Pick the next unused numbered log directory under base_path.
if True:
    i = 0;
    while os.path.exists(base_path + str(i)):
        i += 1;
    logdir = base_path + str(i)
else:
    logdir = base_path + "new_convergence"

# ("Pad bestaat al" is Dutch for "path already exists".)
if os.path.exists(logdir):
    raise Exception("Pad bestaat al");

# Convergence criteria
convergence_criterion = ConvergenceCriterion(reward_per_step=0.5, gamma=0.02);
time_convergence = TimeConvergence(120);
convergence_criteria = [time_convergence];

# Normalised-fitness reward built from several minimum/maximum trackers.
minima = [
    DecayingMinimum(0.95),
    WindowMinimum(20),
    InitialMinimum()
];
maxima = [
    LaggingMaximum()
]
normalized_fitness = NormalizedFitnessReward(minima, maxima)

# Reward mixture: only the normalised fitness contributes (weight 1).
rewards = {
    normalized_fitness: 1,
    DifferentialReward(): 0,
    DivergencePenalty(): 0};

ppo_mean = lev.targets.mean.BaselinePPOMean(dimension,
    population_size,
    rewards,
    convergence_criteria,
    logdir = logdir + "/agent");
mean_targets = {
    ppo_mean: 1,
    #lev.targets.mean.TensorforcePPOMean(dimension, population_size):1,
};

diag_covariance = lev.targets.covariance.DiagonalCovariance(0.2, [1, 2])
covariance_targets = {
    #lev.targets.covariance.ConstantCovariance():1,
    #lev.targets.covariance.AdhocCovariance():1,
    diag_covariance: 1,
};

algo = lev.Algorithm(dimension,
    mean_targets,
    covariance_targets,
    convergence_criteria,
    population_size=population_size);

# Attach loggers/recorders to the algorithm and its components.
log = AlgorithmLogger(algo, logdir = logdir);
covariance_log = log.create_child(diag_covariance);
covariance_log.recorder.watch('variance', 'variance')

mean_log = log.create_child(ppo_mean);
mean_log.recorder.watch('_current_reward', 'reward');

reward_log = mean_log.create_child(normalized_fitness);
reward_log.recorder.watch('_minimum', 'minimum')
reward_log.recorder.watch('_maximum', 'maximum');

convergence_log = log.create_child(algo._convergence_criteria[0]);
convergence_log.watch_scalar('epsilon', 'after_reset', once_in=10, tag="epsilon");

problem_generator = lev.problems.Rosenbrock.generator(dimension=dimension);
gen_log = GeneratorLogger(problem_generator, logdir);

# seeding
problem_generator.seed(seed);
algo.seed(seed);

#log.watch_scalar('_mean_fitness', 'before_reset', once_in=1, tag="mean_fitness");
#log.watch_histogram('_evaluated_fitness', 'before_reset', once_in=1, tag="evaluated_fitness");

i = 0;
for problem in problem_generator.iter(10000):
    if i % 100 == 0:
        # Every 100 problems: record a deterministic evaluation, then
        # restore the RNG state and step counter so it doesn't perturb
        # the training run.
        log.record(suffix="deterministic");
        gen_log.add_current('problem', algo.current_step + 1);
        random_state_backup = algo._random_state.get_state();
        mean, covariance = algo.maximize(problem.fitness, 100, True);
        algo._steps -= 1;
        algo._random_state.set_state(random_state_backup);
    log.record();
    mean, covariance = algo.maximize(problem.fitness, 100);
    i += 1;
[ "realtwister@gmail.com" ]
realtwister@gmail.com
e622faef1504224b73d7a18120b832beeda16411
0e9b70d8d4cfb691349d47c6d787ebbe5d4389e7
/djangoapp/settings.py
533b75041d7261b3b96166dce73d8a90f2666cef
[]
no_license
klimon69/Django-project
5252c4bffdcc5c478b8c18e4249b6a825b8c65d7
20f3207728425eef7ed62efa484ca9fe9156d177
refs/heads/master
2022-09-20T15:55:51.425948
2020-06-02T17:44:49
2020-06-02T17:44:49
268,866,852
0
0
null
null
null
null
UTF-8
Python
false
false
3,113
py
"""
Django settings for djangoapp project.

Generated by 'django-admin startproject' using Django 3.0.6.

For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and
# load from an environment variable before any production use.
SECRET_KEY = 'ax1gv%nhz9hw^_=0(@2h@&13*etwb_jaizna$d!bj-(ne#5&pj'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'firstapp',  # project-local app
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'djangoapp.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'djangoapp.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/

STATIC_URL = '/static/'
[ "aklimko@skillfactory.ru" ]
aklimko@skillfactory.ru
63530f10dc7fdc29a90fb8237fa885a8b7cc9f3b
590facf811b9ad0e55e5eafbe6a5ed796d76b521
/apps/meetup/migrations/0003_auto_20190428_1649.py
607f35d32482cdf39d2c2855c8736906f8e3ef7c
[]
no_license
wangonya/questioner_django
6193fa779121135b5c903fef599a5bc873107b52
b598d4337b3acc39adf3ef972e50f2d750376ac0
refs/heads/develop
2020-05-16T11:59:16.778797
2019-05-01T16:43:29
2019-05-01T16:43:29
183,032,935
2
1
null
2019-05-01T16:43:30
2019-04-23T14:29:33
Python
UTF-8
Python
false
false
1,083
py
# Generated by Django 2.2 on 2019-04-28 16:49 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('meetup', '0002_auto_20190428_0625'), ] operations = [ migrations.AddField( model_name='votesmodel', name='for_question', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='meetup.QuestionModel'), preserve_default=False, ), migrations.AddField( model_name='votesmodel', name='vote', field=models.SmallIntegerField(default=1), preserve_default=False, ), migrations.AddField( model_name='votesmodel', name='voter', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), preserve_default=False, ), ]
[ "kwangonya@gmail.com" ]
kwangonya@gmail.com
d7422c79219bab4b583ae36e290c879cd38b64d2
57db07fc4cc710b8ed5603e69217ed048649e142
/EmployeeManagement/cms/views.py
a0bdbe8aa3813dcc8bdfaafc74d1f770144f618a
[]
no_license
minetake/PythonProject
f6317da658ac5a9aeea3152fd6f5e3f0b58cd862
51df61672585a2aa3c57be5e3915c06a689e366f
refs/heads/master
2021-05-04T18:14:36.882702
2018-03-05T15:16:10
2018-03-05T15:16:10
120,097,945
0
0
null
2018-03-05T15:16:11
2018-02-03T14:23:26
HTML
UTF-8
Python
false
false
158
py
from django.http.response import HttpResponse
from django.shortcuts import render


def hello_template(request):
    """Render the static sample template for any request."""
    # NOTE(review): HttpResponse is imported but unused in this view.
    return render(request,'cms/sample.html')
[ "minetake@ray.ocn.ne.jp" ]
minetake@ray.ocn.ne.jp
c19206bb3fa9ce790369bf4471ac2334d1b5fb2c
939c1254636cc0e78419265286519f087aff27ff
/Code_PA2/q1/run.py
d436c10635f9fcef0ab51bb290a1c0724bd5ad3b
[]
no_license
vaibhavnayel/cs4011
6ced14aed32a8d4143d00833fbaa68731688107e
3ad508bf9a03e32d526b9e4f5869821759bab951
refs/heads/master
2021-05-15T03:24:42.795837
2018-01-27T15:23:25
2018-01-27T15:23:25
119,175,705
1
0
null
null
null
null
UTF-8
Python
false
false
3,749
py
# NOTE(review): Python 2 script (print statements without parentheses).
# Trains a least-squares linear classifier on DS3 with and without PCA,
# reports metrics, and plots the data with the decision boundaries.
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter

#running linear regression without dimensionality reduction
xtrain = np.genfromtxt('../../Data/DS3/train.csv', delimiter=',')
xtrain=np.hstack((np.ones((len(xtrain),1)),xtrain))#appending column of ones for bias
ytrain=(np.genfromtxt('../../Data/DS3/train_labels.csv', delimiter=',') -1.5)*2 #converting classes to +1 and -1 for convenience
xtest= np.genfromtxt('../../Data/DS3/test.csv', delimiter=',')
xtest=np.hstack((np.ones((len(xtest),1)),xtest))
ytest=(np.genfromtxt('../../Data/DS3/test_labels.csv', delimiter=',') -1.5)*2

#learning weights: closed-form least squares W = (X^T X)^-1 X^T y
XTX=np.dot(xtrain.T,xtrain)
XTY=np.dot(xtrain.T,ytrain)
W=np.dot(np.linalg.inv(XTX),XTY)

#evaluation: threshold the linear score at 0, mapping to +/-1
yhat=((np.dot(xtest,W)>0)-0.5)*2

#metrics
print 'Metrics for linear classification without dimensionality reduction:'
acc=sum(yhat==ytest)/float(len(ytest))
m=metrics.precision_recall_fscore_support(ytest,yhat,average='binary')
print 'Accuracy: ',acc
print 'Precision: ',m[0]
print 'Recall: ',m[1]
print 'F measure: ',m[2]
print 'Weights learned',W
print 'Boundary: %f + %fX + %fY + %fZ = 0'%(W[0],W[1],W[2],W[3])
print '\n'

# 3D scatter of both classes with the learned decision plane.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = np.arange(-2, 4, 1)
Y = np.arange(-5, 5, 1)
X, Y = np.meshgrid(X, Y)
# Solve the plane equation for Z.
Z=(-W[0]-X*W[1]-Y*W[2])/W[3]
surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
# First 1000 rows are class 1, the rest class 2 (per the plotting split).
ax.scatter(xtrain[:1000,1],xtrain[:1000,2],xtrain[:1000,3],alpha=0.5)
ax.scatter(xtrain[1000:,1],xtrain[1000:,2],xtrain[1000:,3],alpha=0.5)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_title('DS3 3D plot')
#ax.legend(['class 1','class 2','Decision boundary'])
plt.show()

#performing PCA
from sklearn.decomposition import PCA

#finding principal component
pca = PCA(n_components=1)
pca.fit(xtrain[:,1:])  # fit on features only (bias column excluded)
print 'Principal component learned: ',pca.components_

#projecting onto principal component, then re-adding the bias column
xtrain_pca=xtrain[:,1:].dot(pca.components_.T)
xtrain_pca=np.hstack((np.ones((len(xtrain_pca),1)),xtrain_pca))#adding bias column
xtest_pca=xtest[:,1:].dot(pca.components_.T)
xtest_pca=np.hstack((np.ones((len(xtest_pca),1)),xtest_pca))

#saving projected data
'''
np.savetxt("xtest_pca.csv", xtest_pca, delimiter=",")
np.savetxt("xtrain_pca.csv", xtrain_pca, delimiter=",")
'''

#training linear classifier on the 1-D projection
XTX=np.dot(xtrain_pca.T,xtrain_pca)
XTY=np.dot(xtrain_pca.T,ytrain)
W=np.dot(np.linalg.inv(XTX),XTY)

#evaluation
yhat=((np.dot(xtest_pca,W)>0)-0.5)*2
print 'Metrics for linear classification with PCA:'
acc=sum(yhat==ytest)/float(len(ytest))
m=metrics.precision_recall_fscore_support(ytest,yhat)
print 'Accuracy: ',acc
print 'Precision: ',m[0]
print 'Recall: ',m[1]
print 'F measure: ',m[2]
print 'Weights learned',W
print 'Boundary: X=', -W[0]/W[1]
print '\n'

#plotting the projected data and the 1-D boundary, twice (flat and by index)
fig = plt.figure(figsize=(7,9))
ax1 = fig.add_subplot(211)
ax1.scatter(xtrain_pca[:1000,1],np.zeros(1000),c='orange',alpha=0.5)
ax1.scatter(xtrain_pca[1000:,1],np.zeros(1000),c='blue',alpha=0.5)
ax1.plot([-W[0]/W[1],-W[0]/W[1]],[-1,1],c='black')
ax1.set_xlabel('Principal component')
ax1.set_title('projected xtrain plotted on x axis')
ax1.legend(['Decision boundary','class 1','class 2'])

ax2 = fig.add_subplot(212)
ax2.scatter(xtrain_pca[:1000,1],range(1000),c='orange',alpha=0.5)
ax2.scatter(xtrain_pca[1000:,1],range(1000),c='blue',alpha=0.5)
ax2.plot([-W[0]/W[1],-W[0]/W[1]],[0,1000],c='black')
ax2.set_xlabel('Principal component')
ax2.set_ylabel('Index')
ax2.set_title('projected xtrain plotted by index for clarity')
ax2.legend(['Decision boundary','class 1','class 2'])
plt.show()
[ "vaibhavnayel@gmail.com" ]
vaibhavnayel@gmail.com
50be68b9ed14bc6e7cfa7d618467ffe4b3831cf6
1fc35e54ee4723cfa3d13de713895eac30616847
/baekjun/stage solve/14.sort/2750.py
5c0dafb7b9d7c5d8d8c61146fee82535d5d55b6e
[]
no_license
yhs3434/Algorithms
02f55a5dc21085c0a17d9eaec5e3ba0cbd6d651d
24d234a301077aac1bc4efbb269b41a963cedccf
refs/heads/master
2021-07-12T19:21:00.446399
2021-01-20T01:44:27
2021-01-20T01:44:27
226,431,877
6
0
null
null
null
null
UTF-8
Python
false
false
732
py
# Sort numbers (baekjoon 2750)
# https://www.acmicpc.net/problem/2750

import sys

sys.setrecursionlimit(1000000)


def solution(nums):
    """Sort *nums* in place with quicksort and return the same list."""
    quickSort(nums, 0, len(nums) - 1)
    return nums


def quickSort(arr, left, right):
    """Lomuto-partition quicksort of arr[left..right] (both inclusive)."""
    if left >= right:
        return
    pivot = arr[right]
    i = left  # boundary: arr[left:i] holds elements < pivot
    for j in range(left, right):
        if arr[j] < pivot:
            # Idiomatic tuple swap replaces the original temp-variable dance.
            arr[i], arr[j] = arr[j], arr[i]
            i += 1
    # Place the pivot between the two partitions.
    arr[i], arr[right] = arr[right], arr[i]
    quickSort(arr, left, i - 1)
    quickSort(arr, i + 1, right)


if __name__ == "__main__":
    # FIX: guarded the judge I/O behind __main__ — the original read stdin at
    # import time, which made the module impossible to import or test.
    n = int(input())
    arrr = [int(input()) for _ in range(n)]
    for value in solution(arrr):
        print(value)
[ "yoonhs3434@naver.com" ]
yoonhs3434@naver.com
ab8b857f11a12924ef956794dd8bf6b7f006e0eb
ac5eb7df8515fb92071f9e0f64d1cf6467f4042d
/Python/common_sort_algorithms.py
d337581718e16665fb015d5c5a9c79554100a9d1
[]
no_license
Litao439420999/LeetCodeAlgorithm
6ea8060d56953bff6c03c95cf6b94901fbfbe395
9aee4fa0ea211d28ff1e5d9b70597421f9562959
refs/heads/master
2023-07-08T06:17:20.310470
2021-08-08T08:12:23
2021-08-08T08:12:23
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,997
py
#!/usr/bin/env python3
# encoding: utf-8
"""
@Filename: common_sort_algorithms.py
@Function: reference implementations of the classic sorting algorithms
@Python Version: 3.8
@Author: Wei Li
@Date:2021-07-09
"""


def quick_sort(nums, left, right):
    """In-place quicksort of nums[left:right] (right exclusive)."""
    if right - left < 2:
        return
    lo, hi = left, right - 1
    pivot = nums[lo]
    # Hoare-style hole filling: move the hole toward the middle, dropping
    # smaller items left of it and larger items right of it.
    while lo < hi:
        while lo < hi and nums[hi] >= pivot:
            hi -= 1
        nums[lo] = nums[hi]
        while lo < hi and nums[lo] <= pivot:
            lo += 1
        nums[hi] = nums[lo]
    nums[lo] = pivot
    quick_sort(nums, left, lo)
    quick_sort(nums, lo + 1, right)


def merge_sort(seq):
    """Return a sorted copy of *seq* via top-down merge sort."""
    if len(seq) <= 1:
        return seq
    half = len(seq) // 2
    # Sort each half recursively, then merge the two sorted halves.
    return merge(merge_sort(seq[:half]), merge_sort(seq[half:]))


def merge(left, right):
    """Merge two sorted lists into one new sorted list (stable)."""
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One side is exhausted; append the remainder of the other.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged


def insertion_sort(nums):
    """In-place insertion sort."""
    for idx in range(1, len(nums)):
        current = nums[idx]
        pos = idx
        # Shift larger elements right until current's slot is found.
        while pos > 0 and nums[pos - 1] > current:
            nums[pos] = nums[pos - 1]
            pos -= 1
        nums[pos] = current


def bubble_sort(nums):
    """In-place bubble sort."""
    size = len(nums)
    for sweep in range(size):
        # After each sweep the largest remaining element is in place.
        for k in range(size - sweep - 1):
            if nums[k] > nums[k + 1]:
                nums[k], nums[k + 1] = nums[k + 1], nums[k]


def selection_sort(nums):
    """In-place selection sort."""
    for i in range(len(nums)):
        smallest = i
        for j in range(i + 1, len(nums)):
            if nums[j] < nums[smallest]:
                smallest = j
        nums[i], nums[smallest] = nums[smallest], nums[i]


# ------------------------------
if __name__ == "__main__":
    # Demo run: the in-place sorts progressively re-sort the same list.
    nums = [1, 3, 5, 7, 2, 6, 4, 8, 9, 2, 8, 7, 6, 0, 3, 5, 9, 4, 1, 0]

    print("----------------Before Sorting----------------")
    print(nums)
    print("----------------After Sorting----------------")

    print("Python Sort Algorithm:")
    nums_sorted_python = sorted(nums)
    print(nums_sorted_python)

    print("Quick Sort Algorithm:")
    quick_sort(nums, 0, len(nums))
    print(nums)

    print("Merge Sort Algorithm:")
    merge_sort_result = merge_sort(nums)
    print(merge_sort_result)

    print("Insertion Sort Algorithm:")
    insertion_sort(nums)
    print(nums)

    print("Bubble Sort Algorithm:")
    bubble_sort(nums)
    print(nums)

    print("Selection Sort Algorithm:")
    selection_sort(nums)
    print(nums)
[ "weili_yzzcq@163.com" ]
weili_yzzcq@163.com
06e13ba179cae93d997ce9d40d2b8b69af51a2e2
241d87e329554f362dcd8f041b0917625943c98c
/fatogo_RDB/migrations/0002_auto_20181230_0645.py
daa70ac893a79865d6412c02d869441a8e283248
[]
no_license
omnyx2/Fatogo
1c0949a230823b0ef1626aeda4df853c93c22ac9
1d066f8ae80f9b96816e459c56a7c42a94a028ec
refs/heads/master
2022-02-03T00:07:44.167559
2022-01-19T09:18:02
2022-01-19T09:18:02
162,788,614
0
1
null
2022-01-19T09:18:03
2018-12-22T06:47:52
CSS
UTF-8
Python
false
false
689
py
# -*- coding: utf-8 -*- # Generated by Django 1.11.12 on 2018-12-30 06:45 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('fatogo_RDB', '0001_initial'), ] operations = [ migrations.AlterField( model_name='companyaddress', name='address1', field=models.CharField(blank=True, default='testasdas', max_length=100, null=True), ), migrations.AlterField( model_name='companyreview', name='reviewedGroup', field=models.CharField(default='te stasdas', max_length=100), ), ]
[ "omnyx2@gmail.com" ]
omnyx2@gmail.com
ac33194bffd378e21c7449116b073668876615e6
d99ac626d62c663704444a9cce7e7fc793a9e75e
/windows_asm_dump/dump_asm.py
919d0f476a5fa34c1e8c990412b5ca361122bb57
[]
no_license
Experiment5X/CryptoFunctionDetection
3ab32d5573a249d24db1faf772721bc80b8d905d
dac700193e7e84963943593e36844b173211a8a1
refs/heads/master
2023-04-19T09:12:35.828268
2021-05-13T22:39:27
2021-05-13T22:39:27
355,299,557
1
0
null
null
null
null
UTF-8
Python
false
false
5,802
py
import os
import re
import subprocess
from pathlib import Path
from collections import OrderedDict


class BinaryCollection:
    """Batch driver: disassemble every binary in a directory tree.

    For each input file it writes two outputs into ``out_directory``:
    ``<name>.s`` (cleaned assembly with labels) and
    ``<name>_functions.txt`` (one detected function label per line).
    """

    def __init__(self, in_directory, out_directory):
        # Directory containing the binaries to disassemble.
        self.in_directory = in_directory
        # Directory receiving the .s / _functions.txt outputs.
        self.out_directory = out_directory

    def process_all(self, limit=500):
        """Disassemble up to ``limit`` binaries from ``in_directory``.

        Files whose names start with '.' are skipped; binaries that yield
        no call/jump labels are silently ignored.
        """
        binaries_processed = 0
        for filename in os.listdir(self.in_directory):
            file_path_str = os.path.join(self.in_directory, filename)
            file_path = Path(file_path_str)
            if not filename.startswith('.'):  # file_path.suffix == '.exe' or file_path.suffix == '.dll':
                file_name_no_exetension = file_path.stem
                out_asm_path = os.path.join(
                    self.out_directory, f'{file_name_no_exetension}.s'
                )
                out_functions_path = os.path.join(
                    self.out_directory, f'{file_name_no_exetension}_functions.txt'
                )
                binary_file = BinaryFile(file_path_str)
                # No labels means the dump was empty or too short — skip.
                if len(binary_file.labels) == 0:
                    continue
                function_names = binary_file.get_functions()
                with open(out_functions_path, 'w') as out_functions_file:
                    out_functions_file.writelines([f'{f}\n' for f in function_names])
                dumped_functions = binary_file.dump_cleaned_asm(out_asm_path)
                # Set for O(1) membership checks in the sanity loop below.
                dumped_functions = set(dumped_functions)
                for func_name in function_names:
                    if func_name not in dumped_functions:
                        print(f'{func_name} detected as a function but label wasnt dumped to asm file')
                print(f'Processed (unknown)')
                binaries_processed += 1
                if binaries_processed >= limit:
                    break
        print(f'Processed {binaries_processed} binary files')


class BinaryFile:
    """Parse one PE binary's disassembly (via MSVC ``dumpbin``).

    After construction:
      - ``instructions``: OrderedDict mapping address -> instruction text,
        in dump order.
      - ``labels``: dict mapping address -> label; ``sub_XXXX`` for call
        targets, ``.LXXXX`` for jump targets.
    """

    def __init__(self, binary_path):
        self.binary_path = binary_path
        # Eagerly runs dumpbin and populates instructions/labels.
        self.parse_asm()

    def dump_assembly(self):
        """Run ``dumpbin /DISASM:NOBYTES`` and return its stdout as text.

        NOTE(review): requires dumpbin on PATH (Windows / VS tools);
        a failed run is not checked here.
        """
        result = subprocess.run(
            ['dumpbin', '/DISASM:NOBYTES', self.binary_path],
            stdout=subprocess.PIPE
        )
        return result.stdout.decode('utf-8')

    def load_assembly(self):
        """Return the dump's instruction lines, stripped of header/footer.

        Dumps shorter than 1000 lines are treated as empty (presumably
        packed or unparsable binaries — confirm against dumpbin output).
        """
        assembly = self.dump_assembly()
        asm_lines = assembly.split('\n')
        if len(asm_lines) < 1000:
            return []
        # remove info at start of dump (fixed 8-line dumpbin banner)
        asm_lines = asm_lines[8:]
        # strip all lines and drop blanks
        asm_lines = [l.strip() for l in asm_lines if len(l.strip()) != 0]
        # remove Summary info at end; the scan keeps the occurrence of
        # 'Summary' closest to the start (last assignment wins) and
        # truncates from there.
        summary_line = None
        for i in range(1, len(asm_lines)):
            if asm_lines[-i] == 'Summary':
                summary_line = -i
        asm_lines = asm_lines[:summary_line]
        self.asm_lines = asm_lines
        return asm_lines

    def parse_asm(self):
        """Build ``instructions`` and ``labels`` from the dump lines.

        Each line is ``ADDRESS: mnemonic operands``. Direct call targets
        get ``sub_`` labels; direct jump targets get ``.L`` labels and the
        jump operand is rewritten to use the label.
        """
        asm_lines = self.load_assembly()
        self.instructions = OrderedDict()
        self.labels = {}
        for line in asm_lines:
            line_components = re.split('\s+', line)
            # First token is the hex address followed by ':'.
            address = int(line_components[0].replace(':', ''), 16)
            instruction = ' '.join(line_components[1:])
            if len(line_components) >= 3:
                # Operand counts as an address only if purely hex digits.
                is_operand_address = re.match('^[0-9A-Fa-f]+$', line_components[2])
            else:
                is_operand_address = False
            # check for call instructions
            if (
                line_components[1] == 'call'
                and len(line_components) == 3
                and is_operand_address
            ):
                call_address_str = line_components[2]
                call_address = int(call_address_str, 16)
                self.labels[call_address] = f'sub_{call_address_str}'
            # check for jump instructions (any mnemonic starting with 'j')
            if (
                line_components[1].startswith('j')
                and len(line_components) == 3
                and is_operand_address
            ):
                jump_address_str = line_components[2]
                jump_address = int(jump_address_str, 16)
                jump_label = f'.L{jump_address_str}'
                self.labels[jump_address] = jump_label
                # replace address reference with label
                instruction = instruction.replace(jump_address_str, jump_label)
            self.instructions[address] = instruction

    def get_functions(self):
        """Return ``sub_`` labels that look like real function bodies.

        A call target is kept only if its address exists in the dump and
        its first instruction is not a plain ``jmp`` (which would be an
        import-thunk/stub, not a function).
        """
        functions = []
        # filter out any functions that refer to stubs
        for label_address in self.labels:
            label = self.labels[label_address]
            if not label.startswith('sub_'):
                continue
            if label_address not in self.instructions:
                continue
            first_function_instruction = self.instructions[label_address]
            if not first_function_instruction.startswith('jmp'):
                functions.append(label)
        return functions

    def dump_cleaned_asm(self, out_file_name):
        """Write labeled assembly to ``out_file_name``.

        Emits a ``label:`` line before any instruction whose address has a
        label, then the instruction itself. Returns the list of ``sub_``
        labels actually written, in emission order.
        """
        functions_dumped = []
        with open(out_file_name, 'w') as out_file:
            for address in self.instructions:
                instruction = self.instructions[address]
                # check for a label at this address
                if address in self.labels:
                    label = self.labels[address]
                    if label.startswith('sub_'):
                        functions_dumped.append(label)
                    label_line = f'{label}:\n'
                    out_file.write(label_line)
                out_file.write(f' {instruction}\n')
        return functions_dumped


# Script entry: process a hard-coded local corpus of ransomware samples.
# NOTE(review): paths are machine-specific; runs immediately on import.
collection = BinaryCollection('C:\\Users\\Adam\\Downloads\\CryptoRansomware', 'C:\\Users\\Adam\\Developer\\CryptoFunctionDetection\\windows_asm_dump\\dumped_output_ransomware')
collection.process_all()
[ "xmeadamx@gmail.com" ]
xmeadamx@gmail.com
fb74861083c7c05217f67d3770b4226f195711b3
6a3cc4f914cc087b50105aaeab93178afeb70c53
/demo/celery_utils.py
c1db0242cf8231c2f557a6f6ef597b80f2995a7f
[]
no_license
buendiya/celery_demo
f34983d42718c91ad9492b0f5d6d0cce06b0023b
b4e76ab601ccbde0f1215cc95dc5dd15ce90bffd
refs/heads/master
2020-04-15T15:23:32.877777
2016-07-17T07:06:48
2016-07-17T07:06:48
47,396,768
0
0
null
null
null
null
UTF-8
Python
false
false
1,689
py
from __future__ import absolute_import import logging from raven import Client from raven.contrib.celery import register_signal, register_logger_signal from raven.handlers.logging import SentryHandler from celery import Celery as _Celery from celery.signals import after_setup_task_logger def add_sentry_handler_to_celery_task_logger(client, sentry_handler_log_level): handler = SentryHandler(client) handler.setLevel(sentry_handler_log_level) def process_task_logger_event(sender, logger, loglevel, logfile, format, colorize, **kw): for h in logger.handlers: if type(h) == SentryHandler: return False logger.addHandler(handler) after_setup_task_logger.connect(process_task_logger_event, weak=False) class CeleryWithSentryInit(_Celery): """ using: app = CeleryWithSentryInit(main='tasks', broker=os.environ.get('%s.broker', "amqp://@"), sentry_dsn = os.environ.get('%s.dsn', 'http://'),) """ def __init__(self, sentry_dsn=None, sentry_handler_log_level=logging.ERROR, **kwargs): super(CeleryWithSentryInit, self).__init__(**kwargs) self.sentry_dsn = sentry_dsn self.sentry_handler_log_level = sentry_handler_log_level def on_configure(self): if self.sentry_dsn: client = Client(self.sentry_dsn) # register a custom filter to filter out duplicate logs register_logger_signal(client) # hook into the Celery error handler register_signal(client) add_sentry_handler_to_celery_task_logger(client, self.sentry_handler_log_level)
[ "jingshuaizhen@haomaiyi.com" ]
jingshuaizhen@haomaiyi.com
9865b88c475ace4758d045e6cc6381d76023a4f4
0eb47df15f00b98688e15cc29304c984e3ea8131
/ucsgnet/ucsgnet/net_2d.py
ffc4ec7a510345d6d7df3e0ada6d80caccb3c882
[ "MIT" ]
permissive
kacperkan/ucsgnet
b7570a40d76f6472b9889ffdd523ec7f2bb176ec
c13b204361e59c5b92a7983e929305e17a906b65
refs/heads/master
2023-01-21T21:30:25.563317
2020-11-25T09:15:19
2020-11-25T09:15:19
270,302,702
31
7
null
null
null
null
UTF-8
Python
false
false
14,840
py
import argparse
import typing as t
from collections import OrderedDict, defaultdict

import numpy as np
import pytorch_lightning as pl
import torch
import torch.optim as optim
import torchvision
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader

from ucsgnet.common import THREADS, TrainingStage
from ucsgnet.dataset import SimpleDataset, get_simple_2d_transforms
from ucsgnet.ucsgnet.csg_layers import RelationLayer
from ucsgnet.ucsgnet.extractors import Decoder, Extractor2D
from ucsgnet.ucsgnet.losses import get_composite_loss
from ucsgnet.ucsgnet.metrics import mse
from ucsgnet.ucsgnet.model import CSGNet
from ucsgnet.ucsgnet.shape_evaluators import create_compound_evaluator
from ucsgnet.utils import get_simple_dataset_paths_from_config


class Net(pl.LightningModule):
    """Lightning wrapper around a 2D CSGNet model.

    Bundles the CSGNet (image encoder + decoder + shape evaluator), the
    composite training/validation loss, data loading for the simple 2D
    dataset, and TensorBoard logging of histograms and reconstructions.
    """

    def __init__(self, hparams: argparse.Namespace):
        super().__init__()
        self.hparams = hparams
        # Core model: encoder, decoder, and a compound shape evaluator
        # configured from the CLI hyperparameters.
        self.net = CSGNet(
            Extractor2D(),
            Decoder(),
            create_compound_evaluator(
                self.hparams.use_planes,
                self.hparams.shapes_per_type,
                self.hparams.num_dimensions,
            ),
            self.hparams.shapes_per_type,
            self.hparams.out_shapes_per_layer,
            self.hparams.weight_binarizing_threshold,
            self.hparams.num_csg_layers,
        )

        # Filled in by build() before dataloaders are created.
        self.train_split_config_: t.Optional[str] = None
        self.valid_split_config_: t.Optional[str] = None
        self.data_path_: t.Optional[str] = None

        # Cached by configure_optimizers() so steps can log learning rates.
        self.__optimizers: t.Optional[t.Sequence[Optimizer]] = None

        # Current training stage; switched via switch_mode().
        self._base_mode = TrainingStage.INITIAL_TRAINING

        (
            trainable_params_count,
            non_trainable_params_count,
        ) = self.num_of_parameters

        print("Num of trainable params: {}".format(trainable_params_count))
        print(
            "Num of not trainable params: {}".format(
                non_trainable_params_count
            )
        )

    def turn_fine_tuning_mode(self):
        """Switch model and loss to the fine-tuning stage."""
        self.switch_mode(TrainingStage.FINE_TUNING)

    def turn_initial_training_mode(self):
        """Switch model and loss to the initial-training stage."""
        self.switch_mode(TrainingStage.INITIAL_TRAINING)

    def switch_mode(self, new_mode: TrainingStage):
        """Propagate a training-stage change to both this module and the net."""
        self._base_mode = new_mode
        self.net.switch_mode(new_mode)

    def build(
        self, train_split_config: str, valid_split_config: str, data_path: str
    ):
        """Record dataset locations used later by the dataloader factories."""
        self.train_split_config_ = train_split_config
        self.valid_split_config_ = valid_split_config
        self.data_path_ = data_path

    @property
    def num_of_parameters(self) -> t.Tuple[int, int]:
        """Return (trainable, non-trainable) parameter element counts."""
        total_trainable_params = 0
        total_nontrainable_params = 0

        for param in self.parameters(recurse=True):
            if param.requires_grad:
                total_trainable_params += np.prod(param.shape)
            else:
                total_nontrainable_params += np.prod(param.shape)
        return total_trainable_params, total_nontrainable_params

    def forward(
        self,
        images: torch.Tensor,
        points: torch.Tensor,
        *,
        return_distances_to_base_shapes: bool = False,
        return_intermediate_output_csg: bool = False,
        return_scaled_distances_to_shapes: bool = False,
        retain_latent_code: bool = False,
        retain_shape_params: bool = False,
    ) -> t.Union[torch.Tensor, t.Tuple[torch.Tensor, ...]]:
        """Delegate to the wrapped CSGNet; flags select extra outputs."""
        return self.net(
            images,
            points,
            return_distances_to_base_shapes=return_distances_to_base_shapes,
            return_intermediate_output_csg=return_intermediate_output_csg,
            return_scaled_distances_to_shapes=return_scaled_distances_to_shapes,
            retain_shape_params=retain_shape_params,
            retain_latent_code=retain_latent_code,
        )

    def training_step(
        self,
        batch: t.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
        batch_idx: int,
    ) -> t.Dict[str, t.Any]:
        """One optimization step: composite loss + histogram logging.

        Batch layout is (image, points, trues, bounding_volume) — matches
        the SimpleDataset used by the dataloaders below.
        """
        self.logger.train()
        image, points, trues, bounding_volume = batch
        predictions, distances_to_base_shapes, intermediate_results = self(
            image,
            points,
            return_distances_to_base_shapes=True,
            return_intermediate_output_csg=True,
        )
        total_loss, partial_losses_dict = get_composite_loss(
            predictions,
            trues,
            bounding_volume,
            points,
            intermediate_results,
            self.net.csg_layers_,
            self.net.evaluator_,
            self._base_mode,
            self.net.use_planes,
            self.global_step,
            self.net.scaler_,
        )

        # Log parameter histograms: either raw plane parameters, or the
        # per-shape parameters plus translation components.
        if self.hparams.use_planes:
            self.logger.log_histogram(
                f"planes_params",
                self.net.evaluator_.last_predicted_parameters.reshape((-1,)),
                self.global_step,
            )
        else:
            for j, (name, tensor) in enumerate(
                self.net.evaluator_.get_all_last_predicted_parameters_of_shapes()
            ):
                self.logger.log_histogram(
                    f"evaluate_{name}_0_{j}",
                    tensor.reshape((-1,)),
                    self.global_step,
                )
            translation_vectors = (
                self.net.evaluator_.get_all_translation_vectors()
            )
            self.logger.log_histogram(
                f"translation_x_0",
                translation_vectors[..., 0].reshape((-1,)),
                self.global_step,
            )
            self.logger.log_histogram(
                f"translation_y_0",
                translation_vectors[..., 1].reshape((-1,)),
                self.global_step,
            )
            if self.hparams.num_dimensions == 3:
                self.logger.log_histogram(
                    f"translation_z_0",
                    translation_vectors[..., 2].reshape((-1,)),
                    self.global_step,
                )

        # Per-layer temperature of the CSG relation layers.
        for i, layer in enumerate(self.net.csg_layers_):  # type: RelationLayer
            self.logger.log_histogram(
                f"rel_layer_dist_temp_{i}/vals",
                layer.temperature_.reshape((-1,)),
                self.global_step,
            )

        self.logger.log_histogram(
            "scaler/m", self.net.scaler_.m.reshape((-1,)), self.global_step
        )

        # Progress-bar metrics are prefixed with "train_".
        tqdm_dict = {
            "train_loss": total_loss,
            "train_predictions_avg": predictions.mean(),
            **{
                "train_" + key: value
                for key, value in partial_losses_dict.items()
            },
            **{
                f"lr_{i}": torch.tensor(
                    optimizer.param_groups[0]["lr"], dtype=torch.float
                )
                for i, optimizer in enumerate(self.__optimizers)
            },
        }

        logger_dict = {
            "loss": total_loss,
            "predictions_avg": predictions.mean(),
            **partial_losses_dict,
            **{
                f"lr_{i}": torch.tensor(
                    optimizer.param_groups[0]["lr"], dtype=torch.float
                )
                for i, optimizer in enumerate(self.__optimizers)
            },
        }

        output = OrderedDict(
            {"loss": total_loss, "progress_bar": tqdm_dict, "log": logger_dict}
        )
        return output

    def validation_step(
        self,
        batch: t.Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
        batch_idx: int,
    ) -> t.Dict[str, t.Any]:
        """Same composite loss as training, plus MSE of binarized output."""
        image, points, trues, bounding_volume = batch
        predictions, distances_to_base_shapes, intermediate_results = self(
            image,
            points,
            return_distances_to_base_shapes=True,
            return_intermediate_output_csg=True,
        )
        total_loss, partial_losses_dict = get_composite_loss(
            predictions,
            trues,
            bounding_volume,
            points,
            intermediate_results,
            self.net.csg_layers_,
            self.net.evaluator_,
            self._base_mode,
            self.net.use_planes,
            self.global_step,
            self.net.scaler_,
        )

        logger_dict = {
            "loss": total_loss,
            **partial_losses_dict,
            "mse": mse(self.binarize(predictions), trues),
        }

        output = OrderedDict({"loss": total_loss, "log": logger_dict})
        return output

    def validation_end(
        self, outputs: t.List[t.Dict[str, t.Any]]
    ) -> t.Dict[str, t.Any]:
        """Average per-batch validation metrics over the whole epoch."""
        self.logger.valid()
        # defaultdict(int) lets tensor metrics accumulate from a 0 start.
        means = defaultdict(int)
        for output in outputs:
            for key, value in output["log"].items():
                means[key] += value

        means = {key: value / len(outputs) for key, value in means.items()}
        logger_dict = means
        tqdm_dict = {
            "valid_" + key: value.item() for key, value in means.items()
        }

        result = {
            "valid_loss": means["loss"],
            "progress_bar": tqdm_dict,
            "log": logger_dict,
        }
        return result

    def configure_optimizers(
        self,
    ) -> t.Tuple[t.Sequence[Optimizer], t.Sequence[optim.lr_scheduler.StepLR]]:
        """Create the single Adam optimizer (no LR scheduler)."""
        optimizer = optim.Adam(
            self.parameters(),
            lr=self.hparams.lr,
            betas=(self.hparams.beta1, self.hparams.beta2),
        )
        # Kept so training_step can log the current learning rate(s).
        self.__optimizers = [optimizer]
        return [optimizer], []

    def _dataloader_simple(
        self, training: bool, split_path: str
    ) -> DataLoader:
        """Build a DataLoader over SimpleDataset; shuffle/drop_last in training."""
        batch_size = self.hparams.batch_size
        renders = get_simple_dataset_paths_from_config(
            self.data_path_, split_path
        )
        transforms = get_simple_2d_transforms()
        loader = DataLoader(
            dataset=SimpleDataset(
                renders,
                None,
                self.hparams.points_per_sample_in_batch,
                transforms,
            ),
            batch_size=batch_size,
            shuffle=training,
            drop_last=training,
            num_workers=THREADS,
        )
        return loader

    def train_dataloader(self) -> DataLoader:
        """Training dataloader (requires build() to have been called)."""
        return self._dataloader_simple(True, self.train_split_config_)

    def val_dataloader(self) -> DataLoader:
        """Validation dataloader (requires build() to have been called)."""
        return self._dataloader_simple(False, self.valid_split_config_)

    def __next_elem_from_loader(
        self, loader: DataLoader
    ) -> t.Tuple[torch.Tensor, ...]:
        """Fetch a single batch, moving tensors to GPU when training on GPU."""
        images, coords, distances, _ = next(iter(loader))
        if self.on_gpu:
            images = images.cuda()
            coords = coords.cuda()
            distances = distances.cuda()
        return images, coords, distances

    def on_epoch_end(self):
        """Log image grids (input / GT / reconstruction / binarized) to TB."""
        val_loader = self.val_dataloader()
        (images, coords, distances) = self.__next_elem_from_loader(val_loader)
        # Visualize at most 16 samples.
        images = images[:16]
        coords = coords[:16]
        distances = distances[:16]

        b, c, h, w = images.shape
        # Predictions come back flat per point; reshape to image layout.
        final_predictions = self(images, coords).reshape((b, c, h, w))

        input_images = torchvision.utils.make_grid(images, normalize=True)
        gt = torchvision.utils.make_grid(
            distances.view_as(images), normalize=True
        )
        pred_grid = torchvision.utils.make_grid(
            final_predictions, normalize=True
        )
        binarized_pred_grid = torchvision.utils.make_grid(
            self.binarize(final_predictions), normalize=True
        )

        self.logger.experiment.add_image(
            "input_images", input_images, self.current_epoch
        )
        self.logger.experiment.add_image("gt", gt, self.current_epoch)
        self.logger.experiment.add_image(
            "reconstruction", pred_grid, self.current_epoch
        )
        self.logger.experiment.add_image(
            "binarized_pred", binarized_pred_grid, self.current_epoch
        )

    @classmethod
    def binarize(cls, predictions: torch.Tensor) -> torch.Tensor:
        """Threshold predictions at 0.5 into a float {0.0, 1.0} tensor."""
        return (predictions >= 0.5).float()

    @staticmethod
    def add_model_specific_args(
        parent_parser: argparse.ArgumentParser,
    ) -> argparse.ArgumentParser:
        """Extend the CLI parser with this model's hyperparameters.

        NOTE(review): "evaulated" in the --num_dimensions help text is a
        typo in the user-facing string; left as-is here.
        """
        parser = argparse.ArgumentParser(parents=[parent_parser])
        parser.add_argument(
            "--num_dimensions",
            help="Number of dimensions to be evaulated on",
            type=int,
            default=2,
        )
        parser.add_argument(
            "--shapes_per_type",
            help=(
                "Number of shapes per type, ex. 64 will create 64 squares and "
                "64 circles"
            ),
            type=int,
            default=8,
        )
        parser.add_argument(
            "--lr",
            help="Learning rate of the optimizer",
            type=float,
            default=1e-3,
        )
        parser.add_argument(
            "--beta1",
            help="Beta_1 parameter of the Adam optimizer",
            type=float,
            default=0.5,
        )
        parser.add_argument(
            "--beta2",
            help="Beta_2 parameter of the Adam optimizer",
            type=float,
            default=0.99,
        )
        parser.add_argument(
            "--batch_size", help="Batch size", type=int, default=16
        )
        parser.add_argument(
            "--points",
            type=int,
            dest="points_per_sample_in_batch",
            help="Number of SDF samples per sample in a batch.",
            default=1024,
        )
        parser.add_argument(
            "--sampling_count",
            type=int,
            help="Num of sampling to perform in relational layers",
            default=5,
        )
        parser.add_argument(
            "--out_shapes_per_layer",
            type=int,
            help="Number of output shapes per layer",
            default=2,
        )
        parser.add_argument(
            "--weight_binarizing_threshold",
            type=float,
            help=(
                "Thresholding value for weights. If weight > `threshold` "
                "then it is set to 1. If -`threshold` < weight <= "
                "`threshold then set 0 and to -1 otherwise."
            ),
            default=0.1,
        )
        parser.add_argument(
            "--use_planes",
            action="store_true",
            help=(
                "Whether use normal shapes (circles, squares etc.) or "
                "planes that are combined later. Note, that for planes, "
                "it is recommended to set `shapes_per_type` much higher"
            ),
        )
        parser.add_argument(
            "--num_csg_layers",
            type=int,
            help="Number of relation prediction layers",
            default=2,
        )
        return parser
[ "kacp.kania@gmail.com" ]
kacp.kania@gmail.com
e341b10365879ae8db5489b7764144b40ca6dff3
279d3252b0275562ed43a4d978b4cdd27269b0a7
/knn.py
50ed87c3eaac50390d6ee8d694c94e3668d3da06
[ "BSD-3-Clause" ]
permissive
rishabhjain16/Machine-Learning-1
02d45879fae5f8934687976a58d7179fe15b175a
69d858a29e6516b6913ff2b41acd3f9d62ba5115
refs/heads/master
2022-07-11T01:53:26.385406
2020-05-17T20:30:29
2020-05-17T20:30:29
264,460,595
0
0
null
null
null
null
UTF-8
Python
false
false
2,204
py
# -*- coding: utf-8 -*- """ @author: Rishabh Jain """ # Importing the libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # Importing the dataset dataset = pd.read_csv('hazelnut.csv') X = dataset.iloc[:, [0,1,3,4,6,7,8,9,10]] y = dataset.iloc[:, 11].values # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0) from sklearn.neighbors import KNeighborsRegressor knn = KNeighborsRegressor(n_neighbors=3) # Fit the classifier to the data knn.fit(X_train,y_train) y_pred = knn.predict(X_test) y_pred knn.score(X_test, y_test) df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred}) df plot = sns.distplot(y_test, hist=False, color="r", label="Actual Value") sns.distplot(y_pred, hist=False, color="b", label="Fitted Values" , ax=plot) from sklearn import metrics print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred)) print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred)) print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) plot = sns.distplot(y_test, hist=False, color="r", label="Actual Value") sns.distplot(y_pred, hist=False, color="b", label="Fitted Values" , ax=plot) # 10 fold Cross Validation from sklearn.model_selection import cross_val_score #train model with cv of 10 cv_scores = cross_val_score(knn, X, y, cv=10) #print each cv score (accuracy) and average them print(cv_scores) print(np.mean(cv_scores)) #k errors from sklearn import neighbors from sklearn.metrics import mean_squared_error from math import sqrt rmse_val = [] #to store rmse values for different k for K in range(50): K = K+1 model = neighbors.KNeighborsRegressor(n_neighbors = K) model.fit(X_train, y_train) #fit the model pred=model.predict(X_test) #make prediction on test set error = sqrt(mean_squared_error(y_test,pred)) #calculate rmse 
rmse_val.append(error) #store rmse values print('RMSE value for k= ' , K , 'is:', error)
[ "noreply@github.com" ]
rishabhjain16.noreply@github.com
6b1a7d1059f1835517ce19b8a3246538105ed4a5
bbea1678dbf015dcb31f844e9a632f18df101cb1
/InsertInOrderAndInsertionSort.py
27b9193e3c24682943671121ec26d1b981510337
[]
no_license
aidanrfraser/CompSci106
e6597e326da890d42feea8ce70ce640b571322cf
1e905a8b1cdc019cb79991f4aaa116fa5c18ecc9
refs/heads/master
2020-09-13T00:23:02.126036
2020-08-25T23:51:04
2020-08-25T23:51:04
222,604,255
1
0
null
null
null
null
UTF-8
Python
false
false
792
py
from cisc106 import assertEqual


def insert_in_order(n, alist):
    """Return a new list: alist (assumed sorted) with n inserted in order."""
    if not alist:
        return [n]
    if n < alist[0]:
        # n belongs at the front; prepend and keep the rest untouched.
        return [n] + alist
    # Keep the head, recurse into the tail.
    return alist[0:1] + insert_in_order(n, alist[1:])


assertEqual(insert_in_order(1, [2, 3]), [1, 2, 3])
assertEqual(insert_in_order(1, [1, 2, 3]), [1, 1, 2, 3])
assertEqual(insert_in_order(1, []), [1])


def insertionSort(alist):
    """Return a sorted copy of alist via recursive insertion sort."""
    if not alist:
        return alist
    # Sort the tail, then slot the head into its place.
    return insert_in_order(alist[0], insertionSort(alist[1:]))


assertEqual(insertionSort([3, 2, 1]), [1, 2, 3])
assertEqual(insertionSort([]), [])
assertEqual(insertionSort([1]), [1])
[ "54653789+aidanrfraser@users.noreply.github.com" ]
54653789+aidanrfraser@users.noreply.github.com
09b65d768f9b9686b72cd9958c1122f6bc4e66af
b7efe6df88602633e63c4e615db9af22d679241d
/Chapter3-loading_displaying_and_saving/load_display_save.py
2ceec2ca6baad96079467a26fb4915f1b716c1ea
[]
no_license
amuLee/PPoa
042bc8adde9abc60207006b8da04003e23b5bd4c
65d3a9a47a3e4536646d8c3f79aac26da00c4b24
refs/heads/master
2020-05-20T05:23:28.704216
2019-05-15T09:16:22
2019-05-15T09:16:22
185,402,360
1
2
null
null
null
null
UTF-8
Python
false
false
472
py
# Load an image from a CLI-supplied path, report its dimensions,
# display it in a window, and save a copy as newimage.jpg.
from __future__ import print_function
import argparse
import cv2

# Require the image path via -i/--image.
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to the image")
args = vars(ap.parse_args())

# NOTE(review): cv2.imread returns None for a missing/unreadable file,
# which would make the .shape accesses below raise AttributeError —
# consider validating the result.
image = cv2.imread(args["image"])
# OpenCV shape order is (height, width, channels).
print("width: {} pixels".format(image.shape[1]))
print("height: {} pixels".format(image.shape[0]))
print("channels: {}".format(image.shape[2]))

# Show the image until a key is pressed, then write a copy to disk.
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.imwrite("newimage.jpg", image)
[ "leeamu340@gmail.com" ]
leeamu340@gmail.com
c3eef6d5636fc89030c505b2336108a8410143d5
c841ccb89033f31f483366db61eb743ae5893bb4
/x_arrary_test.py
0db5e2967645f0769f85eb6a1ec8e403cbf951a3
[]
no_license
kenneth-meyer/deepdishdata
1ddf2a1cc7110b674feac0b4d4d33858c36bf8db
418a95853c64d91ad80729ce3924258bcb2083ff
refs/heads/master
2020-09-13T13:27:15.125327
2019-11-21T00:02:37
2019-11-21T00:02:37
222,797,562
1
0
null
null
null
null
UTF-8
Python
false
false
127
py
# Quick smoke test: load the AoT Chicago sensor CSV and print the
# first 50 rows to verify the file is readable.
# NOTE(review): hard-coded absolute path — only works on that one host.
import pandas as pd

data_df = pd.read_csv("/home/d3/deepdishdata/AoT_Chicago.complete.recent.csv")
print(data_df.head(50))
[ "d3@deepdishdata-3423-wd-login.c.cloudycluster19-den-2253.internal" ]
d3@deepdishdata-3423-wd-login.c.cloudycluster19-den-2253.internal
b6f72bb86b3a305dc12f17daf5ed00670c12a129
485b781c6000259f4f27a9380e75ef76b04dd79d
/tests/expectdata/statements/load_stockrow_cat_quarterly.py
0ea5ee39e665b3c4d3c63adda507d1df4f0a1294
[ "MIT" ]
permissive
azafrob/py-finstmt
5858693d2d78647c69dff46802d56a4e143ca9e1
7903bce83b31e4425ac680020bf7d3536ed1ed11
refs/heads/master
2023-08-25T02:39:50.898489
2020-12-30T22:57:10
2020-12-30T22:57:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
28,768
py
import pandas as pd LOAD_STOCKROW_CAT_Q_INDEX_str = ["2009-12-31 00:00:00", "2010-03-31 00:00:00", "2010-06-30 00:00:00", "2010-09-30 00:00:00", "2010-12-31 00:00:00", "2011-03-31 00:00:00", "2011-06-30 00:00:00", "2011-09-30 00:00:00", "2011-12-31 00:00:00", "2012-03-31 00:00:00", "2012-06-30 00:00:00", "2012-09-30 00:00:00", "2012-12-31 00:00:00", "2013-03-31 00:00:00", "2013-06-30 00:00:00", "2013-09-30 00:00:00", "2013-12-31 00:00:00", "2014-03-31 00:00:00", "2014-06-30 00:00:00", "2014-09-30 00:00:00", "2014-12-31 00:00:00", "2015-03-31 00:00:00", "2015-06-30 00:00:00", "2015-09-30 00:00:00", "2015-12-31 00:00:00", "2016-03-31 00:00:00", "2016-06-30 00:00:00", "2016-09-30 00:00:00", "2016-12-31 00:00:00", "2017-03-31 00:00:00", "2017-06-30 00:00:00", "2017-09-30 00:00:00", "2017-12-31 00:00:00", "2018-03-31 00:00:00", "2018-06-30 00:00:00", "2018-09-30 00:00:00", "2018-12-31 00:00:00", "2019-03-31 00:00:00", "2019-06-30 00:00:00", "2019-09-30 00:00:00"] LOAD_STOCKROW_CAT_Q_INDEX = [pd.to_datetime(val) for val in LOAD_STOCKROW_CAT_Q_INDEX_str] LOAD_STOCKROW_CAT_Q_INDEX_DATA_DICT = dict( revenue=pd.Series( [7898000000.0, 8238000000.0, 10409000000.0, 11134000000.0, 12807000000.0, 12949000000.0, 14230000000.0, 15716000000.0, 17243000000.0, 15981000000.0, 17374000000.0, 16445000000.0, 16075000000.0, 13210000000.0, 14621000000.0, 13423000000.0, 14402000000.0, 13241000000.0, 14150000000.0, 13549000000.0, 14244000000.0, 12702000000.0, 12317000000.0, 10962000000.0, 11030000000.0, 9461000000.0, 10342000000.0, 9160000000.0, 9574000000.0, 9822000000.0, 11331000000.0, 11413000000.0, 12896000000.0, 12859000000.0, 14011000000.0, 13510000000.0, 14342000000.0, 13466000000.0, 14432000000.0, 12758000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), cogs=pd.Series( [6090000000.0, 6127000000.0, 7606000000.0, 7979000000.0, 9569000000.0, 9260000000.0, 10512000000.0, 11666000000.0, 12966000000.0, 11441000000.0, 12478000000.0, 11836000000.0, 12097000000.0, 9828000000.0, 10958000000.0, 
9952000000.0, 10716000000.0, 9597000000.0, 10350000000.0, 9791000000.0, 11604000000.0, 8910000000.0, 8822000000.0, 8014000000.0, 8387000000.0, 6974000000.0, 7567000000.0, 6674000000.0, 7425000000.0, 6960000000.0, 7978000000.0, 7841000000.0, 9127000000.0, 8732000000.0, 9604000000.0, 9207000000.0, 10176000000.0, 9193000000.0, 10133000000.0, 8758000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), gross_profit=pd.Series( [1808000000.0, 2111000000.0, 2803000000.0, 3155000000.0, 3238000000.0, 3689000000.0, 3718000000.0, 4050000000.0, 4277000000.0, 4540000000.0, 4896000000.0, 4609000000.0, 3978000000.0, 3382000000.0, 3663000000.0, 3471000000.0, 3686000000.0, 3644000000.0, 3800000000.0, 3758000000.0, 2640000000.0, 3792000000.0, 3495000000.0, 2948000000.0, 2643000000.0, 2487000000.0, 2775000000.0, 2486000000.0, 2149000000.0, 2862000000.0, 3353000000.0, 3572000000.0, 3769000000.0, 4127000000.0, 4407000000.0, 4303000000.0, 4166000000.0, 4273000000.0, 4299000000.0, 4000000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), rd_exp=pd.Series( [355000000.0, 402000000.0, 450000000.0, 510000000.0, 543000000.0, 525000000.0, 584000000.0, 584000000.0, 604000000.0, 587000000.0, 632000000.0, 634000000.0, 613000000.0, 562000000.0, 548000000.0, 469000000.0, 467000000.0, 508000000.0, 516000000.0, 533000000.0, 823000000.0, 524000000.0, 510000000.0, 513000000.0, 572000000.0, 508000000.0, 468000000.0, 453000000.0, 424000000.0, 425000000.0, 458000000.0, 461000000.0, 498000000.0, 443000000.0, 462000000.0, 479000000.0, 466000000.0, 435000000.0, 441000000.0, 431000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), sga=pd.Series( [942000000.0, 932000000.0, 1059000000.0, 1148000000.0, 1109000000.0, 1099000000.0, 1257000000.0, 1360000000.0, 1487000000.0, 1340000000.0, 1517000000.0, 1471000000.0, 1591000000.0, 1390000000.0, 1421000000.0, 1319000000.0, 1417000000.0, 1292000000.0, 1437000000.0, 1446000000.0, 2354000000.0, 1249000000.0, 1318000000.0, 1129000000.0, 1255000000.0, 1088000000.0, 1123000000.0, 992000000.0, 
1180000000.0, 1061000000.0, 1304000000.0, 1254000000.0, 1380000000.0, 1276000000.0, 1440000000.0, 1299000000.0, 1463000000.0, 1319000000.0, 1309000000.0, 1251000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), dep_exp=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), other_op_exp=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), op_exp=pd.Series( [1680000000.0, 1603000000.0, 1826000000.0, 1968000000.0, 1947000000.0, 1856000000.0, 2117000000.0, 2291000000.0, 2317000000.0, 2217000000.0, 2280000000.0, 2013000000.0, 2940000000.0, 2164000000.0, 2106000000.0, 2070000000.0, 2234000000.0, 2246000000.0, 2325000000.0, 2366000000.0, 3591000000.0, 2090000000.0, 2162000000.0, 2023000000.0, 2818000000.0, 1993000000.0, 1990000000.0, 2005000000.0, 2747000000.0, 2482000000.0, 2169000000.0, 2063000000.0, 2382000000.0, 2019000000.0, 2240000000.0, 2168000000.0, 2283000000.0, 2066000000.0, 2086000000.0, 1980000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), ebit=pd.Series( [229000000.0, 566000000.0, 997000000.0, 1172000000.0, 1276000000.0, 1824000000.0, 1423000000.0, 1727000000.0, 2070000000.0, 2388000000.0, 2681000000.0, 2581000000.0, 1026000000.0, 1246000000.0, 1467000000.0, 1372000000.0, 1488000000.0, 1450000000.0, 1538000000.0, 1509000000.0, -869000000.0, 1895000000.0, 1262000000.0, 904000000.0, -126000000.0, 492000000.0, 864000000.0, 505000000.0, -1231000000.0, 405000000.0, 1284000000.0, 1647000000.0, 1288000000.0, 2238000000.0, 2299000000.0, 2244000000.0, 1468000000.0, 2371000000.0, 2288000000.0, 2115000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), int_exp=pd.Series( [88000000.0, 102000000.0, 81000000.0, 
85000000.0, 75000000.0, 87000000.0, 90000000.0, 112000000.0, 107000000.0, 113000000.0, 110000000.0, 129000000.0, 115000000.0, 120000000.0, 120000000.0, 116000000.0, 109000000.0, 110000000.0, 120000000.0, 128000000.0, 126000000.0, 129000000.0, 125000000.0, 127000000.0, 126000000.0, 129000000.0, 130000000.0, 126000000.0, 120000000.0, 123000000.0, 121000000.0, 118000000.0, 169000000.0, 101000000.0, 102000000.0, 102000000.0, 99000000.0, 103000000.0, 103000000.0, 103000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), gain_on_sale_invest=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), gain_on_sale_asset=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), impairment=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), ebt=pd.Series( [141000000.0, 464000000.0, 916000000.0, 1087000000.0, 1201000000.0, 1737000000.0, 1333000000.0, 1615000000.0, 1963000000.0, 2275000000.0, 2571000000.0, 2452000000.0, 911000000.0, 1126000000.0, 1347000000.0, 1256000000.0, 1379000000.0, 1340000000.0, 1418000000.0, 1381000000.0, -995000000.0, 1766000000.0, 1137000000.0, 777000000.0, -252000000.0, 363000000.0, 734000000.0, 379000000.0, -1351000000.0, 282000000.0, 1163000000.0, 1529000000.0, 1119000000.0, 2137000000.0, 2197000000.0, 2142000000.0, 1369000000.0, 2268000000.0, 2185000000.0, 2012000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), tax_exp=pd.Series( [-91000000.0, 231000000.0, 209000000.0, 295000000.0, 233000000.0, 
512000000.0, 318000000.0, 474000000.0, 416000000.0, 689000000.0, 872000000.0, 753000000.0, 214000000.0, 246000000.0, 387000000.0, 310000000.0, 376000000.0, 418000000.0, 419000000.0, 364000000.0, -509000000.0, 521000000.0, 335000000.0, 218000000.0, -158000000.0, 92000000.0, 184000000.0, 96000000.0, -180000000.0, 90000000.0, 361000000.0, 470000000.0, 2418000000.0, 472000000.0, 490000000.0, 415000000.0, 321000000.0, 387000000.0, 565000000.0, 518000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), net_income=pd.Series( [232000000.0, 233000000.0, 707000000.0, 792000000.0, 968000000.0, 1225000000.0, 1015000000.0, 1141000000.0, 1547000000.0, 1586000000.0, 1699000000.0, 1699000000.0, 697000000.0, 880000000.0, 960000000.0, 946000000.0, 1003000000.0, 922000000.0, 999000000.0, 1017000000.0, -486000000.0, 1245000000.0, 802000000.0, 559000000.0, -94000000.0, 271000000.0, 550000000.0, 283000000.0, -1171000000.0, 192000000.0, 802000000.0, 1059000000.0, -1299000000.0, 1665000000.0, 1707000000.0, 1727000000.0, 1048000000.0, 1881000000.0, 1620000000.0, 1494000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), cash=pd.Series( [4867000000.0, 3538000000.0, 3597000000.0, 2265000000.0, 3592000000.0, 4869000000.0, 10715000000.0, 3229000000.0, 3057000000.0, 2864000000.0, 5103000000.0, 5689000000.0, 5490000000.0, 5982000000.0, 6110000000.0, 6357000000.0, 6081000000.0, 5345000000.0, 7927000000.0, 6082000000.0, 7341000000.0, 7563000000.0, 7821000000.0, 6046000000.0, 6460000000.0, 5886000000.0, 6764000000.0, 6113000000.0, 7168000000.0, 9472000000.0, 10232000000.0, 9591000000.0, 8261000000.0, 7888000000.0, 8654000000.0, 8007000000.0, 7857000000.0, 7128000000.0, 7429000000.0, 7906000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), st_invest=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), cash_and_st_invest=pd.Series( 
[4867000000.0, 3538000000.0, 3597000000.0, 2265000000.0, 3592000000.0, 4869000000.0, 10715000000.0, 3229000000.0, 3057000000.0, 2864000000.0, 5103000000.0, 5689000000.0, 5490000000.0, 5982000000.0, 6110000000.0, 6357000000.0, 6081000000.0, 5345000000.0, 7927000000.0, 6082000000.0, 7341000000.0, 7563000000.0, 7821000000.0, 6046000000.0, 6460000000.0, 5886000000.0, 6764000000.0, 6113000000.0, 7168000000.0, 9472000000.0, 10232000000.0, 9591000000.0, 8261000000.0, 7888000000.0, 8654000000.0, 8007000000.0, 7857000000.0, 7128000000.0, 7429000000.0, 7906000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), receivables=pd.Series( [27162000000.0, 27070000000.0, 27169000000.0, 28250000000.0, 28849000000.0, 29348000000.0, 29997000000.0, 30034000000.0, 30803000000.0, 31632000000.0, 32584000000.0, 33257000000.0, 33911000000.0, 34164000000.0, 33812000000.0, 33724000000.0, 33499000000.0, 33889000000.0, 34190000000.0, 33176000000.0, 32772000000.0, 31588000000.0, 31413000000.0, 30462000000.0, 30507000000.0, 30852000000.0, 30396000000.0, 29453000000.0, 29088000000.0, 29587000000.0, 29732000000.0, 29836000000.0, 30725000000.0, 31029000000.0, 31299000000.0, 31171000000.0, 31899000000.0, 31716000000.0, 32150000000.0, 31072000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), inventory=pd.Series( [6360000000.0, 6990000000.0, 7339000000.0, 9006000000.0, 9587000000.0, 10676000000.0, 11359000000.0, 14412000000.0, 14544000000.0, 16511000000.0, 17344000000.0, 17550000000.0, 15547000000.0, 15074000000.0, 13889000000.0, 13392000000.0, 12625000000.0, 12888000000.0, 13055000000.0, 13328000000.0, 12205000000.0, 12099000000.0, 11681000000.0, 11150000000.0, 9700000000.0, 9849000000.0, 9458000000.0, 9478000000.0, 8614000000.0, 9082000000.0, 9388000000.0, 10212000000.0, 10018000000.0, 10947000000.0, 11255000000.0, 11814000000.0, 11529000000.0, 12340000000.0, 12007000000.0, 12180000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), def_tax_st=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), other_current_assets=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), total_current_assets=pd.Series( [27217000000.0, 26412000000.0, 27376000000.0, 28577000000.0, 31810000000.0, 34608000000.0, 40755000000.0, 36864000000.0, 37900000000.0, 40209000000.0, 44294000000.0, 44639000000.0, 42138000000.0, 42145000000.0, 40805000000.0, 40088000000.0, 38335000000.0, 37968000000.0, 41276000000.0, 39042000000.0, 38867000000.0, 38491000000.0, 38227000000.0, 35280000000.0, 33508000000.0, 33748000000.0, 33606000000.0, 31999000000.0, 31967000000.0, 35548000000.0, 36991000000.0, 37185000000.0, 36244000000.0, 37357000000.0, 38641000000.0, 38454000000.0, 38603000000.0, 39126000000.0, 39789000000.0, 39160000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), gross_ppe=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), dep=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), net_ppe=pd.Series( [12386000000.0, 12057000000.0, 11763000000.0, 12065000000.0, 12539000000.0, 12219000000.0, 12430000000.0, 13397000000.0, 14395000000.0, 14571000000.0, 14928000000.0, 15509000000.0, 16461000000.0, 16276000000.0, 16352000000.0, 16588000000.0, 17075000000.0, 16716000000.0, 16690000000.0, 16431000000.0, 16577000000.0, 16277000000.0, 16136000000.0, 15955000000.0, 
16090000000.0, 15935000000.0, 15916000000.0, 15680000000.0, 15322000000.0, 14727000000.0, 14420000000.0, 14187000000.0, 14155000000.0, 13912000000.0, 13752000000.0, 13607000000.0, 13574000000.0, 13259000000.0, 13172000000.0, 12842000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), goodwill=pd.Series( [2734000000.0, 2772000000.0, 2777000000.0, 3458000000.0, 3419000000.0, 3402000000.0, 3385000000.0, 12307000000.0, 11448000000.0, 11368000000.0, 11556000000.0, 11538000000.0, 10958000000.0, 10715000000.0, 10578000000.0, 10686000000.0, 10552000000.0, 10495000000.0, 10367000000.0, 10011000000.0, 9770000000.0, 9383000000.0, 9413000000.0, 9387000000.0, 9436000000.0, 9451000000.0, 9329000000.0, 9178000000.0, 8369000000.0, 8338000000.0, 8374000000.0, 8371000000.0, 8311000000.0, 8539000000.0, 8288000000.0, 8209000000.0, 8114000000.0, 7998000000.0, 7944000000.0, 7772000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), lt_invest=pd.Series( [105000000.0, 133000000.0, 154000000.0, 160000000.0, 164000000.0, 140000000.0, 123000000.0, 121000000.0, 133000000.0, 139000000.0, 124000000.0, 199000000.0, 272000000.0, 270000000.0, 288000000.0, 278000000.0, 272000000.0, 266000000.0, 259000000.0, 265000000.0, 257000000.0, 230000000.0, 229000000.0, 231000000.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), def_tax_lt=pd.Series( [3930000000.0, 3711000000.0, 3505000000.0, 3916000000.0, 3424000000.0, 3307000000.0, 3366000000.0, 1928000000.0, 3737000000.0, 3583000000.0, 3717000000.0, 3506000000.0, 3558000000.0, 3598000000.0, 3693000000.0, 3526000000.0, 2147000000.0, 2101000000.0, 2200000000.0, 2131000000.0, 3143000000.0, 2780000000.0, 2914000000.0, 3005000000.0, 2489000000.0, 2486000000.0, 2536000000.0, 2579000000.0, 2790000000.0, 2940000000.0, 2866000000.0, 2845000000.0, 1693000000.0, 1687000000.0, 1626000000.0, 1288000000.0, 1439000000.0, 1378000000.0, 1473000000.0, 1372000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), 
other_lt_assets=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), total_non_current_assets=pd.Series( [32821000000.0, 32424000000.0, 31417000000.0, 33065000000.0, 32210000000.0, 31779000000.0, 32856000000.0, 40903000000.0, 43318000000.0, 43699000000.0, 44544000000.0, 45902000000.0, 46832000000.0, 46598000000.0, 46570000000.0, 47184000000.0, 46561000000.0, 46429000000.0, 46551000000.0, 45446000000.0, 45814000000.0, 43999000000.0, 44015000000.0, 43689000000.0, 44834000000.0, 44559000000.0, 44694000000.0, 44403000000.0, 42737000000.0, 42001000000.0, 41519000000.0, 41375000000.0, 40718000000.0, 40657000000.0, 40346000000.0, 39755000000.0, 39906000000.0, 39600000000.0, 39398000000.0, 38833000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), total_assets=pd.Series( [60038000000.0, 58836000000.0, 58793000000.0, 61642000000.0, 64020000000.0, 66387000000.0, 73611000000.0, 77767000000.0, 81218000000.0, 83908000000.0, 88838000000.0, 90541000000.0, 88970000000.0, 88743000000.0, 87375000000.0, 87272000000.0, 84896000000.0, 84397000000.0, 87827000000.0, 84488000000.0, 84681000000.0, 82490000000.0, 82242000000.0, 78969000000.0, 78342000000.0, 78307000000.0, 78300000000.0, 76402000000.0, 74704000000.0, 77549000000.0, 78510000000.0, 78560000000.0, 76962000000.0, 78014000000.0, 78987000000.0, 78209000000.0, 78509000000.0, 78726000000.0, 79187000000.0, 77993000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), payables=pd.Series( [2993000000.0, 3431000000.0, 3975000000.0, 4970000000.0, 5856000000.0, 5990000000.0, 6858000000.0, 7524000000.0, 8161000000.0, 8360000000.0, 8470000000.0, 7978000000.0, 6753000000.0, 6221000000.0, 6343000000.0, 6280000000.0, 6560000000.0, 6731000000.0, 6860000000.0, 6778000000.0, 6515000000.0, 6328000000.0, 5862000000.0, 5206000000.0, 5023000000.0, 5101000000.0, 5104000000.0, 
4713000000.0, 4614000000.0, 5302000000.0, 5778000000.0, 6113000000.0, 6487000000.0, 6938000000.0, 6831000000.0, 6788000000.0, 7051000000.0, 7198000000.0, 7022000000.0, 6141000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), st_debt=pd.Series( [9784000000.0, 8622000000.0, 8927000000.0, 8271000000.0, 7981000000.0, 9691000000.0, 8456000000.0, 7508000000.0, 9648000000.0, 10762000000.0, 12055000000.0, 13326000000.0, 12391000000.0, 13246000000.0, 13871000000.0, 13522000000.0, 11031000000.0, 11290000000.0, 12936000000.0, 11102000000.0, 11501000000.0, 11972000000.0, 10875000000.0, 12335000000.0, 12844000000.0, 13893000000.0, 14343000000.0, 13488000000.0, 13965000000.0, 14557000000.0, 13377000000.0, 11089000000.0, 11031000000.0, 12150000000.0, 12470000000.0, 10332000000.0, 11553000000.0, 11542000000.0, 11514000000.0, 12318000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), current_lt_debt=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), tax_liab_st=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), other_current_liab=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), total_current_liab=pd.Series( [18975000000.0, 18417000000.0, 19784000000.0, 20178000000.0, 22020000000.0, 23147000000.0, 23510000000.0, 25218000000.0, 28357000000.0, 28849000000.0, 31273000000.0, 31746000000.0, 29415000000.0, 28974000000.0, 29919000000.0, 29119000000.0, 27297000000.0, 27295000000.0, 29741000000.0, 27589000000.0, 
27877000000.0, 26566000000.0, 25606000000.0, 25833000000.0, 26242000000.0, 26215000000.0, 27183000000.0, 25290000000.0, 26132000000.0, 27635000000.0, 28133000000.0, 25903000000.0, 26931000000.0, 27402000000.0, 28300000000.0, 26033000000.0, 28218000000.0, 27388000000.0, 27735000000.0, 27201000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), lt_debt=pd.Series( [21847000000.0, 21548000000.0, 20226000000.0, 20337000000.0, 20437000000.0, 19895000000.0, 25926000000.0, 26781000000.0, 24944000000.0, 25191000000.0, 27261000000.0, 26526000000.0, 27752000000.0, 27240000000.0, 25680000000.0, 26015000000.0, 26719000000.0, 26801000000.0, 27307000000.0, 28180000000.0, 27784000000.0, 26803000000.0, 27445000000.0, 25208000000.0, 25169000000.0, 24470000000.0, 23980000000.0, 23622000000.0, 22818000000.0, 23725000000.0, 23815000000.0, 24835000000.0, 23847000000.0, 23165000000.0, 23699000000.0, 25441000000.0, 25000000000.0, 24240000000.0, 24764000000.0, 25588000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), total_debt=pd.Series( [31631000000.0, 30170000000.0, 29153000000.0, 28608000000.0, 28418000000.0, 29586000000.0, 34382000000.0, 34289000000.0, 34592000000.0, 35953000000.0, 39316000000.0, 39852000000.0, 40143000000.0, 40486000000.0, 39551000000.0, 39537000000.0, 37750000000.0, 38091000000.0, 40243000000.0, 39282000000.0, 39285000000.0, 38775000000.0, 38320000000.0, 37543000000.0, 38013000000.0, 38363000000.0, 38323000000.0, 37110000000.0, 36783000000.0, 38282000000.0, 37192000000.0, 35924000000.0, 34878000000.0, 35315000000.0, 36169000000.0, 35773000000.0, 36553000000.0, 35782000000.0, 36278000000.0, 37906000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), deferred_rev=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), tax_liab_lt=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), deposit_liab=pd.Series( [1217000000.0, 1367000000.0, 1404000000.0, 1470000000.0, 1831000000.0, 1728000000.0, 1823000000.0, 2745000000.0, 2487000000.0, 2921000000.0, 3132000000.0, 3035000000.0, 2638000000.0, 2920000000.0, 2738000000.0, 2699000000.0, 2360000000.0, 2500000000.0, 2344000000.0, 2165000000.0, 1697000000.0, 1636000000.0, 1754000000.0, 1610000000.0, 1146000000.0, 1328000000.0, 1259000000.0, 1161000000.0, 1167000000.0, 1383000000.0, 1533000000.0, 1510000000.0, 1426000000.0, 1399000000.0, 1378000000.0, 1491000000.0, 1243000000.0, 1354000000.0, 1263000000.0, 1309000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), other_lt_liab=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), total_non_current_liab=pd.Series( [31763000000.0, 30945000000.0, 29305000000.0, 31132000000.0, 30675000000.0, 30109000000.0, 36205000000.0, 37845000000.0, 39459000000.0, 39588000000.0, 41584000000.0, 40859000000.0, 41973000000.0, 41451000000.0, 39778000000.0, 39976000000.0, 36721000000.0, 36733000000.0, 37163000000.0, 38003000000.0, 39978000000.0, 38886000000.0, 39475000000.0, 37168000000.0, 37215000000.0, 36339000000.0, 35814000000.0, 35397000000.0, 35359000000.0, 36254000000.0, 36298000000.0, 36960000000.0, 36265000000.0, 35340000000.0, 35745000000.0, 36286000000.0, 36211000000.0, 35860000000.0, 36574000000.0, 35799000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), total_liab=pd.Series( [50738000000.0, 49362000000.0, 49089000000.0, 51310000000.0, 52695000000.0, 53256000000.0, 59715000000.0, 63063000000.0, 67816000000.0, 68437000000.0, 72857000000.0, 72605000000.0, 71388000000.0, 70425000000.0, 69697000000.0, 69095000000.0, 
64018000000.0, 64028000000.0, 66904000000.0, 65592000000.0, 67855000000.0, 65452000000.0, 65081000000.0, 63001000000.0, 63457000000.0, 62554000000.0, 62997000000.0, 60687000000.0, 61491000000.0, 63889000000.0, 64431000000.0, 62863000000.0, 63196000000.0, 62742000000.0, 64045000000.0, 62319000000.0, 64429000000.0, 63248000000.0, 64309000000.0, 63000000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), common_stock=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), other_income=pd.Series( [-3764000000.0, -3886000000.0, -4045000000.0, -4412000000.0, -4051000000.0, -3724000000.0, -3544000000.0, -4019000000.0, -6328000000.0, -6033000000.0, -6150000000.0, -5988000000.0, -6433000000.0, -6669000000.0, -6698000000.0, -6247000000.0, -3898000000.0, -3801000000.0, -3683000000.0, -4357000000.0, -6431000000.0, -7101000000.0, -6729000000.0, -6843000000.0, -2035000000.0, -1493000000.0, -1633000000.0, -1527000000.0, -2039000000.0, -1827000000.0, -1471000000.0, -1233000000.0, -1192000000.0, -1016000000.0, -1496000000.0, -1568000000.0, -1684000000.0, -1588000000.0, -1499000000.0, -1783000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), retained_earnings=pd.Series( [19711000000.0, 19941000000.0, 20133000000.0, 20955000000.0, 21384000000.0, 22640000000.0, 23081000000.0, 24251000000.0, 25219000000.0, 26815000000.0, 27842000000.0, 29541000000.0, 29558000000.0, 30438000000.0, 30668000000.0, 31614000000.0, 31854000000.0, 32775000000.0, 32961000000.0, 33977000000.0, 33887000000.0, 34998000000.0, 34823000000.0, 35191000000.0, 29246000000.0, 29517000000.0, 29167000000.0, 29450000000.0, 27377000000.0, 27584000000.0, 27471000000.0, 28530000000.0, 26301000000.0, 27929000000.0, 28657000000.0, 30384000000.0, 30427000000.0, 32435000000.0, 32981000000.0, 34477000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), 
minority_interest=pd.Series( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), total_equity=pd.Series( [8740000000.0, 8942000000.0, 9185000000.0, 9837000000.0, 10824000000.0, 12629000000.0, 13391000000.0, 14162000000.0, 12883000000.0, 14969000000.0, 15926000000.0, 17884000000.0, 17532000000.0, 18274000000.0, 17621000000.0, 18110000000.0, 20811000000.0, 20305000000.0, 20856000000.0, 18823000000.0, 16746000000.0, 16962000000.0, 17092000000.0, 15896000000.0, 14809000000.0, 15676000000.0, 15232000000.0, 15645000000.0, 13213000000.0, 13660000000.0, 14079000000.0, 15697000000.0, 13766000000.0, 15272000000.0, 14942000000.0, 15890000000.0, 14080000000.0, 15478000000.0, 14878000000.0, 14993000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), total_liab_and_equity=pd.Series( [59478000000.0, 58304000000.0, 58274000000.0, 61147000000.0, 63519000000.0, 65885000000.0, 73106000000.0, 77225000000.0, 80699000000.0, 83406000000.0, 88783000000.0, 90489000000.0, 88920000000.0, 88699000000.0, 87318000000.0, 87205000000.0, 84829000000.0, 84333000000.0, 87760000000.0, 84415000000.0, 84601000000.0, 82414000000.0, 82173000000.0, 78897000000.0, 78266000000.0, 78230000000.0, 78229000000.0, 76332000000.0, 74704000000.0, 77549000000.0, 78510000000.0, 78560000000.0, 76962000000.0, 78014000000.0, 78987000000.0, 78209000000.0, 78509000000.0, 78726000000.0, 79187000000.0, 77993000000.0], index=LOAD_STOCKROW_CAT_Q_INDEX ), )
[ "whoopnip@gmail.com" ]
whoopnip@gmail.com
736142527e57ae2aceb8960a4ea6a9487e7e21fe
2fb22b9c1389a358ab1f9d4a66aca41331e63d35
/core/filters.py
1a755242523255ccb3dd0b61385fa8d749d1a9c2
[]
no_license
hoangnammkt/EcommerceDjango
3e42f78e023b0c081732254bbd7aa26b3351c035
f15e94bd191936e3441a54098c7a24bdff77d885
refs/heads/master
2023-06-24T03:49:27.948248
2021-07-23T03:06:52
2021-07-23T03:06:52
355,046,985
1
0
null
null
null
null
UTF-8
Python
false
false
360
py
import django_filters from .models import * class ProductFilter(django_filters.FilterSet): class Meta: model = Product fields = { 'brandname', 'socket', 'seriescpu', 'memory_type', 'capacity', 'screensize', 'resolution', 'wattage', }
[ "hoangnam921@gmail.com" ]
hoangnam921@gmail.com
5e4ee90a3ca26719d300c5e9032f7fd9c673031c
ff63af0035b0b3688a66b48bf3c209a3e0dbc5aa
/spambayes/spambayes/core_resources/help_gif.py
4649b7f776eb05a9e728f2fa8bef227f2e37c97f
[ "Python-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
smontanaro/spambayes
87767433d2d12de390a2e7204ceb2fa641f209d5
8ae169fde78371780019db5b7b7acfb4950bab8f
refs/heads/master
2023-06-25T06:43:25.595642
2023-06-12T17:51:55
2023-06-12T17:51:55
8,049,522
13
20
null
2021-04-05T21:20:23
2013-02-06T10:53:49
Python
ISO-8859-5
Python
false
false
2,246
py
# -*- coding: ISO-8859-1 -*- """Resource help_gif (from file help.gif)""" # written by resourcepackage: (1, 0, 0) source = 'help.gif' package = 'spambayes.resources' data = "GIF89a\026\000 \000ї\000\000\000{ї\000„ї\010„ї\020„ї\020Œї\030Œї!Œї!”ї)”џ1œџ9œџBœџR­џZ­џcЕџkЕ\ џsЕџ{Нџ„Нџ„ЦџŒЦџœЮџЅЮџЅжџ­жџЕжџНоџЦоџжчџжяџџџџџџџџџџџџџџџџџџ\ џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ\ џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ\ џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ\ џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ\ џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ\ џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ\ џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ\ џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ\ џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ\ џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ\ џџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџџ\ џ!љ\004\001\000\000\036\000,\000\000\000\000\026\000 \000\000\010Ї\000=\010\034HА Сƒ\030\036$ \000\000\000\004\0204\034\034(aУ‹\030\017T0ШС\000Ц\037\025\024\ \034№1РF\017\023\002|D0\001H\012\004#€” P\000Hƒ \017\010\004\011\000чM\017<\013r\0009`ЂС\006 \035\030%ш\000Є€Ѕ\002+‚\014€\001ъ\002ž\ \005$.Eš\024Њ\007\015SЋzхŠ‘ІW\017\0261žнљѓlаЕoнЖ]KЗЎ]Л\025\016\0000prm…}НzФh€n\\Џz3вЕ\000И\ Ў\005Н|яž\015\010\000;" ### end
[ "montanaro@users.sourceforge.net" ]
montanaro@users.sourceforge.net
f9f574a4d00a771aa40f9ddee1222b2e1cf2f25b
f693c9c487d31a677f009afcdf922b4e7f7d1af0
/biomixer-venv/lib/python3.9/site-packages/pylint/checkers/refactoring/recommendation_checker.py
b1175f03e03853db4ae7d542287abf40d715df6b
[ "MIT" ]
permissive
Shellowb/BioMixer
9048b6c07fa30b83c87402284f0cebd11a58e772
1939261589fe8d6584a942a99f0308e898a28c1c
refs/heads/master
2022-10-05T08:16:11.236866
2021-06-29T17:20:45
2021-06-29T17:20:45
164,722,008
1
3
MIT
2022-09-30T20:23:34
2019-01-08T19:52:12
Python
UTF-8
Python
false
false
4,749
py
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/master/LICENSE import astroid from pylint import checkers, interfaces from pylint.checkers import utils class RecommendationChecker(checkers.BaseChecker): __implements__ = (interfaces.IAstroidChecker,) name = "refactoring" msgs = { "C0200": ( "Consider using enumerate instead of iterating with range and len", "consider-using-enumerate", "Emitted when code that iterates with range and len is " "encountered. Such code can be simplified by using the " "enumerate builtin.", ), "C0201": ( "Consider iterating the dictionary directly instead of calling .keys()", "consider-iterating-dictionary", "Emitted when the keys of a dictionary are iterated through the .keys() " "method. It is enough to just iterate through the dictionary itself, as " 'in "for key in dictionary".', ), } @staticmethod def _is_builtin(node, function): inferred = utils.safe_infer(node) if not inferred: return False return utils.is_builtin_object(inferred) and inferred.name == function @utils.check_messages("consider-iterating-dictionary") def visit_call(self, node): if not isinstance(node.func, astroid.Attribute): return if node.func.attrname != "keys": return if not isinstance(node.parent, (astroid.For, astroid.Comprehension)): return inferred = utils.safe_infer(node.func) if not isinstance(inferred, astroid.BoundMethod) or not isinstance( inferred.bound, astroid.Dict ): return if isinstance(node.parent, (astroid.For, astroid.Comprehension)): self.add_message("consider-iterating-dictionary", node=node) @utils.check_messages("consider-using-enumerate") def visit_for(self, node): """Emit a convention whenever range and len are used for indexing.""" # Verify that we have a `range([start], len(...), [stop])` call and # that the object which is iterated is used as a subscript in the # body of the for. # Is it a proper range call? 
if not isinstance(node.iter, astroid.Call): return if not self._is_builtin(node.iter.func, "range"): return if not node.iter.args: return is_constant_zero = ( isinstance(node.iter.args[0], astroid.Const) and node.iter.args[0].value == 0 ) if len(node.iter.args) == 2 and not is_constant_zero: return if len(node.iter.args) > 2: return # Is it a proper len call? if not isinstance(node.iter.args[-1], astroid.Call): return second_func = node.iter.args[-1].func if not self._is_builtin(second_func, "len"): return len_args = node.iter.args[-1].args if not len_args or len(len_args) != 1: return iterating_object = len_args[0] if not isinstance(iterating_object, astroid.Name): return # If we're defining __iter__ on self, enumerate won't work scope = node.scope() if iterating_object.name == "self" and scope.name == "__iter__": return # Verify that the body of the for loop uses a subscript # with the object that was iterated. This uses some heuristics # in order to make sure that the same object is used in the # for body. for child in node.body: for subscript in child.nodes_of_class(astroid.Subscript): if not isinstance(subscript.value, astroid.Name): continue value = subscript.slice if isinstance(value, astroid.Index): value = value.value if not isinstance(value, astroid.Name): continue if value.name != node.target.name: continue if iterating_object.name != subscript.value.name: continue if subscript.value.scope() != node.scope(): # Ignore this subscript if it's not in the same # scope. This means that in the body of the for # loop, another scope was created, where the same # name for the iterating object was used. continue self.add_message("consider-using-enumerate", node=node) return
[ "marcelo.becerra@ug.uchile.cl" ]
marcelo.becerra@ug.uchile.cl
f0604ffc282782d11dd705590c81cce0149c6988
d089f7fb112076861cbf7d40b9a18859890fcebe
/common/utils.py
1e25769551e33bb5d12f94db4539a374dcb60233
[ "MIT", "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "Apache-2.0" ]
permissive
hctwgl/cmp
900e6b9cbafcd0c02e467f2141b51ebf0965c3df
6d6236ab8c24768373a483c5d0836263d08e5852
refs/heads/master
2020-05-03T15:34:36.823458
2018-12-08T04:55:44
2018-12-08T04:55:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
12,440
py
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available. Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 开发框架公用方法 1. 页面输入内容转义(防止xss攻击) from common.utils import html_escape, url_escape, texteditor_escape 2. 转义html内容 html_content = html_escape(input_content) 3. 转义url内容 url_content = url_escape(input_content) 4. 转义富文本内容 texteditor_content = texteditor_escape(input_content) """ from __future__ import unicode_literals from common.pxfilter import XssHtml from common.log import logger # -*- coding: utf-8 -*- from django.conf import settings from django.http import HttpResponse from itertools import chain import json import uuid import string import random import urllib import pytz import datetime import re import ConfigParser import os import platform import requests def html_escape(html, is_json=False): """ Replace special characters "&", "<" and ">" to HTML-safe sequences. If the optional flag quote is true, the quotation mark character (") is also translated. rewrite the cgi method @param html: html代码 @param is_json: 是否为json串(True/False) ,默认为False """ # &转换 if not is_json: html = html.replace("&", "&amp;") # Must be done first! 
# <>转换 html = html.replace("<", "&lt;") html = html.replace(">", "&gt;") # 单双引号转换 if not is_json: html = html.replace(' ', "&nbsp;") html = html.replace('"', "&quot;") html = html.replace("'", "&#39;") return html def url_escape(url): url = url.replace("<", "") url = url.replace(">", "") url = url.replace(' ', "") url = url.replace('"', "") url = url.replace("'", "") return url def texteditor_escape(str_escape): """ 富文本处理 @param str_escape: 要检测的字符串 """ try: parser = XssHtml() parser.feed(str_escape) parser.close() return parser.get_html() except Exception, e: logger.error(u"js脚本注入检测发生异常,错误信息:%s" % e) return str_escape def login_not_required(func): func.login_not_required = True return func def active_not_required(func): func.active_not_required = True return func def render_json(code=200,mes='',data=[],total=''): datas = { "code": code, "message": mes, "data": data, } if total: datas["total"] = total else: datas["total"] = 0 if settings.DEBUG: indent = 2 else: indent = 0 js = json.dumps(datas, indent) return HttpResponse(js,) def random_id(): random_id = str(uuid.uuid1()) random_id = random_id.upper() return random_id.replace("-","") def random_string(num): random_string = str(''.join(random.sample(string.ascii_letters + string.digits, num))) random_string = random_string.upper() return random_string def id_generator(size=32, chars=string.ascii_lowercase + string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) def tx_quote_plus(s, safe=''): return urllib.quote(s, safe) def albb_quote_plus(s, safe='~'): return urllib.quote(s, safe) def ksyun_quote_plus(s, safe=''): return urllib.quote(s, safe) def status_translate(s): datas = { # 数字翻译 "1": "运行中", "2": "关闭", # 基础 "running": "运行中", "active": "运行中", "stopping": "关机中", "stopped": "关闭", "shutoff": "关闭", "shutdown": "关闭", # 华为云 "creating": "创建中", "createfailed": "创建失败", "restarting": "重启中", "closing": "关机中", "frozen": "已冻结", "build": "创建中", "reboot": "重启中", "resize": "更新规格中", 
"verify_resize": "更新规格校验中", "hard_reboot": "强制重启中", "revert_resize": "更新规格回退中", # 金山云 "building": "创建中", "paused": "暂停", "suspended": "挂起", "resized": "离线迁移完成待确认/回退", "soft-delete": "已延迟删除,设定周期后自动强制删除", "deleted": "已删除", "deleting": "删除中", "error": "错误", # 金山云 "scheduling": "调度中", "block_device_mapping": '块存储设备映射中', "networking": '创建网络中', "spawning": '主机生成中', "image_snapshot": '快照创建中', "image_backup": ' 备份创建中', "updating_password": '主机修改密码中', "resize_prep": '准备升级配置/准备离线迁移', "resize_migrating": '离线迁移中', "resize_migrated": '已离线迁移', "resize_finish": '离线迁移完成', "resize_reverting": '离线迁移回退中', "resize_confirming": '离线迁移确认中', "migrating": '在线迁移中', "rebooting": '重启中', "rebooting_hard": '硬重启中', "pausing": '暂停中', "unpausing": '取消暂停中', "suspending": '挂起中', "resuming": '挂起恢复中', "starting": '开机中', "powering-off": '电源关闭中', "powering-on": '电源开启中', "rescuing": 'possible task states during rescue 故障恢复中', "unrescuing": 'possible task states during unrescue 解除故障恢复状态中', "rebuilding": '重装系统中', "rebuild_block_device_mapping": '重装系统块设备映射中', "rebuild_spawning": '重装系统主机生成中', # model错误翻译 "idcroom matching query does not exist.": '该机房不存在!', "rack matching query does not exist.": '该机柜不存在!', # vmware "powered_off": "关机", "powered_on": "运行中", "poweredoff": "关机", "poweredon": "运行中", } if s: s = s.lower() else: s = '' try: result = datas[s] except Exception: result = s return result def pay_strategy_translate(s): datas = { # 阿里云 "prepaid": "包年包月", "postpaid": "按量付费", # 腾讯云 # "prepaid": "包年包月", "postpaid_by_hour": "按量付费", # 金山云 "monthly": "按月付费", "yearly": "按年付费", } if s: s = s.lower() else: s = '' try: result = datas[s] except Exception: result = s return result def disk_category_translate(s): datas = { # 阿里云 "cloud": "普通云盘", "cloud_basic": "普通云盘", "cloud_efficiency": "高效云盘", "cloud_ssd": "SSD云盘", "ephemeral_ssd": "本地SSD盘", "ephemeral": "本地磁盘", "ephemeral_basic": "本地磁盘", # 腾讯云 "local_basic": "本地硬盘", "local_ssd": "本地SSD硬盘", # "cloud_basic": "普通云硬盘", # "cloud_ssd": "SSD云盘", "cloud_premium": 
"高性能云硬盘", } s = s.lower() try: result = datas[s] except Exception: result = s return result def is_valid_date(strdate): '''判断是否是一个有效的日期字符串''' try: datetime.datetime.strptime(strdate, "%Y-%m-%d").date() return True except: return False def isIP(str): p = re.compile('^((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)$') if p.match(str): return True else: return False def is_internal_ip(ip): def ip_into_int(ip): # 先把 192.168.1.13 变成16进制的 c0.a8.01.0d ,再去了“.”后转成10进制的 3232235789 即可。 # (((((192 * 256) + 168) * 256) + 1) * 256) + 13 return reduce(lambda x,y:(x<<8)+y,map(int,ip.split('.'))) ip = ip_into_int(ip) net_a = ip_into_int('10.255.255.255') >> 24 net_b = ip_into_int('172.31.255.255') >> 20 net_c = ip_into_int('192.168.255.255') >> 16 return ip >> 24 == net_a or ip >> 20 == net_b or ip >> 16 == net_c def judging_asset_number_format(str): p = re.compile('^[a-zA-Z0-9_]{4,18}$') if p.match(str): return True else: return False def getEtcConfig(file_path,items_name): # 生成config对象 conf = ConfigParser.ConfigParser() # 用config对象读取配置文件 sysstr = platform.system() if (sysstr == "Windows"): path = os.path.dirname(__file__) parent_path = os.path.dirname(path) file_path = parent_path + file_path conf.read(file_path) else: conf.read(file_path) return dict(conf.items(items_name)) def sendEmail(to_email,html): # 获取email的配置信息 email_conf = getEtcConfig('/etc/msg/email.conf', 'email_main') url = email_conf["url"] # 您需要登录SendCloud创建API_USER,使用API_USER和API_KEY才可以进行邮件的发送。 params = { "apiUser": email_conf["api_user"], "apiKey": email_conf["api_key"], "from": email_conf["from"], "fromName": email_conf["from_name"], "to": to_email, "subject": email_conf["subject"], "html": html } r = requests.post(url, files={}, data=params) return r.text class JsonModel(object): def json(self): data = {} attnames = list(set(chain.from_iterable( (field.name, field.attname) if hasattr(field, 'attname') else (field.name,) for field in self._meta.get_fields() # For complete backwards 
compatibility, you may want to exclude # GenericForeignKey from the results. if not (field.many_to_one and field.related_model is None) ))) for item in attnames: if not isinstance(getattr(self, item), \ (basestring, long, int, float, list, tuple, dict)) \ and getattr(self, item): data[item] = getattr(self, item).astimezone(pytz.timezone(settings.TIME_ZONE)).strftime("%Y-%m-%d %H:%M:%S") else: data[item] = getattr(self, item) return data def json_verbose_name(self): data = {} attnames = list(set(chain.from_iterable( (field.name, field.attname) if hasattr(field, 'attname') else (field.name,) for field in self._meta.get_fields() # For complete backwards compatibility, you may want to exclude # GenericForeignKey from the results. if not (field.many_to_one and field.related_model is None) ))) for item in attnames: if not isinstance(getattr(self, item), \ (basestring, long, int, float, list, tuple, dict)) \ and getattr(self, item): data[self._meta.get_field(item).verbose_name] = getattr(self, item).astimezone(pytz.timezone(settings.TIME_ZONE)).strftime("%Y-%m-%d %H:%M:%S") else: data[self._meta.get_field(item).verbose_name] = getattr(self, item) return data def attr_list(self): attnames = list(set(chain.from_iterable( (field.name, field.attname) if hasattr(field, 'attname') else (field.name,) for field in self._meta.get_fields() # For complete backwards compatibility, you may want to exclude # GenericForeignKey from the results. if not (field.many_to_one and field.related_model is None) ))) return attnames
[ "yangwu@tyun.cn" ]
yangwu@tyun.cn
3b6b5659eb0d0fdc61d6ad5674f95e90737703a9
0148a4a10bff695045f7956c11c34cfec7a2a148
/arcade/python/caravan_of_collections/46.py
cd37d8543a7c4ba38cc430c3ea30e6388d0aca93
[]
no_license
pdsmith90/codesignal
bd6e0e8dd511451dff757db68d288f88681d2e7a
9bd732584587e422036e2ffd9350cac8b99c609e
refs/heads/main
2023-04-15T22:40:40.173205
2021-04-26T20:04:12
2021-04-26T20:04:12
356,585,580
1
1
null
null
null
null
UTF-8
Python
false
false
221
py
from collections import deque def doodledPassword(digits): n = len(digits) res = [deque(digits) for _ in range(n)] deque(map(lambda x: res[x].rotate(-x),range(len(res))), 0) return [list(d) for d in res]
[ "noreply@github.com" ]
pdsmith90.noreply@github.com
0845b6e84d193784730fba1c21ceac9fbba13673
9a2215d856191dc5325281da4bd780923d882600
/combinations.py
a645aaf48fec506efec9d35b9a2cdd5ce10d8b58
[]
no_license
ciscprocess/leetcode-problems
7fe8ed2b9fac155b41ea15b97ea739b6f8879289
dbbeb782fa72a30ab6c62dad89d4cee46d53e2cb
refs/heads/master
2021-01-23T22:15:13.405808
2015-01-21T00:05:21
2015-01-21T00:05:21
29,561,399
0
0
null
null
null
null
UTF-8
Python
false
false
276
py
__author__ = 'Nathan' class Solution: # @return a list of lists of integers def combine(self, n, k): pass def step(self, depth, n, k): for i in range(depth, n): pass def main(): print 'hi' if __name__ == "__main__": main()
[ "ciscprocess@gmail.com" ]
ciscprocess@gmail.com
4ebd87e5c2ce79ca2cb99a93d6f23de47520a7f0
af98da80f355fd75c6edfe76feb6806b8adba6a9
/pytorch-pretrained-BERT/make_data.py
bb69d729bfcf71d425ca02601f507acb72a49147
[ "Apache-2.0" ]
permissive
dzhao123/exp
4d2fe39c33785eb15893bc4ae3eea12f2067b83e
aef8152127d3f1f6671310d942fca78bea0d46e3
refs/heads/master
2022-10-19T16:04:58.074873
2019-07-08T13:27:32
2019-07-08T13:27:32
182,865,478
0
1
null
2022-10-03T00:08:00
2019-04-22T20:47:21
Python
UTF-8
Python
false
false
1,341
py
from pytorch_pretrained_bert import BertTokenizer tokenizer = BertTokenizer('/work/ececis_research/Manning/uncased_L-12_H-768_A-12/vocab.txt') def trans(txt): #print(txt) return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(txt)) def make_data(line): #for line in data: #line = line.strip('\n').split('\t') print(line[0]) print(line[1]) print(line[2]) print(line[3]) print(line[4]) print(line[5]) print(line[6]) qury, docp, docn, docpp, docpn, docnp, docnn = trans(line[0]), trans(line[1]), trans(line[2]), trans(line[3]), trans(line[4]), trans(line[5]), trans(line[6]) return ','.join(str(x) for x in qury) + '\t' + ','.join(str(x) for x in docp) + '\t' + ','.join(str(x) for x in docn) + '\t' + ','.join(str(x) for x in docpp) + '\t' + ','.join(str(x) for x in docpn) + '\t' + ','.join(str(x) for x in docnp) + '\t' + ','.join(str(x) for x in docnn) + '\n' if __name__ == '__main__': with open("/work/ececis_research/Manning/train_TFC1_bert_A.txt") as file: data = file.readlines(10) with open("/work/ececis_research/Manning/b.txt", "w") as file: for line in data: line = line.strip('\n').split('\t') if len(line) < 3: continue output = make_data(line) file.write(output)
[ "zhaodimcmc@gmail.com" ]
zhaodimcmc@gmail.com
47633761925b05cb7b78e248675293d5b8f9b673
c74c907a32da37d333096e08d2beebea7bea65e7
/kaikeba/cv/week6/Week 6 coding/model/network.py
0d65271f2868c98a4052e0036c292fdbae18f056
[]
no_license
wangqiang79/learn
6b37cc41140cc2200d928f3717cfc72357d10d54
e4b949a236fa52de0e199c69941bcbedd2c26897
refs/heads/master
2022-12-25T06:24:39.163061
2020-07-13T15:43:13
2020-07-13T15:43:13
231,796,188
2
2
null
2022-12-08T07:03:05
2020-01-04T16:45:33
Jupyter Notebook
UTF-8
Python
false
false
1,057
py
import torch.nn as nn import torchvision.models as models from model.module import Block, Bottleneck, DownBottleneck, Layer #pytorch Torchvision class ResNet101v2(nn.Module): ''' ResNet101 model ''' def __init__(self): super(ResNet101v2, self).__init__() #下采样2倍 self.conv1 = Block(3, 64, 7, 3, 2) self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) self.conv2_1 =DownBottleneck(64, 256, stride=1) self.conv2_2 =Bottleneck(256, 256) self.conv2_3 =Bottleneck(256, 256) #下采样2倍 8倍 self.layer3 = Layer(256, [512]*2, "resnet") #下采样2倍 16倍 self.layer4 = Layer(512, [1024]*23, "resnet") #下采样2倍 32倍 self.layer5 = Layer(1024, [2048]*3, "resnet") def forward(self, x): f1 = self.conv1(x) f2 = self.conv2_3(self.conv2_2(self.conv2_1(self.pool1(f1)))) f3 = self.layer3(f2) f4 = self.layer4(f3) f5 = self.layer5(f4) return [f2, f3, f4, f5]
[ "wang20100226@outlook.com" ]
wang20100226@outlook.com
67b5e8386db8e4569b1d1edacc6de547975d3b74
af626ade966544b91fbfe0ea81fc887f1b8a2821
/qa/rpc-tests/python-bitcoinrtxrpc/setup.py
fdfe9d34717f62f28824291c40f42b658d4ca311
[ "MIT" ]
permissive
kunalbarchha/bitcoinrtx-old
91f2b36a50e009151c61d36e77a563d0c17ab632
42c61d652288f183c4607462e2921bb33ba9ec1f
refs/heads/master
2023-03-13T22:46:36.993580
2021-03-04T13:01:00
2021-03-04T13:01:00
null
0
0
null
null
null
null
UTF-8
Python
false
false
630
py
#!/usr/bin/env python2 from distutils.core import setup setup(name='python-bitcoinrtxrpc', version='0.1', description='Enhanced version of python-jsonrpc for use with Bitcoinrtx', long_description=open('README').read(), author='Jeff Garzik', author_email='<jgarzik@exmulti.com>', maintainer='Jeff Garzik', maintainer_email='<jgarzik@exmulti.com>', url='http://www.github.com/jgarzik/python-bitcoinrtxrpc', packages=['bitcoinrtxrpc'], classifiers=['License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)', 'Operating System :: OS Independent'])
[ "kunal@coinrecoil.com" ]
kunal@coinrecoil.com
e464341de923718bbbaa1488a84683873c713a8e
e735a31f57bf6a7e0fe46c5819ccd5582a9dd610
/BackFrontInterface.py
8dd37b3027eda00999404b5e77f06c11ac5824e6
[]
no_license
LucaFiume/PWScale
91a81d75727b7ac071289241f7935f38a63c78d8
9ce67a0119225c7faed8191dac3f388d6d8a3cba
refs/heads/main
2023-02-27T01:39:55.426071
2021-01-28T08:25:47
2021-01-28T08:25:47
333,689,060
0
0
null
2021-01-28T08:07:58
2021-01-28T08:07:57
null
UTF-8
Python
false
false
3,947
py
from flask import Flask, request, jsonify from Tester import Tester app = Flask(__name__) testers = {} previousType = {} questionCount = {} questionsAsked = {} finished = {} """This script intends to apply Flask for an integration between a Javascript frontend and this Python App. This app should be used as follows: 1. First, call 'http://.../start?id=...', creating a new test object, taking the user's id as input. The ouput is the question IDs of the initial battery of questions. 2. Then, start the dynamic part of the test, given the answers to the static part of the test. This is done by calling 'http://.../start-dynamic?id=...' from the test frontend. This method takes as input a list of answers, each encoded as a whole number from 0 to 4, and returns the question ID of the next question to be asked. 3. For the remaining questions of the dynamic part of the test, the method called with 'http://.../next-dynamic?id=...' is used. Here, the frontend inputs the answer of the previous dynamic question, expressed as a list containing a whole number ranging from 0 to 4, and returns the question ID of the next question to be asked. The frontend should call this method iteratively until the next question to be asked is None. 4. Once the dynamic part of the video is done, a set of videos associated to the resulting score are displayed. Thus, when calling 'http://.../videos?id=...', the corresponding method ouputs the video IDs corresponding to the videos to be shown. 5. Lastly, once the user's answers to the presented videos is received, the method attached to 'http://.../final-report' has to be called, taking as input a list of answers, which here can take any value from 0 to 4. 
Once the user's response is inputted, these responses are processed in order to report the final test score.""" @app.route('/start') def setup(): user_id = request.args.get('id') testers[user_id] = Tester() questionsAsked[user_id] = testers[user_id].self_assessment_emmit() previousType[user_id] = 'W' questionCount[user_id] = 1 finished[user_id] = False return jsonify(questionsAsked[user_id]) @app.route('/start-dynamic') def start_dynamic(): user_id = request.args.get('id') test = testers[user_id] asked = questionsAsked[user_id] previous = previousType[user_id] count = questionCount[user_id] # Still unclear how answers are inputted/encoded here answers = request.json _, _ = test.receive(answers, asked, is_self_assessment=True) questionsAsked[user_id], finished[user_id] = test.test_core_emmit(previous, count) return jsonify(questionsAsked[user_id]) @app.route('/next-dynamic') def next_dynamic(): user_id = request.args.get('id') test = testers[user_id] asked = questionsAsked[user_id] if len(asked) > 0 and not finished[user_id]: answers = request.json count = questionCount[user_id] _, _ = test.receive(answers, asked, count) questionCount[user_id] += 1 previousType[user_id] = asked[-1][0] count = questionCount[user_id] previous = previousType[user_id] questionsAsked[user_id], finished[user_id] = test.test_core_emmit(previous, count) return jsonify(questionsAsked[user_id]) @app.route('/videos') def videos(): user_id = request.args.get('id') test = testers[user_id] questionsAsked[user_id] = test.video_emmit() return jsonify(questionsAsked[user_id]) @app.route('/final-report') def report(): user_id = request.args.get('id') test = testers[user_id] asked = questionsAsked[user_id] answers = request.json p_score, w_score = test.receive(answers, asked, is_video=True) del testers[user_id] del questionsAsked[user_id] del previousType[user_id] del questionCount[user_id] del finished[user_id] return jsonify(p_score), jsonify(w_score) if __name__ == '__main__': app.run()
[ "alvarotomasasv@gmai.com" ]
alvarotomasasv@gmai.com
a54826ebaa280ca23aad774cb3f6aec445632e62
163bbb4e0920dedd5941e3edfb2d8706ba75627d
/Code/CodeRecords/2561/60639/261665.py
65b5061f633e844981dc12f1881df53161da5a40
[]
no_license
AdamZhouSE/pythonHomework
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
ffc5606817a666aa6241cfab27364326f5c066ff
refs/heads/master
2022-11-24T08:05:22.122011
2020-07-28T16:21:24
2020-07-28T16:21:24
259,576,640
2
1
null
null
null
null
UTF-8
Python
false
false
446
py
def solution(n,x,arr1,arr2): sum=0 for i in range(n*n): if x-arr1[i] in arr2: sum+=1 else: continue print(sum) t=int(input()) for i in range(t): inp=input().split() n=int(inp[0]) x=int(inp[1]) arr1=[] arr2=[] for i in range(n): arr1+=list(map(int,input().split())) for i in range(n): arr2+=list(map(int,input().split())) solution(n,x,arr1,arr2)
[ "1069583789@qq.com" ]
1069583789@qq.com
3fa7693640d499ad3cc3b2d1aca80224ca920b45
81e01b2daa052adf44380d3224bdbfd023d01118
/chapter3-stacks-and-queues/3.5.py
a2370c7a59d997bc1fa24cb1450c7d2930cf507f
[]
no_license
yuanxu-li/careercup
888dbea43e4e91267c42683ae74b6ab56e27c2a1
d28ea71a1a5aaa97b23e23bb04c84aaa5f590a78
refs/heads/master
2021-01-10T00:56:45.710856
2016-03-28T00:58:47
2016-03-28T00:58:47
43,286,008
0
0
null
null
null
null
UTF-8
Python
false
false
2,309
py
# 3.5 Sort Stack: Write a program to sort a stack such that the smallest # items are on the top. You can use an additional temporary stack, but # you may not copy the elements into any other data structure (such as # an array). The stack supports the following operation: push, pop, peek, # and isEmpty. class StackNode: def __init__(self, data): self.data = data self.next = None class Stack: def __init__(self, next=None): self.top = None self.next = next self.size = 0 def push(self, data): node = StackNode(data) node.next = self.top self.top = node self.size += 1 def pop(self): if self.top is None: raise Exception("out of bound!") else: data = self.top.data self.top = self.top.next self.size -= 1 return data def peek(self): if self.top is None: return None else: return self.top.data def is_empty(self): return self.top is None def sort(self, compare=lambda x, y : x > y): """ merge sort time complexity: f(n) = 2*f(n/2) + 2n => O(nlogn) >>> s = Stack() >>> s.push(2) >>> s.push(1) >>> s.push(3) >>> s.push(4) >>> s.push(0) >>> s.sort() >>> s.pop() 0 >>> s.pop() 1 >>> s.pop() 2 >>> s.pop() 3 >>> s.pop() 4 """ if self.top == None: return middle = self.pop() upper_stack = Stack() lower_stack = Stack() while self.peek() is not None: data = self.pop() if compare(data, middle): lower_stack.push(data) else: upper_stack.push(data) reverse_compare = lambda x, y: not compare(x, y) lower_stack.sort(compare=reverse_compare) upper_stack.sort(compare=reverse_compare) while lower_stack.peek() is not None: self.push(lower_stack.pop()) self.push(middle) while upper_stack.peek() is not None: self.push(upper_stack.pop()) def sort_modified(self): """ time complexity: O(n^2) >>> s = Stack() >>> s.push(2) >>> s.push(1) >>> s.push(3) >>> s.push(4) >>> s.push(0) >>> s.sort_modified() >>> s.pop() 0 >>> s.pop() 1 >>> s.pop() 2 >>> s.pop() 3 >>> s.pop() 4 """ extra = Stack() while not self.is_empty(): temp = self.pop() while not extra.is_empty() and extra.peek() < temp: self.push(extra.pop()) 
extra.push(temp) self.top = extra.top if __name__ == "__main__": import doctest doctest.testmod()
[ "yuanxu.lee@gmail.com" ]
yuanxu.lee@gmail.com
5b8ac0baacc647c51ca2b8b194dd89a41db30913
35a0ebfa46bb9aab662a5bc23b6e6375751327b9
/exercise1/low_order_cycle_lengths.py
99fce4bc8f4a4ac878bec8630f7db4caec0a89d0
[]
no_license
46bit/entropy-practical
bae486efa29c04660cc8b5097ccfa7041ec757ba
5fe6c19d2c89c3067a15fc5d23866fcf3cc028b9
refs/heads/master
2021-05-01T20:33:06.641717
2016-11-09T02:16:37
2016-11-09T02:16:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,867
py
from cp_randomness.exercise1 import LCG, LCGS # Implement find_low_bits_cycle using the hints it contains. # To run this file and see your calculated cycle lengths, run # python3 low_order_cycle_lengths.py # To check your results, run the tests # python3 low_order_cycle_lengths_test.py def find_low_bits_cycle(low_bit_count, lcg): mask = (1 << low_bit_count) - 1 # Get the low bits of an LCG output like this: first_output = lcg.next() first_output_low_bits = first_output & mask # Try cycle lengths from 0 to the maximum possible cycle length. lob_cycle_length = None for offset in range(1, lcg.param.modulus + 1): # Make a copy of the original LCG. unoffset_lcg = LCG(lcg.param, seed=first_output) # This is how to make a new LCG setup the same as ours and then advance it by `offset'. # If the cycle length was 1, these two generators would have identical outputs forever. # Find the right amount to advance to have the outputs match that of the original generator. offset_lcg = LCG(lcg.param, seed=first_output).advance(offset) # Check the outputs match up until the maximum overall cycle length. matches = True for j in range(1, lcg.param.modulus + 1): # @TODO: By student. pass if matches: lob_cycle_length = offset break # Calculate the cycle length of the low order bits and return it as an integer. return lob_cycle_length if __name__ == "__main__": for lcg_name in ["A", "B", "C"]: lcg = LCGS[lcg_name] print("LCG %s\n %s" % (lcg_name, lcg.param)) for low_bit_count in range(1, 12): low_bit_cycle_length = find_low_bits_cycle(low_bit_count, lcg) print(" low_%dbits_cycle_length = %d" % (low_bit_count, low_bit_cycle_length))
[ "hi@46b.it" ]
hi@46b.it
6c8789550af13191f5d64b9eb7d8bbbced53484a
d2f6140a45b234711c0b6bce9ab98c9468d6041e
/homework/2014-20/similar_hw_3/HenryRueda_primos.py
d98f69dad28fbfccad7005e984d6a50995f81e42
[]
no_license
ComputoCienciasUniandes/MetodosComputacionalesDatos
14b6b78655ed23985ecff28550934dec96f6373b
ecc7c21ca13a3b7bbdc7a8ef5715e8c9bf6e48cf
refs/heads/master
2020-04-10T14:17:03.465444
2017-06-09T15:02:57
2017-06-09T15:02:57
12,526,572
0
8
null
null
null
null
UTF-8
Python
false
false
1,369
py
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys,math if len(sys.argv)<2: print('\nNo hay suficiente información\n'.format(sys.argv[0])) sys.exit(0) Maximo=1000000 n=int(sys.argv[1]) if n<0: print('\nEl número ingresado es negativo\n'.format(Maximo)) if n == 0: print ('\n{} no es un número valido para ejecutar este programa\n'.format(0)) sys.exit(0) if n>Maximo: print('\nEl número es mayor que un millón\n'.format(Maximo)) sys.exit(0) arrayprimos = [] factores = set() for i in range(2, Maximo+1): if i not in factores: arrayprimos.append(i) factores.update(range(i*i, Maximo+1, i)) def es_primo(n): if n in arrayprimos: return True if es_primo(n): print('\nEl número ingresado es primo\n'.format(n)) sys.exit(0) k=int(math.sqrt(n)+1) for i in range (0,k): aux=float(n)/arrayprimos[i] if aux%1==0 and es_primo(aux): if aux!=arrayprimos[i]: print('\n{} y {}'.format(arrayprimos[i], int(aux))) sys.exit(0) break else: print('\nexception'.format(arrayprimos[i], int(aux))) sys.exit(0) break print('\nEl número en su descomposición tiene más de dos factores primos')
[ "j.e.forero.romero@gmail.com" ]
j.e.forero.romero@gmail.com
7a5957ca47ce221a260a0e4b81b01764c18e7f4c
8679994dc5328d588d0f9c4a03a6f659775cfe4b
/testscripts/emptylogin.py
d2f4b57495a7c02cf35864e05b15d002b6c4fd03
[]
no_license
TheMysteryofDoom/houseblog
b65881bb995760e58101e2ff6aee6092e7109953
4f481938d6b25b2d69661f3504238f7a8f515d0e
refs/heads/master
2020-05-15T12:39:15.674194
2019-05-25T14:24:38
2019-05-25T14:24:38
182,272,702
2
1
null
2019-05-25T09:22:29
2019-04-19T13:57:38
Java
UTF-8
Python
false
false
314
py
from selenium import webdriver from selenium.webdriver.common.keys import Keys import string #Charles Ricky Villarin - SE41 driver = webdriver.Chrome() driver.get("localhost:8080/login") loginbtn = driver.find_element_by_id("login") loginbtn.click() driver.save_screenshot('error-emptylogin.jpg')
[ "noreply@github.com" ]
TheMysteryofDoom.noreply@github.com
d1c92acbcebd8f85a1f1734625261cf949103442
77fa042cadaf9d5fd8afcb9864311fb1bad7602f
/lab04/src/ice/Bank_ice.py
32b67cceaad45092661188c509756d9951cf334c
[]
no_license
konrad2508/distributed-systems-lab
96873faa2a0619328d7aa1190937836192aa892f
2c1531a0237ccaab601ef0c137b84528696d7f8b
refs/heads/master
2023-07-30T04:48:46.506011
2019-06-03T21:40:19
2019-06-03T21:40:19
174,689,130
2
0
null
2023-07-05T20:56:35
2019-03-09T12:04:24
Python
UTF-8
Python
false
false
18,936
py
# -*- coding: utf-8 -*- # # Copyright (c) ZeroC, Inc. All rights reserved. # # # Ice version 3.7.2 # # <auto-generated> # # Generated from file `Bank.ice' # # Warning: do not edit this file. # # </auto-generated> # from sys import version_info as _version_info_ import Ice, IcePy # Start of module Bank _M_Bank = Ice.openModule('Bank') __name__ = 'Bank' if 'AccountType' not in _M_Bank.__dict__: _M_Bank.AccountType = Ice.createTempClass() class AccountType(Ice.EnumBase): def __init__(self, _n, _v): Ice.EnumBase.__init__(self, _n, _v) def valueOf(self, _n): if _n in self._enumerators: return self._enumerators[_n] return None valueOf = classmethod(valueOf) AccountType.Standard = AccountType("Standard", 0) AccountType.Premium = AccountType("Premium", 1) AccountType._enumerators = { 0:AccountType.Standard, 1:AccountType.Premium } _M_Bank._t_AccountType = IcePy.defineEnum('::Bank::AccountType', AccountType, (), AccountType._enumerators) _M_Bank.AccountType = AccountType del AccountType if 'AccountException' not in _M_Bank.__dict__: _M_Bank.AccountException = Ice.createTempClass() class AccountException(Ice.UserException): def __init__(self, reason=''): self.reason = reason def __str__(self): return IcePy.stringifyException(self) __repr__ = __str__ _ice_id = '::Bank::AccountException' _M_Bank._t_AccountException = IcePy.defineException('::Bank::AccountException', AccountException, (), False, None, (('reason', (), IcePy._t_string, False, 0),)) AccountException._ice_type = _M_Bank._t_AccountException _M_Bank.AccountException = AccountException del AccountException if 'InvalidCredentialsException' not in _M_Bank.__dict__: _M_Bank.InvalidCredentialsException = Ice.createTempClass() class InvalidCredentialsException(_M_Bank.AccountException): def __init__(self, reason=''): _M_Bank.AccountException.__init__(self, reason) def __str__(self): return IcePy.stringifyException(self) __repr__ = __str__ _ice_id = '::Bank::InvalidCredentialsException' 
_M_Bank._t_InvalidCredentialsException = IcePy.defineException('::Bank::InvalidCredentialsException', InvalidCredentialsException, (), False, _M_Bank._t_AccountException, ()) InvalidCredentialsException._ice_type = _M_Bank._t_InvalidCredentialsException _M_Bank.InvalidCredentialsException = InvalidCredentialsException del InvalidCredentialsException if 'AccountAlreadyExistsException' not in _M_Bank.__dict__: _M_Bank.AccountAlreadyExistsException = Ice.createTempClass() class AccountAlreadyExistsException(_M_Bank.AccountException): def __init__(self, reason=''): _M_Bank.AccountException.__init__(self, reason) def __str__(self): return IcePy.stringifyException(self) __repr__ = __str__ _ice_id = '::Bank::AccountAlreadyExistsException' _M_Bank._t_AccountAlreadyExistsException = IcePy.defineException('::Bank::AccountAlreadyExistsException', AccountAlreadyExistsException, (), False, _M_Bank._t_AccountException, ()) AccountAlreadyExistsException._ice_type = _M_Bank._t_AccountAlreadyExistsException _M_Bank.AccountAlreadyExistsException = AccountAlreadyExistsException del AccountAlreadyExistsException if 'UnrecognizedCurrencyException' not in _M_Bank.__dict__: _M_Bank.UnrecognizedCurrencyException = Ice.createTempClass() class UnrecognizedCurrencyException(Ice.UserException): def __init__(self, reason=''): self.reason = reason def __str__(self): return IcePy.stringifyException(self) __repr__ = __str__ _ice_id = '::Bank::UnrecognizedCurrencyException' _M_Bank._t_UnrecognizedCurrencyException = IcePy.defineException('::Bank::UnrecognizedCurrencyException', UnrecognizedCurrencyException, (), False, None, (('reason', (), IcePy._t_string, False, 0),)) UnrecognizedCurrencyException._ice_type = _M_Bank._t_UnrecognizedCurrencyException _M_Bank.UnrecognizedCurrencyException = UnrecognizedCurrencyException del UnrecognizedCurrencyException if 'ClientData' not in _M_Bank.__dict__: _M_Bank.ClientData = Ice.createTempClass() class ClientData(object): def __init__(self, name='', 
surname='', id='', income=0.0): self.name = name self.surname = surname self.id = id self.income = income def __eq__(self, other): if other is None: return False elif not isinstance(other, _M_Bank.ClientData): return NotImplemented else: if self.name != other.name: return False if self.surname != other.surname: return False if self.id != other.id: return False if self.income != other.income: return False return True def __ne__(self, other): return not self.__eq__(other) def __str__(self): return IcePy.stringify(self, _M_Bank._t_ClientData) __repr__ = __str__ _M_Bank._t_ClientData = IcePy.defineStruct('::Bank::ClientData', ClientData, (), ( ('name', (), IcePy._t_string), ('surname', (), IcePy._t_string), ('id', (), IcePy._t_string), ('income', (), IcePy._t_double) )) _M_Bank.ClientData = ClientData del ClientData if '_t_LoanHistory' not in _M_Bank.__dict__: _M_Bank._t_LoanHistory = IcePy.defineDictionary('::Bank::LoanHistory', (), IcePy._t_string, IcePy._t_string) if '_t_LoanHistorySeq' not in _M_Bank.__dict__: _M_Bank._t_LoanHistorySeq = IcePy.defineSequence('::Bank::LoanHistorySeq', (), _M_Bank._t_LoanHistory) if 'AccountData' not in _M_Bank.__dict__: _M_Bank.AccountData = Ice.createTempClass() class AccountData(Ice.Value): def __init__(self, accountType=_M_Bank.AccountType.Standard, funds=0.0, loans=Ice.Unset): self.accountType = accountType self.funds = funds self.loans = loans def ice_id(self): return '::Bank::AccountData' @staticmethod def ice_staticId(): return '::Bank::AccountData' def __str__(self): return IcePy.stringify(self, _M_Bank._t_AccountData) __repr__ = __str__ _M_Bank._t_AccountData = IcePy.defineValue('::Bank::AccountData', AccountData, -1, (), False, False, None, ( ('accountType', (), _M_Bank._t_AccountType, False, 0), ('funds', (), IcePy._t_double, False, 0), ('loans', (), _M_Bank._t_LoanHistorySeq, True, 1) )) AccountData._ice_type = _M_Bank._t_AccountData _M_Bank.AccountData = AccountData del AccountData if 'RegistrationInfo' not in 
_M_Bank.__dict__: _M_Bank.RegistrationInfo = Ice.createTempClass() class RegistrationInfo(object): def __init__(self, accountType=_M_Bank.AccountType.Standard, password=''): self.accountType = accountType self.password = password def __hash__(self): _h = 0 _h = 5 * _h + Ice.getHash(self.accountType) _h = 5 * _h + Ice.getHash(self.password) return _h % 0x7fffffff def __compare(self, other): if other is None: return 1 elif not isinstance(other, _M_Bank.RegistrationInfo): return NotImplemented else: if self.accountType is None or other.accountType is None: if self.accountType != other.accountType: return (-1 if self.accountType is None else 1) else: if self.accountType < other.accountType: return -1 elif self.accountType > other.accountType: return 1 if self.password is None or other.password is None: if self.password != other.password: return (-1 if self.password is None else 1) else: if self.password < other.password: return -1 elif self.password > other.password: return 1 return 0 def __lt__(self, other): r = self.__compare(other) if r is NotImplemented: return r else: return r < 0 def __le__(self, other): r = self.__compare(other) if r is NotImplemented: return r else: return r <= 0 def __gt__(self, other): r = self.__compare(other) if r is NotImplemented: return r else: return r > 0 def __ge__(self, other): r = self.__compare(other) if r is NotImplemented: return r else: return r >= 0 def __eq__(self, other): r = self.__compare(other) if r is NotImplemented: return r else: return r == 0 def __ne__(self, other): r = self.__compare(other) if r is NotImplemented: return r else: return r != 0 def __str__(self): return IcePy.stringify(self, _M_Bank._t_RegistrationInfo) __repr__ = __str__ _M_Bank._t_RegistrationInfo = IcePy.defineStruct('::Bank::RegistrationInfo', RegistrationInfo, (), ( ('accountType', (), _M_Bank._t_AccountType), ('password', (), IcePy._t_string) )) _M_Bank.RegistrationInfo = RegistrationInfo del RegistrationInfo _M_Bank._t_Account = 
IcePy.defineValue('::Bank::Account', Ice.Value, -1, (), False, True, None, ()) if 'AccountPrx' not in _M_Bank.__dict__: _M_Bank.AccountPrx = Ice.createTempClass() class AccountPrx(Ice.ObjectPrx): def getAccountData(self, context=None): return _M_Bank.Account._op_getAccountData.invoke(self, ((), context)) def getAccountDataAsync(self, context=None): return _M_Bank.Account._op_getAccountData.invokeAsync(self, ((), context)) def begin_getAccountData(self, _response=None, _ex=None, _sent=None, context=None): return _M_Bank.Account._op_getAccountData.begin(self, ((), _response, _ex, _sent, context)) def end_getAccountData(self, _r): return _M_Bank.Account._op_getAccountData.end(self, _r) @staticmethod def checkedCast(proxy, facetOrContext=None, context=None): return _M_Bank.AccountPrx.ice_checkedCast(proxy, '::Bank::Account', facetOrContext, context) @staticmethod def uncheckedCast(proxy, facet=None): return _M_Bank.AccountPrx.ice_uncheckedCast(proxy, facet) @staticmethod def ice_staticId(): return '::Bank::Account' _M_Bank._t_AccountPrx = IcePy.defineProxy('::Bank::Account', AccountPrx) _M_Bank.AccountPrx = AccountPrx del AccountPrx _M_Bank.Account = Ice.createTempClass() class Account(Ice.Object): def ice_ids(self, current=None): return ('::Bank::Account', '::Ice::Object') def ice_id(self, current=None): return '::Bank::Account' @staticmethod def ice_staticId(): return '::Bank::Account' def getAccountData(self, current=None): raise NotImplementedError("servant method 'getAccountData' not implemented") def __str__(self): return IcePy.stringify(self, _M_Bank._t_AccountDisp) __repr__ = __str__ _M_Bank._t_AccountDisp = IcePy.defineClass('::Bank::Account', Account, (), None, ()) Account._ice_type = _M_Bank._t_AccountDisp Account._op_getAccountData = IcePy.Operation('getAccountData', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (), (), ((), _M_Bank._t_AccountData, False, 0), (_M_Bank._t_InvalidCredentialsException,)) _M_Bank.Account = Account del 
Account _M_Bank._t_PremiumAccount = IcePy.defineValue('::Bank::PremiumAccount', Ice.Value, -1, (), False, True, None, ()) if 'PremiumAccountPrx' not in _M_Bank.__dict__: _M_Bank.PremiumAccountPrx = Ice.createTempClass() class PremiumAccountPrx(_M_Bank.AccountPrx): def getLoan(self, amount, currency, length, context=None): return _M_Bank.PremiumAccount._op_getLoan.invoke(self, ((amount, currency, length), context)) def getLoanAsync(self, amount, currency, length, context=None): return _M_Bank.PremiumAccount._op_getLoan.invokeAsync(self, ((amount, currency, length), context)) def begin_getLoan(self, amount, currency, length, _response=None, _ex=None, _sent=None, context=None): return _M_Bank.PremiumAccount._op_getLoan.begin(self, ((amount, currency, length), _response, _ex, _sent, context)) def end_getLoan(self, _r): return _M_Bank.PremiumAccount._op_getLoan.end(self, _r) @staticmethod def checkedCast(proxy, facetOrContext=None, context=None): return _M_Bank.PremiumAccountPrx.ice_checkedCast(proxy, '::Bank::PremiumAccount', facetOrContext, context) @staticmethod def uncheckedCast(proxy, facet=None): return _M_Bank.PremiumAccountPrx.ice_uncheckedCast(proxy, facet) @staticmethod def ice_staticId(): return '::Bank::PremiumAccount' _M_Bank._t_PremiumAccountPrx = IcePy.defineProxy('::Bank::PremiumAccount', PremiumAccountPrx) _M_Bank.PremiumAccountPrx = PremiumAccountPrx del PremiumAccountPrx _M_Bank.PremiumAccount = Ice.createTempClass() class PremiumAccount(_M_Bank.Account): def ice_ids(self, current=None): return ('::Bank::Account', '::Bank::PremiumAccount', '::Ice::Object') def ice_id(self, current=None): return '::Bank::PremiumAccount' @staticmethod def ice_staticId(): return '::Bank::PremiumAccount' def getLoan(self, amount, currency, length, current=None): raise NotImplementedError("servant method 'getLoan' not implemented") def __str__(self): return IcePy.stringify(self, _M_Bank._t_PremiumAccountDisp) __repr__ = __str__ _M_Bank._t_PremiumAccountDisp = 
IcePy.defineClass('::Bank::PremiumAccount', PremiumAccount, (), None, (_M_Bank._t_AccountDisp,)) PremiumAccount._ice_type = _M_Bank._t_PremiumAccountDisp PremiumAccount._op_getLoan = IcePy.Operation('getLoan', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (((), IcePy._t_double, False, 0), ((), IcePy._t_string, False, 0), ((), IcePy._t_int, False, 0)), (), ((), IcePy._t_double, False, 0), (_M_Bank._t_UnrecognizedCurrencyException, _M_Bank._t_InvalidCredentialsException)) _M_Bank.PremiumAccount = PremiumAccount del PremiumAccount _M_Bank._t_AccountManagement = IcePy.defineValue('::Bank::AccountManagement', Ice.Value, -1, (), False, True, None, ()) if 'AccountManagementPrx' not in _M_Bank.__dict__: _M_Bank.AccountManagementPrx = Ice.createTempClass() class AccountManagementPrx(Ice.ObjectPrx): def register(self, clientData, context=None): return _M_Bank.AccountManagement._op_register.invoke(self, ((clientData, ), context)) def registerAsync(self, clientData, context=None): return _M_Bank.AccountManagement._op_register.invokeAsync(self, ((clientData, ), context)) def begin_register(self, clientData, _response=None, _ex=None, _sent=None, context=None): return _M_Bank.AccountManagement._op_register.begin(self, ((clientData, ), _response, _ex, _sent, context)) def end_register(self, _r): return _M_Bank.AccountManagement._op_register.end(self, _r) def login(self, context=None): return _M_Bank.AccountManagement._op_login.invoke(self, ((), context)) def loginAsync(self, context=None): return _M_Bank.AccountManagement._op_login.invokeAsync(self, ((), context)) def begin_login(self, _response=None, _ex=None, _sent=None, context=None): return _M_Bank.AccountManagement._op_login.begin(self, ((), _response, _ex, _sent, context)) def end_login(self, _r): return _M_Bank.AccountManagement._op_login.end(self, _r) @staticmethod def checkedCast(proxy, facetOrContext=None, context=None): return _M_Bank.AccountManagementPrx.ice_checkedCast(proxy, 
'::Bank::AccountManagement', facetOrContext, context) @staticmethod def uncheckedCast(proxy, facet=None): return _M_Bank.AccountManagementPrx.ice_uncheckedCast(proxy, facet) @staticmethod def ice_staticId(): return '::Bank::AccountManagement' _M_Bank._t_AccountManagementPrx = IcePy.defineProxy('::Bank::AccountManagement', AccountManagementPrx) _M_Bank.AccountManagementPrx = AccountManagementPrx del AccountManagementPrx _M_Bank.AccountManagement = Ice.createTempClass() class AccountManagement(Ice.Object): def ice_ids(self, current=None): return ('::Bank::AccountManagement', '::Ice::Object') def ice_id(self, current=None): return '::Bank::AccountManagement' @staticmethod def ice_staticId(): return '::Bank::AccountManagement' def register(self, clientData, current=None): raise NotImplementedError("servant method 'register' not implemented") def login(self, current=None): raise NotImplementedError("servant method 'login' not implemented") def __str__(self): return IcePy.stringify(self, _M_Bank._t_AccountManagementDisp) __repr__ = __str__ _M_Bank._t_AccountManagementDisp = IcePy.defineClass('::Bank::AccountManagement', AccountManagement, (), None, ()) AccountManagement._ice_type = _M_Bank._t_AccountManagementDisp AccountManagement._op_register = IcePy.Operation('register', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (((), _M_Bank._t_ClientData, False, 0),), (), ((), _M_Bank._t_RegistrationInfo, False, 0), (_M_Bank._t_AccountAlreadyExistsException,)) AccountManagement._op_login = IcePy.Operation('login', Ice.OperationMode.Normal, Ice.OperationMode.Normal, False, None, (), (), (), ((), _M_Bank._t_AccountPrx, False, 0), (_M_Bank._t_InvalidCredentialsException,)) _M_Bank.AccountManagement = AccountManagement del AccountManagement # End of module Bank
[ "konrad1997@gmail.com" ]
konrad1997@gmail.com
2e8b7dc96dc9fea6cd98624dca6cf79be8d7aacb
0f8fee33880b5625def82350b3cec70fbcca6470
/player.py
8925a902369c20bb36053d5c54932836ee9f63f6
[ "MIT" ]
permissive
bastinat0r/webaudio
1aa37c0f37eef7923eb21fc7e92763331be70fbf
185c0c164ec3b8304a5f115a94bde8a281eb467b
refs/heads/master
2021-01-01T05:30:28.529045
2014-03-10T12:08:18
2014-03-10T12:08:18
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,463
py
import mplayer import youtube from time import sleep import threading import sys import audiourl import argparse class Player(threading.Thread): """Docstring for Player """ def __init__(self, playlist=[], paused=False, pulse=False): """""" self._playlist = [] for entry in playlist: self.append(entry) self._pause = paused self._archive = [] if pulse: self._p = mplayer.Player(args="-ao pulse") else: self._p = mplayer.Player() threading.Thread.__init__(self) self.daemon = True self._current = None self._next = None def _fetch_next(self): """get next item from playlist :returns: path to audio-file """ if self._playlist == []: self._playlist = self._archive self._playlist.reverse() # reverse because stack self._archive = [] if self._playlist == []: return youtube.dl_audiostream("https://www.youtube.com/watch?v=oHg5SJYRHA0", path="music") current = self._playlist.pop() if youtube.validate(current): self._archive.append(current) return youtube.dl_audiostream(current, path="music") elif audiourl.validate(current): r = retrieve_url(current) if r: self._archive.append(current) return r def append(self, command): """append new entry to playlist """ if command == "skip": self.play_next() elif command == "pause": self._pause = not self._pause return; elif command.split("=")[0] == "volume": f = float(command.split("=")[1]) if f > 100.0: f = 100.0 if f < 0.0: f = 0.0 self._p.volume = f return; elif youtube.validate(command): self._playlist.append(command) print "appending %s to playlist" %command return; elif audiourl.validate(command): self._playlist.append(command) print "appending %s to playlist" %command def play_next(self): self._current = self._next if not self._current: self._current = self._fetch_next() print "playing %s" %self._current self._p.loadfile(self._current) self._pause = False if self._p.paused: self._p.pause() self._next = self._fetch_next() def run(self): while True: if self._p.paused != self._pause: self._p.pause() if not (self._p.percent_pos >= 0): 
self.play_next() sleep(1) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Webdownloarding music-player') parser.add_argument('-p','--playlist' , help='playlist-file', default=None) parser.add_argument('--paused' , help="start paused, input pause to start playing", action="store_true") parser.add_argument('--pulse' , help="activate the pulseaudio driver", action="store_true") args = parser.parse_args() l = [] if args and args.playlist: with open(args.playlist) as pl: l = [ line.rstrip() for line in pl ] p = Player(playlist=l, paused=args.paused, pulse=args.pulse) p.start() while True: s = raw_input() p.append(s)
[ "bastinat0r@bastinat0r.de" ]
bastinat0r@bastinat0r.de
2f0f6d17bc0837a0cf08c9ec5743e93ced6d825e
c771d2ca8f7e1744a0e3a7deb9c88596871b63bc
/semana1/dimensoes_matriz.py
0193c4f8a9da83329b4306d556fec129634ef500
[]
no_license
r25ta/USP_python_2
9acf49ab02b5c0222eadc10710d914b90b64cd6e
101ab95734713a17b255cb5825679c3f9bb5bc85
refs/heads/master
2023-07-14T22:49:34.158627
2021-08-16T23:54:16
2021-08-16T23:54:16
377,664,374
0
0
null
null
null
null
UTF-8
Python
false
false
290
py
def dimensoes(matriz): linhas = 0 for l in range(len(matriz)): colunas = 0 for c in range(len(matriz[l])): colunas += 1 linhas += 1 print(f"{linhas}X{colunas}") def main(): matriz = [[1,3,2],[3,1,4]] dimensoes(matriz) main()
[ "twentyfive@hotmail.com" ]
twentyfive@hotmail.com
07d214b22b6821a929c66b84c868ee1c7a506e58
cb8d5308290f806531c72fcd3b59cee70c8ea8cd
/Shortest Job First.py
f9dbc25e6baf352c97a9820f51b202072b345fdd
[]
no_license
SohaibSaqib1/OSAssignments
8bc8f4e5acd8f4f8bb7c909e43e1f5abc3f3e273
d3101777be8a107a9137fb0e6c966a657f178523
refs/heads/master
2020-04-01T15:14:45.429584
2018-11-29T18:56:11
2018-11-29T18:56:11
153,328,118
0
0
null
null
null
null
UTF-8
Python
false
false
973
py
process =[] b_time = [] a_time = [] f_time =[] w_time =[] sum =int(0) print "No. of processes" n = input() print "Arrival time is set at 0 for every process" a_time = [int(0) for i in range(n)] print "Burst time of the processes" b_time = [input() for i in range(n)] for i in range(n): for j in range(n-i-1): if b_time[j] > b_time[j+1]: p2=b_time[j] b_time[j]=b_time[j+1] b_time[j]=p2 p3=process[j] process[j]=process[j+1] process[j]=p3 print "Process Name"+"\t"+"Arrival time"+"\t"+"Burst time" for i in range(n): print process[i],"\t",a_time[i],"\t",b_time[i] f_time = [int(a_time[i] + b_time[i]) for i in range(n)] w_time=[int(f_time[i]-a_time[i]) for i in range(n)] for i in range(n): sum = sum + int(w_time[i]) print "waiting time of Process" for i in range(n): print process[i],"\t",w_time[i] print "Average waiting time ",sum/n
[ "noreply@github.com" ]
SohaibSaqib1.noreply@github.com
7822c1401b53a59962d8ff2c9a319e87026d1b5d
db9cb3f04dc6d0bdf5f78b7e627ee39201367ace
/loop_vs_array.py
66dd66d1001e8e09a659c08a53574fcc1ca222e6
[]
no_license
rometsch/cplab-hydro
21a350540d9f5f658f567d64b58f94881231b869
7d2f48f9d953fac5530fe8eb9c03d03438436113
refs/heads/master
2021-01-12T11:08:43.575091
2016-11-21T13:11:22
2016-11-21T13:11:22
72,847,964
0
0
null
null
null
null
UTF-8
Python
false
false
2,414
py
# -*- coding: utf-8 -*- """ Created on Fri Nov 4 14:57:23 2016 @author: thomas """ import matplotlib.pyplot as plt import advection def initial_function(x): if (x<-1./3 or x>1./3): return 0.0; else: return 1.0; # Make sure upwind 2nd order with loop calculation # yields same result as with array calculation def compare_loop_vs_array(): plt.figure(); plt.clf(); labels = ["analytic"]; style = ['.','x']; methods = ['upwind_2nd_loop','upwind_2nd_arr']; T=40; # Plot initial function: initial = advection.Box(initial_function,[-1,1],400,1.0); plt.plot(initial.get_x(),initial.get_Psi(),'-') for n in range(2): box = advection.Box(initial_function,[-1,1],400,1.0); box.integrate(T,methods[n]); labels.append(methods[n]); plt.plot(box.get_x(),box.get_Psi(),style[n]) plt.legend(labels,loc=(0.75,0.5)); plt.xlabel("x"); plt.ylabel("u"); plt.title("T = {}".format(T)) plt.draw(); plt.savefig("loop vs array.pdf".format(T)); compare_loop_vs_array(); #%% Study time cost of loop vs array for 2nd order Upwind: import timeit N = 2; # Measure time to integrate untill T=4, average over N runs: T_upwind = timeit.timeit('box.integrate(4,\'upwind\')',setup='import advection; from __main__ import initial_function; box=advection.Box(initial_function,[-1,1],4000,1.0);',number=N)/N T_lax_wendroff = timeit.timeit('box.integrate(4,\'lax_wendroff\')',setup='import advection; from __main__ import initial_function; box=advection.Box(initial_function,[-1,1],4000,1.0);',number=N)/N T_upwind_2nd_loop = timeit.timeit('box.integrate(4,\'upwind_2nd_loop\')',setup='import advection; from __main__ import initial_function; box=advection.Box(initial_function,[-1,1],4000,1.0);',number=N)/N T_upwind_2nd_arr = timeit.timeit('box.integrate(4,\'upwind_2nd_arr\')',setup='import advection; from __main__ import initial_function; box=advection.Box(initial_function,[-1,1],4000,1.0);',number=N)/N print("Excecution times of integration from T=0 to T=4, (N=400,sigma=0.8)"); print("Upwind:\t\t\t {:.9f}".format(T_upwind)); 
print("Lax Werndroff:\t\t {:.9f}".format(T_lax_wendroff)); print("Upwind 2nd ord loop:\t {:.9f}".format(T_upwind_2nd_loop)); print("Upwind 2nd ord array:\t {:.9f}".format(T_upwind_2nd_arr)); print("\nRatio loop to array =\t {}".format(T_upwind_2nd_loop/T_upwind_2nd_arr))
[ "thomas.rometsch@gmail.com" ]
thomas.rometsch@gmail.com
cb092a7db01072d77bb3556bad1a1807f4173a38
f7489d73004d03969480064e79e2c82b90ad2d08
/wordcount/views.py
063b5479165cea4a1f6e332f5ea8522be048b583
[]
no_license
toppixx/wordcount-project
24202847ef3e0fdab951a7791adf80ffea7da93a
4da860c6a78fb6c1e9a38efeb718f291a044e48a
refs/heads/master
2020-03-10T14:09:22.552546
2018-04-13T15:04:15
2018-04-13T15:04:15
129,418,906
0
0
null
null
null
null
UTF-8
Python
false
false
1,372
py
from django.http import HttpResponse from django.shortcuts import render import json import operator def home(request): return render(request, 'home.html') def count(request): #aboutButton = request.GET['aboutButton'] #if aboutButton == 'submit': # return render(request, 'about.html') #body_unicode = request.body.decode('utf-8') #body_data = json.loads(body_unicode) itemGet = request.GET print(itemGet) if request.GET.get('textBox') is not None: textBox = request.GET['textBox'] wordlist = textBox.split() wordDictionary = {} for word in wordlist: if word in wordDictionary: #increase counter in wordDictionary wordDictionary[word] += 1 else: #add to wordDictionary wordDictionary[word] = 1 sortedwordDictionary = sorted(wordDictionary.items(), key=operator.itemgetter(1),reverse=True) return render(request,'count.html', {'textBox':textBox, 'count':len(wordlist),'sortedwordDictionary':sortedwordDictionary}) if request.GET.get('aboutButton') is not None: return render(request,'about.html') return render(request,'error.html') def about(request): aboutButton = request.GET['aboutButton'] if aboutButton == 'submit': return render(request, 'about.html')
[ "TobiasFaller@gmx.de" ]
TobiasFaller@gmx.de
89a58ddac413e0ea9c52c55fd7696271ee2a5528
3b573ec069cf956eede20af6f931d7fac5677348
/Modulo1-2/Basicos/resta.py
cff943c69c6ff7210616eb54a214530767e6e742
[]
no_license
codeevs/PythonLearn
5a05672c4f40be576f387630d13579e41ec450a5
040c4ba2ab8cdf7f463ab64cbcf4197624581023
refs/heads/master
2022-11-24T05:27:06.297276
2020-07-31T01:37:14
2020-07-31T01:37:14
283,914,117
0
0
null
null
null
null
UTF-8
Python
false
false
214
py
#El símbolo del operador de resta es obviamente - (el signo de menos), sin embargo debes notar que este operador tiene otra función - puede cambiar el signo de un número.# print(7 - 8) print (-4 -4) print(-1.-1)
[ "eval.sanches@gmail.com" ]
eval.sanches@gmail.com
30d7469399b92616e07ca3c05680f37c5bb243e6
a829f2bc751b3038171177276611df0c889ba13e
/adv_contructs/decorators_std_lib/classmethod.py
445dc4c0d4288c24c2c0beda3db6a64575df94eb
[]
no_license
karthikarun119/python
e5b7a268c2ddb72ac8fca160f5377bd8331a9cc1
b8da90cb5324658264d5d229584685d54bff9371
refs/heads/master
2020-04-08T19:20:42.972387
2018-12-04T13:13:30
2018-12-04T13:13:30
159,651,132
0
0
null
null
null
null
UTF-8
Python
false
false
192
py
import numpy class Array(object): def __init__(self, data): self.data = data @classmethod def fromfile(cls, file): data = numpy.load(file) return cls(data)
[ "karthikarun119@gmail.com" ]
karthikarun119@gmail.com
92bfb3c53ed15e6c59d7c0e88d1657aa55f993b6
5af6f7248b081a4df03f32a425da9f1d04c9af8a
/australia2.py
af3d18ce966c2a4924933bf7c7514b59c33cb2b2
[]
no_license
beginnerHB1/Invoice_extraction
15c58a8587fc3b332f95dd193d824895b7cb59ae
ac77f143de04b2554c2630985aa78e8cf872c852
refs/heads/master
2022-12-24T11:01:28.856063
2020-10-01T10:53:01
2020-10-01T10:53:01
296,529,319
0
0
null
null
null
null
UTF-8
Python
false
false
6,074
py
import pdftotext import re def find_regex(text): dct = {} for i in text.split("\n"): if "TAX INVOICE" in i: invoice_num = i.split("TAX INVOICE")[-1].strip() break elif "COMMERCIAL INVOICE" in i: invoice_num = i.split("COMMERCIAL INVOICE")[-1].strip() break dct["invoice_num"] = invoice_num dct["abn"] = re.findall("[0-9]{2} [0-9]{3} [0-9]{3} [0-9]{3}", text)[0].strip() dct["date"] = re.findall("[0-9]{1,2}[\/]{1}[0-9]{2}[\/]{1}[0-9]{4}", text)[0].strip() dct["phone"] = re.findall("[0-9]{2} [0-9]{4} [0-9]{4}", text)[0].strip() dct["fax"] = re.findall("[0-9]{2} [0-9]{4} [0-9]{4}", text)[1].strip() dct["case_number"] = re.findall(" [0-9]{8}\n", text)[0].strip() return dct #done def find_table_details(text): try: lst_line_det = [] end_index, start_index = text.index("CASE TOTAL"), text.index("SUPPLIED")+8 table_data = text[start_index:end_index].strip().split("\n") # print(table_data) if len(table_data) >= 2: for i,j in enumerate(table_data): try: re_pattern = "[1-9]{1,2} [A-Z]{4} " tx = re.findall(re_pattern, "".join(j))[0] start = table_data[i].index(tx) end = start + 6 table_data[i] = "".join(table_data[i][:start]) + "-".join(table_data[i][start:end].split()) + "".join(table_data[i][end:]) except IndexError: pass details_table = [] lst = [] for i in table_data: if len(i.split()) != 0 and "--------" not in i: lst.append(i.split()) for i in range(len(lst)): for j, k in enumerate(lst[i]): if len(k) < 2: lst[i].remove(lst[i][j]) else: pass for i,j in enumerate(lst): if "---------------" not in j: if len(j) >= 7: dct_line = {} dct_line["partnumber"] = j[0].strip() dct_line["donumber"] = j[1].strip() dct_line["harmonised"] = j[2].strip() dct_line["country"] = " ".join(j[3:-3]).strip() dct_line["quantityunit"] = j[-3].strip() dct_line["unitvalue"] = j[-2].strip() dct_line["amount"] = j[-1].strip() if len(lst[i+1]) == 2: dct_line["description"] = lst[i+1][0].strip() dct_line["cus_ord"] = lst[i+1][1].strip() elif len(lst[i+1]) == 3: dct_line["description"] = " 
".join(lst[i+1][0:2]).strip() dct_line["cus_ord"] = lst[i+1][2].strip() lst_line_det.append(dct_line) return lst_line_det else: False except: False def find_details_australia(pdf): json_dct = {"supplier":"australia", "LineDetails":[], "InvoiceAmountDetails":[]} invoice_am = {} with open(pdf, "rb") as f: pdf = pdftotext.PDF(f) data = " " for i in range(len(pdf)): data += "\n" + pdf[i] dct = find_regex(data) cus_no = data[data.index("CUSTOMER NO"): data.index("DELIVERED TO")] if "ABN" in cus_no: final_cus_no = cus_no.split("ABN")[0] else: final_cus_no = cus_no.split(":")[-1].strip() dct["customer_no"] = final_cus_no try: table = find_table_details(data) json_dct["LineDetails"] = table except: json_dct["LineDetails"] = [] x = data.split("\n") # headers for i in range(len(x)): if "CASE NUMBER" in x[i]: x[i] = x[i][:x[i].index("CASE NUMBER")] elif "PAGE" in x[i]: x[i] = x[i][:x[i].index("PAGE")] elif "SUPP. CASE NO" in x[i]: x[i] = x[i][:x[i].index("SUPP. CASE NO")] elif "DATE INV/DEL" in x[i]: x[i] = x[i][:x[i].index("DATE INV/DEL")] elif "INVOICE CURRENCY" in x[i]: try: dct["invoice_currency"] = x[i].strip().split(":")[-1].strip() x[i] = x[i][:x[i].index("INVOICE")] json_dct["Header"] = dct # print(x[i]) except: continue elif "CASE TOTAL" in x[i]: try: invoice_am["case_total"] = x[i].strip().split(":")[-1].strip() except: continue elif "GST @10%" in x[i]: try: invoice_am["gst"] = x[i].strip().split(":")[-1].strip() except: continue elif "INVOICE TOTAL" in x[i]: try: invoice_am["invoicetotal"] = " ".join(x[i].strip().split(":")[-1].strip().split()).strip() except: continue json_dct["InvoiceAmountDetails"] = invoice_am for i in range(len(x)): if "DELIVERED TO" in x[i]: x[i] = x[i].strip() deliver_start_index = x.index(x[i]) x[i] = x[i][x[i].index(":")+1:] elif "MAIL TO :" in x[i]: x[i] = x[i].strip() deliver_end_index = x.index(x[i]) receiver_start_index = x.index(x[i]) x[i] = x[i][x[i].index(":")+1:] elif "------" in x[i]: x[i] = x[i].strip() receiver_end_index = 
x.index(x[i]) table_start_index = x.index(x[i]) try: lst = [i.strip() for i in x[deliver_start_index:deliver_end_index]] json_dct["deliveredto"] = " ".join(lst).strip() lst = [i.strip() for i in x[receiver_start_index: receiver_end_index]] json_dct["mailto"] = " ".join(lst).strip() except: pass return json_dct
[ "noreply@github.com" ]
beginnerHB1.noreply@github.com
315d7331d97e823173a175eadf6825e3f48c6a3a
753e660acd0ce36caf52867d53eb8d10f67aac45
/Djikstra.py
671f643c03a420cf6d864ec950dcd6ec620bd7ed
[]
no_license
vedantshetty/ZIP-Distance-Calculator
e269031c50d60dd14a613b11b41fc449bee499c8
ee4788df264e6613a76498557376a5564e1eed5a
refs/heads/master
2020-04-03T14:32:31.533667
2018-10-31T04:31:40
2018-10-31T04:31:40
155,325,240
0
0
null
null
null
null
UTF-8
Python
false
false
1,423
py
from collections import defaultdict class Graph: def __init__(self): self.nodes = set() self.edges = defaultdict(list) self.distances = {} self.times = {} #function to add graphs in a node def add_node(self, value): self.nodes.add(value) #function to build edges in a graph def add_edge(self, from_node, to_node, distances,time): self.edges[from_node].append(to_node) self.edges[to_node].append(from_node) self.times[(from_node,to_node)] = time self.times[(to_node,from_node)] = time self.distances[(from_node, to_node)] = distances self.distances[(to_node,from_node)] = distances def djikstra(graph, initial): visited = [{initial: 0}, {initial: 0}] #path = defaultdict(list) nodes = set(graph.nodes) while nodes: min_node = None for node in nodes: if node in visited[0]: if min_node is None: min_node = node elif visited[0][node] < visited[0][min_node]: min_node = node if min_node is None: break nodes.remove(min_node) current_weight = visited[0][min_node] current_dis =visited[1][min_node] for edge in graph.edges[min_node]: weight = current_weight + graph.times[(min_node, edge)] total_distances = current_dis + graph.distances[(min_node,edge)] if edge not in visited[0] or weight < visited[0][edge]: visited[0][edge] = weight visited[1][edge] = total_distances #path[edge].append(min_node) return visited
[ "vshetty.vs@gmail.com" ]
vshetty.vs@gmail.com
0680719e6df43f1672f7fe9872d0c7ee24994ecd
33baed525c0844f45fb863f847a5351d43049e19
/generator_de_parole/settings.py
9c9dc707078e4249f8ab44696cc23dd7c315bccc
[]
no_license
DugheanaSebastianAdelin/Password_generator
1c2b55ceeb7a3e0ec8ee8db90de8a42d96c978cc
2e4c33b1f92cfc012d1274d91ac129e96cbf1b30
refs/heads/master
2022-12-23T05:53:38.161049
2020-09-19T20:32:41
2020-09-19T20:32:41
293,531,180
0
0
null
null
null
null
UTF-8
Python
false
false
3,198
py
""" Django settings for generator_de_parole project. Generated by 'django-admin startproject' using Django 3.1. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ from pathlib import Path import os # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve(strict=True).parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '5^bm(^kx*xb1-yx=#fe=cmi9=1z038+bwk&d#zz8o=$nqh+q%c' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'generator', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'generator_de_parole.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'generator_de_parole.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 
'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), )
[ "dugheanasebastianadelin@gmail.com" ]
dugheanasebastianadelin@gmail.com
4e63ec6e11a2930a2f2777279a9b9d37db8a763a
8ed6c124bc994a9339d43081280164f66b0ce0cf
/prob25.py
d466dfa5c7e1bd3ef4b9a7e54c8b987d7180385a
[]
no_license
ritwik95/Project-Euler
0d9c40e5f0c4f44df6c929dc894f96053793be82
739f530c68428de8ad0c2447df56370b8b07a95a
refs/heads/master
2021-01-01T18:11:56.427908
2015-03-13T18:35:14
2015-03-13T18:35:14
32,172,920
0
0
null
null
null
null
UTF-8
Python
false
false
114
py
a,b=0,1 list=[] while len(str(b))<1000: a,b=b,a+b list.append(b) print len(list) print len(str(1253764))
[ "ritwik13079@iiitd.ac.in" ]
ritwik13079@iiitd.ac.in
d43fe1f462f31569bfc7f29fc9640febc6060e29
581dec25eb1fbd6951d67451a7c04025365f22e9
/MeanShift Detector/detector.py
2926ef04b754fcd396a8f24ec9b586bd67ce36a1
[]
no_license
MrLebovsky/Course-Project-CV
c682c89b00657c80f8d3b8440a116060dfbfefc5
3dbaf4eb3a53a1ace7cd8e4bd999e40808969208
refs/heads/master
2020-05-15T22:13:30.153400
2019-04-25T12:12:37
2019-04-25T12:12:37
182,520,871
0
0
null
null
null
null
UTF-8
Python
false
false
4,473
py
#Идея алгоритма: разобьем изображение на кластеры #Найдем дескрипторы, которые принадлежат i-му кластеру #Замэтчим дескрипторы текущего кластера с эталоном. Хорошо мэтчится? Там объект! import cv2 from matplotlib import pyplot as plt MIN_MATCH_COUNT = 100 img1 = cv2.imread('images/my/etalon.jpg', 0) # эталонное изображение img2 = cv2.imread('images/my/test.jpg', 0) # изображения для поиска эталона #найдем интересные точки и построим дескрипторы sift = cv2.xfeatures2d.SIFT_create() kp1, des1 = sift.detectAndCompute(img1, None) kp2, des2 = sift.detectAndCompute(img2, None) import numpy as np from sklearn.cluster import MeanShift, estimate_bandwidth #запомним координаты всех точек интереса для изображения, на котором будем осуществлять поиск объекта x = np.array([kp2[0].pt]) for i in range(len(kp2)): x = np.append(x, [kp2[i].pt], axis=0) x = x[1:len(x)] #Квантииль — значение, которое заданная случайная величина не превышает с фиксированной вероятностью #Используем 500 образцов для оценки пропускной способности MeanShift bandwidth = estimate_bandwidth(x, quantile=0.1, n_samples=500) #Вычислим средний сдвиг ms = MeanShift(bandwidth=bandwidth, bin_seeding=True, cluster_all=True) ms.fit(x) #непосредственно кластеризация labels = ms.labels_ #labels of each point cluster_centers = ms.cluster_centers_ #координаты центров каждого кластера labels_unique = np.unique(labels) #найдем все уникальные метки n_clusters_ = len(labels_unique) print("Кол-во предполагаемых кластеров : %d" % n_clusters_) #массив длинной n_clusters_, инициализировали null-значением s = [None] * n_clusters_ #Определим точки интереса, принадлежащие каждому кластеру #Идем по уникальным кластерам for i in range(n_clusters_): l = ms.labels_ d, = np.where(l == i) #получим массив индексов, где метка == i print(d.__len__()) s[i] = list(kp2[xx] for xx in d) #запомним нужные нам точки интереса des2_ = des2 #Найдем эталонный объект! 
for i in range(n_clusters_): kp2 = s[i] #точки интереса текущего кластера l = ms.labels_ d, = np.where(l == i) des2 = des2_[d, ] #дескрипторы текущего кластера FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks = 50) flann = cv2.FlannBasedMatcher(index_params, search_params) des1 = np.float32(des1) des2 = np.float32(des2) matches = flann.knnMatch(des1, des2, k=2) #замэтчили дескрипторы эталона с дескрипторами текущего кластера # Запомним все хорошие мэтчи, дальше по сути как обычно все good = [] for m,n in matches: if m.distance < 0.7*n.distance: good.append(m) if len(good)>MIN_MATCH_COUNT: src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0) if M is None: print ("Не вычислили гомографию!") else: h,w = img1.shape pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) img3 = cv2.polylines(img2, [np.int32(dst)], True, (0, 255, 255), 3) #нарисуем местоположение объекта else: print ("Не достаточно мэтчей - %d/%d" % (len(good),MIN_MATCH_COUNT)) matchesMask = None plt.imshow(img3, 'gray'), plt.show()
[ "roma00712@gmail.com" ]
roma00712@gmail.com
cc8955245f1cc644c37bd057694467e385b10688
81dadecf40ea7eb46ebf6f536c615766fb767775
/boilerplate/repositories/blacklist.py
0e1b558f38b90cd6fb361e7dd3c032c171715540
[]
no_license
huunghia98/flask-login
4f0f1787d2133ae88d81d419852c29f69a54b139
11a278bd5eb9f0cf04d972522d164a892f2e0bc1
refs/heads/master
2020-05-18T13:04:51.747403
2019-08-07T10:12:08
2019-08-07T10:52:58
184,427,243
0
0
null
null
null
null
UTF-8
Python
false
false
794
py
from boilerplate import models as m from .user import get_one_user_by_email_or_username def save_user_to_blacklist(**kwargs): try: u = m.Blacklist(**kwargs) m.db.session.add(u) m.db.session.commit() return log except: print("Can't save user to blacklist") def get_user_in_blacklist(username): user = get_one_user_by_email_or_username(username, '') if user: return m.Blacklist.query.get(user.id),1 return None,False def save_user_to_blacklist_by_username(username): try: user = get_one_user_by_email_or_username(username, '') data = m.Blacklist(user_id=user.id) m.db.session.add(data) m.db.session.commit() return data except: print("Can't save user to blacklist")
[ "nguyenhuunghia295@gmail.com" ]
nguyenhuunghia295@gmail.com
ebbe58a0f10c7c76cb094aecaf73d95b6eda6250
8b2361cbccbe13cf9aa914b94824b4f389316cc5
/twatter/database/utils.py
1a353da70c05666c37120696c76f7b47b7740257
[ "MIT" ]
permissive
sorja/twatter
5903c057fcff551808511bf11ca501273a4bf781
fa0ec8bb100507394db8cf412a7b81f4f0b163b3
refs/heads/master
2021-01-17T18:24:38.226563
2016-10-26T11:22:37
2016-10-26T11:22:37
67,942,462
1
0
null
null
null
null
UTF-8
Python
false
false
4,360
py
import psycopg2 import psycopg2.extras from ..models.user import User from twatter import config def get_connection(): return psycopg2.connect(config.db['db_string']) #GET STUFF def get_one_with_id(table, id): assert table assert id query = "SELECT * FROM {} WHERE id = %s".format(table) conn = psycopg2.connect(config.db['db_string']) cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) cur.execute(query, (id,)) result = cur.fetchone() cur.close() conn.close() return result def get_fields_from_table_with_id(fields, table, id_name, id): assert fields assert table assert id_name assert id query = '''select %s from {} where {} = %s'''.format(table, id_name) conn = psycopg2.connect(config.db['db_string']) cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) cur.execute(query, (fields, id)) results = cur.fetchall() cur.close() conn.close() return results def get_all_from_table(table): assert table conn = psycopg2.connect(config.db['db_string']) cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) cur.execute("SELECT * FROM %s", (table,)) result = cur.fetchone() cur.close() conn.close() return results def get_custom_query(query, vars=None): assert query conn = psycopg2.connect(config.db['db_string']) cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) cur.execute(query, vars) results = cur.fetchall() cur.close() conn.close() return results def get_favorited_twaats_for_user(id): assert id query = """ SELECT * FROM favorited_twaats WHERE who_id = %s """ conn = psycopg2.connect(config.db['db_string']) cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) cur.execute(query, (id,)) twaats = [dict(record) for record in cur.fetchall()] # it calls .fecthone() in loop cur.close() conn.close() return twaats def get_search_results(type, term): term = str(term.lower()) query = ''; if type.lower() in 'users': query = """SELECT * FROM users WHERE lower(full_name) like %s """ if type.lower() == 'tags': return ['not implemented yet'] try: conn = 
psycopg2.connect(config.db['db_string']) cur = conn.cursor() cur.execute(query, (['%'+term+'%'])) search_results = cur.fetchall() #some problems with psycon, doesnt accept loop here.. cur.close() conn.close() return [User(x[0], x[1], x[2], x[3], x[4], x[5], x[6]) for x in search_results] except Exception as e: print e return e # INSERT stuff def insert_new_favorite_twaat_for_id(twaat_id, user_id): try: conn = psycopg2.connect(config.db['db_string']) cur = conn.cursor() cur.execute(""" INSERT INTO favorited_twaats (who_id, twaat_id) VALUES (%s, %s) """, (user_id, twaat_id)) conn.commit() cur.close() conn.close() except Exception as e: if('duplicate' in e.pgerror): return 'Twaat already favorited' print e return e def insert_new_twaat(user_id, text, parent_id): try: conn = get_connection() cur = conn.cursor() cur.execute(""" INSERT INTO twaat (user_id, text, parent_id) VALUES (%s, %s, %s) """, (user_id, text, parent_id)) conn.commit() cur.close() conn.close() # session['username'] = request.form['username'] except Exception as e: print e return e # UPDATE STUFF def update_custom_query(query, vars=None): try: conn = get_connection() cur = conn.cursor() cur.execute(query, vars) conn.commit() cur.close() conn.close() except Exception as e: if('duplicate' in e.pgerror): return 'Twaat already favorited' print e return e def update_user_avatar(id, path): try: conn = get_connection() cur = conn.cursor() cur.execute("""UPDATE users SET avatar = %s WHERE id = %s""", (path, id)) conn.commit() cur.close() conn.close() except Exception as e: if('duplicate' in e.pgerror): return 'Twaat already favorited' print e return e
[ "miro.sorja@gmail.com" ]
miro.sorja@gmail.com
100cc4423e0541652df26c26743f481b6d4a2d9e
1c6d8bc7876d4f034d8b671ee0f1e3bc5d13ac05
/3DUnet_attention/backup_model.py
d3a668d39e498cce4a57d25ea00c5b6d222d3935
[]
no_license
DinhHieuHoang/DAM-AL
29ddcb98616e49c5e7f57f38897c5148bb2ccd6e
57d07387243e5b85cdaa079347019bd718c890fa
refs/heads/main
2023-09-04T00:30:18.746352
2021-11-11T03:07:51
2021-11-11T03:07:51
420,787,902
1
0
null
null
null
null
UTF-8
Python
false
false
33,639
py
from keras.models import * from attention import * from bilinear_upsampling import BilinearUpsampling, BilinearUpsampling3D import tensorflow as tf from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope from tensorpack.tfutils.summary import add_moving_summary from tensorpack.tfutils.argscope import argscope from tensorpack.tfutils.scope_utils import under_name_scope from tensorpack.models import ( layer_register ) from custom_ops import BatchNorm3d, InstanceNorm5d import numpy as np # from scipy.ndimage import distance_transform_edt as distance from scipy import ndimage import scipy import config import tensorflow.contrib.slim as slim import utils import time PADDING = "SAME" DATA_FORMAT="channels_last" BASE_FILTER = 16 class Copy(Layer): def call(self, inputs, **kwargs): copy = tf.identity(inputs) return copy def compute_output_shape(self, input_shape): return input_shape def AtrousBlock3D(input_tensor, filters, rate, block_id, stride=1): x = tf.layers.conv3d(inputs=input_tensor, filters=filters, kernel_size=(3,3,3), strides=(stride, stride, stride), dilation_rate=(rate, rate, rate), padding=PADDING, use_bias=False, activation=lambda x, name=None: BN_Relu(x), data_format=DATA_FORMAT, name=block_id + "_dilation") # x = Conv3D(filters, (3, 3, 3), strides=(stride, stride, stride), dilation_rate=(rate, rate, rate), # padding='same', use_bias=False, name=block_id + '_dilation')(input_tensor) return x def CFE3D(input_tensor, filters, block_id): rate = [3, 5, 7] cfe0 = tf.layers.conv3d(inputs=input_tensor, filters=filters, kernel_size=(1,1,1), use_bias=False, strides=1, padding=PADDING, activation=lambda x, name=None: BN_Relu(x), data_format=DATA_FORMAT, name=block_id + "_cfe0") # cfe0 = Conv3D(filters, (1, 1, 1), padding='same', use_bias=False, name=block_id + '_cfe0')( # input_tensor) cfe1 = AtrousBlock3D(input_tensor, filters, rate[0], block_id + '_cfe1') cfe2 = AtrousBlock3D(input_tensor, filters, rate[1], block_id + '_cfe2') cfe3 = 
AtrousBlock3D(input_tensor, filters, rate[2], block_id + '_cfe3') cfe_concat = tf.concat([cfe0, cfe1, cfe2, cfe3], axis=-1, name=block_id + 'concatcfe') #cfe_concat = Concatenate(name=block_id + 'concatcfe', axis=-1)([cfe0, cfe1, cfe2, cfe3]) # with tf.variable_scope(block_id + "_BN") as scope: # cfe_concat = BN_Relu(cfe_concat) return cfe_concat @layer_register(log_shape=True) def unet3d_attention(inputs): print("inputs ", inputs) depth = config.DEPTH filters = [] down_list = [] layer = tf.layers.conv3d(inputs=inputs, filters=BASE_FILTER, kernel_size=(3,3,3), strides=1, padding=PADDING, activation=lambda x, name=None: BN_Relu(x), data_format=DATA_FORMAT, name="init_conv") print(layer.name, layer.shape[1:]) # if config.RSU: # mid_ch = BASE_FILTER // 2 # for first RSU mid channel # print('RSU at C12 with pooling replaced by conv3d') for d in range(depth): if config.FILTER_GROW: num_filters = BASE_FILTER * (2**d) else: num_filters = BASE_FILTER filters.append(num_filters) # if config.RSU: # # if depth < 7: # # height = 7 - d # # else: # # height = depth - d # height = 7 - d # if height < 6: # layer = Unet3dBlock('down{}'.format(d), layer, kernels=(3,3,3), n_feat=num_filters, s=1) # print("Unet downsampling ",d," ",layer.shape[1:]) # else: # layer = RSU('down{}RSU'.format(height), height, layer, mid_ch, num_filters) # print("RSU ",height," ",layer.shape[1:]) # # if height < 4: # from stage 5 and more => change dilated into True # # layer = RSU('down{}RSU4'.format(d), 4, layer, mid_ch, num_filters, False) # # print("RSU4-"+str(d)+" ",layer.shape[1:]) # # else: # # layer = RSU('down{}RSU'.format(d), height, layer, mid_ch, num_filters) # # print("RSU ",d," ",layer.shape[1:]) # mid_ch = num_filters // 2 # else: layer = Unet3dBlock('down{}'.format(d), layer, kernels=(3,3,3), n_feat=num_filters, s=1) print("Residual bock downsampling ",d," ",layer.shape[1:]) down_list.append(layer) if d != depth - 1: layer = tf.layers.conv3d(inputs=layer, filters=num_filters*2, 
kernel_size=(3,3,3), strides=(2,2,2), padding=PADDING, activation=lambda x, name=None: BN_Relu(x), data_format=DATA_FORMAT, name="stride2conv{}".format(d)) print("Down Conv3D ",d, " ", layer.shape[1:]) # print(layer.name,layer.shape[1:]) C1 = tf.layers.conv3d(inputs=down_list[0], filters=64, kernel_size=(3,3,3), strides=1, padding=PADDING, activation=lambda x, name=None: BN_Relu(x), data_format=DATA_FORMAT, name="C1_conv") C2 = tf.layers.conv3d(inputs=down_list[1], filters=64, kernel_size=(3,3,3), strides=1, padding=PADDING, activation=lambda x, name=None: BN_Relu(x), data_format=DATA_FORMAT, name="C2_conv") print("Low level feature 1\t", C1.shape[1:]) print("Low level feature 2\t", C2.shape[1:]) C3_cfe = CFE3D(down_list[2], 32, 'C3_cfe') print("High level feature 1 CFE\t", C3_cfe.shape[1:]) C4_cfe = CFE3D(down_list[3], 32, 'C4_cfe') print("High level feature 2 CFE\t", C4_cfe.shape[1:]) C5_cfe = CFE3D(down_list[4], 32, 'C5_cfe') print("High level feature 3 CFE\t", C5_cfe.shape[1:]) if config.stair_case: C5_cfe = UnetUpsample('C5_cfe_up4', C5_cfe, 2, 128) C345 = tf.concat([C4_cfe, C5_cfe], axis=-1, name='C45_concat') C345 = UnetUpsample('C45_up2', C4_cfe, 2, 128) C345 = tf.concat([C345, C3_cfe], axis=-1, name='C345_aspp_concat_stair_case') print("@Stair case version High level features aspp concat\t", C345.shape[1:]) else: C5_cfe = UnetUpsample('C5_cfe_up4', C5_cfe, 4, 128) C4_cfe = UnetUpsample('C4_cfe_up2', C4_cfe, 2, 128) C345 = tf.concat([C3_cfe, C4_cfe, C5_cfe], axis=-1, name='C345_aspp_concat') print("High level features aspp concat\t", C345.shape[1:]) if config.CA_attention: C345 = ChannelWiseAttention3D(C345, name='C345_ChannelWiseAttention_withcpfe') print('High level features CA\t', C345.shape[1:]) C345 = tf.layers.conv3d(inputs=C345, filters=64, kernel_size=(1,1,1), strides=1, padding=PADDING, activation=lambda x, name=None: BN_Relu(x), data_format=DATA_FORMAT, name="C345_conv") print('High level features conv\t', C345.shape[1:]) C345 = 
UnetUpsample('C345_up4', C345, 4, 64) print('High level features upsampling\t', C345.shape[1:]) if config.SA_attention: SA = SpatialAttention3D(C345, 'spatial_attention') print('High level features SA\t', SA.shape[1:]) C2 = UnetUpsample('C2_up2', C2, 2, 64) C12 = tf.concat([C1, C2], axis=-1, name='C12_concat') C12 = tf.layers.conv3d(inputs=C12, filters=64, kernel_size=(3,3,3), strides=1, padding=PADDING, activation=lambda x, name=None: BN_Relu(x), data_format=DATA_FORMAT, name="C12_conv") print('Low level feature conv\t', C12.shape[1:]) if config.MULTI_LOSS == True: C12_backup = tf.identity(C12) if config.transformerSA > 1: C12_backup = tf.identity(C12) print('transformer spatial attention level: ' + str(config.transformerSA)) if config.SA_attention: C12 = tf.math.multiply(SA, C12, name='C12_atten_mutiply') for i in range(1, config.transformerSA): SA = SpatialAttention3D(C12, 'spatial_attention_'+str(i+1)) C12 = tf.math.multiply(SA, C12_backup, name='C12_atten_mutiply_'+str(i+1)) fea = tf.concat([C12, C345], axis=-1, name='fuse_concat') print('Low + High level feature\t', fea.shape[1:]) layer = tf.layers.conv3d(fea, filters=config.NUM_CLASS, kernel_size=(3,3,3), padding="SAME", activation=tf.identity, data_format=DATA_FORMAT, name="final") if DATA_FORMAT == 'channels_first': layer = tf.transpose(layer, [0, 2, 3, 4, 1]) # to-channel last print("final", layer.shape[1:]) # [3, num_class, d, h, w] if config.MULTI_LOSS == True: C12 = tf.layers.conv3d(C12_backup, filters=config.NUM_CLASS, kernel_size=(3,3,3), padding="SAME", activation=tf.identity, data_format=DATA_FORMAT, name="C12_4") C345 = tf.layers.conv3d(C345, filters=config.NUM_CLASS, kernel_size=(3,3,3), padding="SAME", activation=tf.identity, data_format=DATA_FORMAT, name="C345_4") print("final C12", C12.shape[1:]) print("final C345", C345.shape[1:]) return C12, C345, layer return layer def Upsample3D(prefix, l, scale=2): return tf.keras.layers.UpSampling3D(size=(scale,scale,scale), data_format=DATA_FORMAT)(l) 
def RSU(name, height, in_ch, mid_ch, out_ch, dilated=False): def REBNCONV(in_ch=3, out_ch=3, dilate=1): return tf.layers.conv3d(inputs=in_ch, filters=out_ch, kernel_size=(3,3,3), dilation_rate=(1*dilate, 1*dilate, 1*dilate), padding=PADDING, use_bias=False, activation=lambda x, name=None: BN_Relu(x), data_format=DATA_FORMAT, name=None) down_list = [] down_list.append(REBNCONV(in_ch, out_ch)) for i in range(1, height): dilate = 1 if not dilated else 2 ** (i - 1) down_list.append(REBNCONV(down_list[i-1], mid_ch, dilate=dilate)) down_list[i] = tf.layers.conv3d(inputs=down_list[i], filters=mid_ch, strides=(2,2,2), kernel_size=(2,2,2))#tf.layers.MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2))(down_list[i]) for i in range(height, height+1): dilate = 1 if not dilated else 2 ** (i - 1) down_list.append(REBNCONV(down_list[i-1], mid_ch, dilate=dilate)) up_layer = down_list[height] for i in range(height-1, 1, -1): dilate = 1 if not dilated else 2 ** (i - 1) up_layer = tf.concat([up_layer, down_list[i]], axis=-1) up_layer = REBNCONV(up_layer, mid_ch, dilate=dilate) up_layer = tf.keras.layers.UpSampling3D(size=(2,2,2), data_format=DATA_FORMAT)(up_layer) up_layer = tf.concat([up_layer, down_list[1]], axis=-1) up_layer = REBNCONV(up_layer, out_ch, dilate=dilate) up_layer = tf.keras.layers.UpSampling3D(size=(2,2,2), data_format=DATA_FORMAT)(up_layer) return up_layer + down_list[0] def UnetUpsample(prefix, l, scale, num_filters): """ l = tf.layers.conv3d_transpose(inputs=l, filters=num_filters, kernel_size=(2,2,2), strides=2, padding=PADDING, activation=tf.nn.relu, data_format=DATA_FORMAT, name="up_conv0_{}".format(prefix)) """ l = Upsample3D('', l, scale) l = tf.layers.conv3d(inputs=l, filters=num_filters, kernel_size=(3,3,3), strides=1, padding=PADDING, activation=lambda x, name=None: BN_Relu(x), data_format=DATA_FORMAT, name="up_conv1_{}".format(prefix)) return l def BN_Relu(x): if config.INSTANCE_NORM: l = InstanceNorm5d('ins_norm', x, data_format=DATA_FORMAT) else: l = 
BatchNorm3d('bn', x, axis=1 if DATA_FORMAT == 'channels_first' else -1) l = tf.nn.relu(l) return l def Unet3dBlock(prefix, l, kernels, n_feat, s): if config.RESIDUAL: l_in = l for i in range(2): l = tf.layers.conv3d(inputs=l, filters=n_feat, kernel_size=kernels, strides=1, padding=PADDING, activation=lambda x, name=None: BN_Relu(x), data_format=DATA_FORMAT, name="{}_conv_{}".format(prefix, i)) return l_in + l if config.RESIDUAL else l ### from niftynet #### def labels_to_one_hot(ground_truth, num_classes=1): """ Converts ground truth labels to one-hot, sparse tensors. Used extensively in segmentation losses. :param ground_truth: ground truth categorical labels (rank `N`) :param num_classes: A scalar defining the depth of the one hot dimension (see `depth` of `tf.one_hot`) :return: one-hot sparse tf tensor (rank `N+1`; new axis appended at the end) """ # read input/output shapes if isinstance(num_classes, tf.Tensor): num_classes_tf = tf.to_int32(num_classes) else: num_classes_tf = tf.constant(num_classes, tf.int32) input_shape = tf.shape(ground_truth) output_shape = tf.concat( [input_shape, tf.reshape(num_classes_tf, (1,))], 0) if num_classes == 1: # need a sparse representation? return tf.reshape(ground_truth, output_shape) # squeeze the spatial shape ground_truth = tf.reshape(ground_truth, (-1,)) # shape of squeezed output dense_shape = tf.stack([tf.shape(ground_truth)[0], num_classes_tf], 0) # create a rank-2 sparse tensor ground_truth = tf.to_int64(ground_truth) ids = tf.range(tf.to_int64(dense_shape[0]), dtype=tf.int64) ids = tf.stack([ids, ground_truth], axis=1) one_hot = tf.SparseTensor( indices=ids, values=tf.ones_like(ground_truth, dtype=tf.float32), dense_shape=tf.to_int64(dense_shape)) # resume the spatial dims one_hot = tf.sparse_reshape(one_hot, output_shape) return one_hot def generalised_dice_loss(prediction, ground_truth, weight_map=None, type_weight='Square'): """ Function to calculate the Generalised Dice Loss defined in Sudre, C. et. al. 
(2017) Generalised Dice overlap as a deep learning loss function for highly unbalanced segmentations. DLMIA 2017 :param prediction: the logits :param ground_truth: the segmentation ground truth :param weight_map: :param type_weight: type of weighting allowed between labels (choice between Square (square of inverse of volume), Simple (inverse of volume) and Uniform (no weighting)) :return: the loss """ prediction = tf.cast(prediction, tf.float32) if len(ground_truth.shape) == len(prediction.shape): ground_truth = ground_truth[..., -1] one_hot = labels_to_one_hot(ground_truth, tf.shape(prediction)[-1]) if weight_map is not None: n_classes = prediction.shape[1].value weight_map_nclasses = tf.reshape( tf.tile(weight_map, [n_classes]), prediction.get_shape()) ref_vol = tf.sparse_reduce_sum( weight_map_nclasses * one_hot, reduction_axes=[0]) intersect = tf.sparse_reduce_sum( weight_map_nclasses * one_hot * prediction, reduction_axes=[0]) seg_vol = tf.reduce_sum( tf.multiply(weight_map_nclasses, prediction), 0) else: ref_vol = tf.sparse_reduce_sum(one_hot, reduction_axes=[0]) intersect = tf.sparse_reduce_sum(one_hot * prediction, reduction_axes=[0]) seg_vol = tf.reduce_sum(prediction, 0) if type_weight == 'Square': weights = tf.reciprocal(tf.square(ref_vol)) elif type_weight == 'Simple': weights = tf.reciprocal(ref_vol) elif type_weight == 'Uniform': weights = tf.ones_like(ref_vol) else: raise ValueError("The variable type_weight \"{}\"" "is not defined.".format(type_weight)) new_weights = tf.where(tf.is_inf(weights), tf.zeros_like(weights), weights) weights = tf.where(tf.is_inf(weights), tf.ones_like(weights) * tf.reduce_max(new_weights), weights) generalised_dice_numerator = \ 2 * tf.reduce_sum(tf.multiply(weights, intersect)) generalised_dice_denominator = \ tf.reduce_sum(tf.multiply(weights, seg_vol + ref_vol)) + 1e-6 generalised_dice_score = \ generalised_dice_numerator / generalised_dice_denominator return 1 - generalised_dice_score def dice(prediction, 
ground_truth, weight_map=None): """ Function to calculate the dice loss with the definition given in Milletari, F., Navab, N., & Ahmadi, S. A. (2016) V-net: Fully convolutional neural networks for volumetric medical image segmentation. 3DV 2016 using a square in the denominator :param prediction: the logits :param ground_truth: the segmentation ground_truth :param weight_map: :return: the loss """ ground_truth = tf.to_int64(ground_truth) prediction = tf.cast(prediction, tf.float32) ids = tf.range(tf.to_int64(tf.shape(ground_truth)[0]), dtype=tf.int64) ids = tf.stack([ids, ground_truth], axis=1) one_hot = tf.SparseTensor( indices=ids, values=tf.ones_like(ground_truth, dtype=tf.float32), dense_shape=tf.to_int64(tf.shape(prediction))) if weight_map is not None: n_classes = prediction.shape[1].value weight_map_nclasses = tf.reshape( tf.tile(weight_map, [n_classes]), prediction.get_shape()) dice_numerator = 2.0 * tf.sparse_reduce_sum( weight_map_nclasses * one_hot * prediction, reduction_axes=[0]) dice_denominator = \ tf.reduce_sum(weight_map_nclasses * tf.square(prediction), reduction_indices=[0]) + \ tf.sparse_reduce_sum(one_hot * weight_map_nclasses, reduction_axes=[0]) else: dice_numerator = 2.0 * tf.sparse_reduce_sum( one_hot * prediction, reduction_axes=[0]) dice_denominator = \ tf.reduce_sum(tf.square(prediction), reduction_indices=[0]) + \ tf.sparse_reduce_sum(one_hot, reduction_axes=[0]) epsilon_denominator = 0.00001 dice_score = dice_numerator / (dice_denominator + epsilon_denominator) return 1.0 - tf.reduce_mean(dice_score) def dice_mixup(prediction, ground_truth, weight_map=None): """ Function to calculate the dice loss with the definition given in Milletari, F., Navab, N., & Ahmadi, S. A. (2016) V-net: Fully convolutional neural networks for volumetric medical image segmentation. 
3DV 2016 using a square in the denominator :param prediction: the logits :param ground_truth: the segmentation ground_truth :param weight_map: :return: the loss """ prediction = tf.cast(prediction, tf.float32) if weight_map is not None: n_classes = prediction.shape[1].value weight_map_nclasses = tf.reshape( tf.tile(weight_map, [n_classes]), prediction.get_shape()) dice_numerator = 2.0 * tf.reduce_sum( weight_map_nclasses * ground_truth * prediction, axis=[0]) dice_denominator = \ tf.reduce_sum(weight_map_nclasses * tf.square(prediction), reduction_indices=[0]) + \ tf.reduce_sum(tf.square(ground_truth) * weight_map_nclasses, axis=[0]) else: dice_numerator = 2.0 * tf.reduce_sum( ground_truth * prediction, axes=[0]) dice_denominator = \ tf.reduce_sum(tf.square(prediction), reduction_indices=[0]) + \ tf.reduce_sum(tf.square(ground_truth), axes=[0]) epsilon_denominator = 0.00001 dice_score = dice_numerator / (dice_denominator + epsilon_denominator) return 1.0 - tf.reduce_mean(dice_score) def _cal_signed_distance_map(posmask): # given positive mask, calculate corresponding signed distance map # return has the same shape with that of the input negmask = ~posmask posdis = scipy.ndimage.distance_transform_edt(posmask) negdis = scipy.ndimage.distance_transform_edt(negmask) res = negdis * np.array(negmask, dtype=np.float) res = res - (posdis - 1.0) * np.array(posmask, dtype=np.float) return res def signed_distance_map(ground_truth): """ Function re-written from https://github.com/JunMa11/SegWithDistMap. 
Compute the signed distance map of the ground truth Paper: Kervadec et al., Boundary loss for highly unbalanced segmentation Parameters ---------- ground_truth: array_like The segmentation ground truth, shape=(x,y,z), value: 0-background, 1-ET, 2-WT, 3-CT Returns ------- ground_truth_sdm: array_like The signed distance map derived from the ground truth, shape=(x, y, z, label) """ res = None for idx in range(1, config.NUM_CLASS): posmask = ground_truth == idx sdm = None if posmask.any(): sdm = _cal_signed_distance_map(posmask) else: sdm = np.ones(posmask.shape) if idx == 1: res = np.array([sdm]) else: res = np.concatenate((res, [sdm]), axis=0) return res def _get_sdm(ground_truth, idx): # 1: ET 2: WT 3: TC posmask = ground_truth == idx if posmask.any(): sdm = _cal_signed_distance_map(posmask) else: sdm = np.ones(posmask.shape) return sdm # numpy ndarray def modified_distance_map(ground_truth, mode=config.FOCAL_MODE, coeff=config.FOCAL_SDM_COEFF): """ Returns new processed distance map """ res = None if mode == config.FOCAL_MODE_POWER: """ TODO write docs """ def power_dm(sdm, coeff=coeff): dm = np.abs(sdm) if coeff < 0 and (dm == 0).any(): # power with negative number -> must add ones to make all zeros greater than zeros dm = dm + np.ones(sdm.shape) dm = np.power(dm, coeff) return dm for idx in range(1, config.NUM_CLASS): dm = power_dm(_get_sdm(ground_truth, idx)) res = np.array([dm]) if idx == 1 else np.concatenate((res, [dm]), axis=0) elif mode == config.FOCAL_MODE_EXP: # """ TODO write docs """ def exp_sdm(sdm): dm = -1 * np.abs(sdm) dm = np.exp(dm) return dm for idx in range(1, config.NUM_CLASS): dm = exp_sdm(_get_sdm(ground_truth, idx)) res = np.array([dm]) if idx == 1 else np.concatenate((res, [dm]), axis=0) elif mode == config.FOCAL_MODE_DILATE: for idx in range(1, config.NUM_CLASS): idx_map = np.array(ground_truth) == idx struct = ndimage.generate_binary_structure(3, 1) dilation_map = ndimage.binary_dilation(input=idx_map, structure=struct, iterations=coeff) 
erosion_map = ndimage.binary_erosion(input=idx_map, structure=struct, iterations=coeff) sdm = np.logical_xor(dilation_map, erosion_map).astype(np.float32) sdm[idx == True] = -1 res = np.array([sdm]) if idx == 1 else np.concatenate((res, [sdm]), axis=0) else: # default: signed distance map res = signed_distance_map_with_edt(ground_truth) return res def prediction_focal(prediction, ground_truth, mode=config.IDENTITY): """ Note that the background prediction will be discarded """ def get_sign_map(idx): sign_map = tf.where(tf.equal(ground_truth, tf.constant(idx, dtype=tf.float32)), tf.ones_like(ground_truth) * -1.0, tf.ones_like(ground_truth)) # binary 0-1 return sign_map def get_pred_map(idx): pred_map = prediction[idx] return pred_map proc_pred = np.array([]) if mode == config.FOCAL_FUNCTION: # discard background (idx=0) and process remains for idx in range(1, config.NUM_CLASS): sign = get_sign_map(idx) pred = get_pred_map(idx) pred = tf.where(tf.equal(sign, 1.0), 1.0 - pred, pred) # focal loss - -(1 - x)^gamma * log(x) pred = tf.pow((1.0 - pred), config.FOCAL_GAMMA) * tf.log(pred) * (-1) proc_pred = tf.expand_dims(pred, axis=0) if idx == 1 else tf.concat((proc_pred, tf.expand_dims(pred, axis=0)), axis=0) elif mode == config.TSA_FUNCTION: # discard background (idx=0) and process remains for idx in range(1, config.NUM_CLASS): sign = get_sign_map(idx) pred = get_pred_map(idx) pred = tf.where(tf.equal(sign, 1.0), 1.0 - pred, pred) # Training Signal Annealing func - exp_schedule - e^(-5x) pred = tf.exp(-1.0 * pred * config.TSA_MIN_LOSS_PARAM) proc_pred = tf.expand_dims(pred, axis=0) if idx == 1 else tf.concat((proc_pred, tf.expand_dims(pred, axis=0)), axis=0) elif mode == config.POWER_FUNCTION: for idx in range(1, config.NUM_CLASS): sign = get_sign_map(idx) pred = get_pred_map(idx) pred = tf.where(tf.equal(sign, 1.0), 1.0 - pred, pred) # Power func - (1-x)^alpha pred = tf.pow((1.0 - pred), config.POWER_ALPHA) proc_pred = tf.expand_dims(pred, axis=0) if idx == 1 else 
tf.concat((proc_pred, tf.expand_dims(pred, axis=0)), axis=0) else: # config.IDENTITY or anything else for idx in range(1, config.NUM_CLASS): pred = get_sign_map(idx) * get_pred_map(idx) proc_pred = tf.expand_dims(pred, axis=0) if idx == 1 else tf.concat((proc_pred, tf.expand_dims(pred, axis=0)), axis=0) return proc_pred def boundary_focal_loss(prediction, ground_truth, ground_truth_dm): """ New loss which is the combination of the parameterized boundary loss and Focal Loss idea Parameters ---------- prediction: array_like The logits, shape=(x, y, z, label) ground_truth: array_like The segmentation ground truth, shape=(x,y,z), value: 0-background, 1-ET, 2-WT, 3-CT Returns ------- bd_focal_loss: float """ # process prediction prediction = tf.cast(prediction, tf.float32) prediction = tf.transpose(prediction, perm=(3, 0, 1, 2)) # transpose to (n_class, depth, height, width) ground_truth_dm = tf.transpose(ground_truth_dm, perm=(3,0,1,2)) print("boundary_focal_loss", prediction.get_shape(), ground_truth.get_shape()) # prediction = tf.py_function(func=prediction_focal, inp=[prediction, ground_truth, config.PREDICTION_FOCAL_FUNCTION], Tout=tf.float32) prediction = prediction_focal(prediction, ground_truth, config.PREDICTION_FOCAL_FUNCTION) print("boundary_focal_loss", prediction.get_shape(), ground_truth_dm.get_shape()) # combine weight_sum = tf.reduce_sum(ground_truth_dm) multiplied = tf.einsum("cxyz, cxyz -> cxyz", prediction, ground_truth_dm) # bd_focal_loss = tf.reduce_mean(multiplied) # make the gradient too small bd_focal_sum = tf.reduce_sum(multiplied) bd_focal_loss= tf.divide(bd_focal_sum, weight_sum) return bd_focal_loss def boundary_loss(prediction, ground_truth_dm): """ Function re-written from https://github.com/JunMa11/SegWithDistMap. Compute the signed distance map of the ground truth. 
Paper: Kervadec et al., Boundary loss for highly unbalanced segmentation Parameters ---------- prediction: array_like The logits, shape=(x, y, z, label) ground_truth: array_like The segmentation ground truth, shape=(x,y,z), value: 0-background, 1-ET, 2-WT, 3-CT Returns ------- boundary_loss: float """ # process prediction prediction = tf.cast(prediction, tf.float32) prediction = tf.transpose(prediction, perm=(3, 0, 1, 2)) # transpose to (n_class, depth, height, width) ground_truth_dm = tf.transpose(ground_truth_dm, perm=(3,0,1,2)) sliced_prediction = prediction[1:] # discard background layer? # combine multiplied = tf.einsum("cxyz, cxyz -> cxyz", sliced_prediction, ground_truth_dm) # [n_class except background, depth, height, width] bd_loss = tf.reduce_mean(multiplied) return bd_loss def Loss(feature, weight, gt, distance_map=None): # compute batch-wise losses = [] bd_loss_weight = tf.get_variable("bd_loss_weight", initializer=config.BD_LOSS_LINEAR_WEIGHT, trainable=False) dc_loss_weight = 1.0 - bd_loss_weight bd_losses=[] dc_losses=[] for idx in range(config.BATCH_SIZE): f = tf.reshape(feature[idx], [-1, config.NUM_CLASS]) # Flatten feature into [|volume|, 4 - num_class] #f = tf.cast(f, dtype=tf.float32) #f = tf.nn.softmax(f) w = tf.reshape(weight[idx], [-1]) # Flatten into 1D array if config.MIXUP: g = tf.reshape(gt[idx], [-1, config.NUM_CLASS]) # Flatten ground truth into [|volume|, 4 - num_class] else: g = tf.reshape(gt[idx], [-1]) # Flatten into 1D array if g.shape.as_list()[-1] == 1: g = tf.squeeze(g, axis=-1) # (nvoxel, ) if w.shape.as_list()[-1] == 1: w = tf.squeeze(w, axis=-1) # (nvoxel, ) f = tf.nn.softmax(f) if config.MIXUP: loss_per_batch = dice_mixup(f, g, weight_map=w) else: # MIXUP == False if config.BOUNDARY_LOSS: bd_loss = boundary_loss(tf.nn.softmax(feature[idx]), distance_map[idx]) dc_loss = dice(f, g, weight_map=w) bd_losses.append(bd_loss) dc_losses.append(dc_loss) loss_per_batch = (dc_loss * dc_loss_weight) + (bd_loss * bd_loss_weight) elif 
config.BOUNDARY_FOCAL: if gt[idx].shape.as_list()[-1] == 1: ground_truth = tf.squeeze(gt[idx], axis=-1) bd_focal_loss = boundary_focal_loss(tf.nn.softmax(feature[idx]), ground_truth, distance_map[idx]) dc_loss = dice(f, g, weight_map=w) bd_losses.append(bd_focal_loss) dc_losses.append(dc_loss) loss_per_batch = (dc_loss * dc_loss_weight) + (bd_focal_loss * bd_loss_weight) else: loss_per_batch = dice(f, g, weight_map=w) # loss_per_batch = cross_entropy(f, g, weight_map=w) losses.append(loss_per_batch) if config.BOUNDARY_LOSS: tf.summary.scalar('bd_loss_weight', bd_loss_weight) tf.summary.scalar('bd_loss', tf.reduce_mean(bd_losses)) tf.summary.scalar('dc_loss_weight', dc_loss_weight) tf.summary.scalar('dc_loss', tf.reduce_mean(dc_losses)) tf.summary.scalar('loss', tf.reduce_mean(losses)) elif config.BOUNDARY_FOCAL: tf.summary.scalar('bd_loss_weight', bd_loss_weight) tf.summary.scalar('bd_focal_loss', tf.reduce_mean(bd_losses)) tf.summary.scalar('dc_loss_weight', dc_loss_weight) tf.summary.scalar('dc_loss', tf.reduce_mean(dc_losses)) tf.summary.scalar('loss', tf.reduce_mean(losses)) return tf.reduce_mean(losses, name="dice_loss")
[ "noreply@github.com" ]
DinhHieuHoang.noreply@github.com
347e50e12b8769ef432ef42169ec2bef2a6ddd96
b252d0cb253aa216b435469ad5be07ac0050c2e4
/spec/Muraille.spec
4648fd2dd391d6251bd25df2fe1012903c8767c5
[]
no_license
dziry/CPSWarcraft
1c79353d2ff3670621b3584195393ff52f7e5b33
709b4b3a7fc154d78e0cd5e69a145f07563f2936
refs/heads/master
2021-05-29T10:46:14.706414
2015-06-22T18:27:02
2015-06-22T18:27:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
658
spec
Muraille service : Muraille refine : Terrain observators : pointsDeVie : [Muraille] → int estDetruite : [Muraille] → int Constructors : init : int × int × int → [Muraille] pre init(largeur,hauteur, pointsDeVie) require largeur%2=1 ∧ hauteur%2=1 ∧ largeur > 1 ∧ hauteur > 1 ∧ pointsDeVie>0 Operators : frappee : [Muraille] × int → [Muraille] pre frappee(M, force) require ¬estDetruite(M) ∧ f >= 0 Observations : [invariants] estDetruite(M) =(min) pointsDeVie(M) ≤ 0 [init] Terrain.largeur(init(l,h,p))=l Terrain.hauteur(init(l,h,p))=h pointsDeVie(init(l,h,p))=p [frappee] pointsDeVie(frappee(M)) = pointsDeVie(M) - f
[ "larbiyoucef.mohamed@gmail.com" ]
larbiyoucef.mohamed@gmail.com
c14137e9752ea6d63b4176c7c0ea538b39e6814d
515f1da38c6d33c9cf8597b037bbf4e8c9ee615d
/py_practice/13_01_2020_Duck_Typing_in_Python/Exception_example.py
cf74fa19c020b15e7af111bf6e69a199a47ebda1
[]
no_license
patelvini13/python
776be39f2f733d4d82cb0e219ef4587e01e7960a
66a2f4cf9e2bb02149fe9ee1554ca67293ea02c8
refs/heads/master
2021-01-02T00:29:13.752439
2020-02-10T02:38:57
2020-02-10T02:38:57
239,411,540
0
0
null
null
null
null
UTF-8
Python
false
false
321
py
def exception_func(): """exception handling in python""" try: f = open("exception_file.txt","r") except: # raise Exception ("FileNotFoundException") print("FileNotFoundException occurs while opening a file!!") else: print("File open successfully.") finally: print("Done!") exception_func()
[ "noreply@github.com" ]
patelvini13.noreply@github.com
74d24489bae3d44f5ab306ac3571c32915f1f4c2
65099340ea576c4872627ac5e1ab1b7ac063a55d
/cup/shell/oper.py
dfd29f9b214ddf89e161591caf4b4c6e74cdbb0e
[ "Apache-2.0", "MIT" ]
permissive
lotapp/CUP
e8e1dd576639006f481402ab81f21283c66e1acd
c3b3d4cdc2627a0fecbfecc6adaaa6670ac73a1c
refs/heads/master
2020-04-11T17:45:15.771994
2018-12-16T06:15:36
2018-12-16T06:15:36
161,972,903
0
0
NOASSERTION
2018-12-16T06:04:36
2018-12-16T06:04:34
null
UTF-8
Python
false
false
22,267
py
#!/usr/bin/python # -*- coding: utf-8 -* # Copyright: [CUP] - See LICENSE for details. # Authors: Zhao Minghao, Guannan Ma """ :description: shell operations related module """ import os import time import sys import shutil import signal import random import hashlib import platform import warnings import datetime import threading # import traceback import subprocess # import collections import cup from cup import decorators from cup import err # linux only import if platform.system() == 'Linux': __all__ = [ 'rm', 'rmrf', 'kill', 'is_process_used_port', 'is_port_used', 'is_proc_exist', 'is_proc_exist', 'is_process_running', 'contains_file', 'backup_file' ] # universal import (platform indepedent) else: __all__ = [ 'contains_file', 'backup_file' ] # linux functionalities {{ # pylint: disable=C0103 def rm(name): """ rm the file if no exception happens. Will not raise exception if it fails """ try: os.remove(name) except OSError as error: cup.log.warn("rm oserror: %s" % error) def rmrf(fpath, safemode=True): """ :param fpath: files/direcotry to be deleted. :param safemode: True by default. You cannot delete root / when safemode is True """ @decorators.needlinux def _real_rmrf(fpath, safemode): """ real rmrf """ if safemode: if os.path.normpath(os.path.abspath(fpath)) == '/': raise err.ShellException('cannot rmtree root / under safemode') if os.path.isfile(fpath): os.unlink(fpath) else: shutil.rmtree(fpath) return _real_rmrf(fpath, safemode) def is_process_running(path, name): """ Judge if the executable is running by comparing /proc files. :platforms: linux only. Will raise exception if running on other platforms :param path: executable current working direcotry :param name: executable name :return: return True if the process is running. Return False otherwise. 
""" @decorators.needlinux def _real_is_proc_exist(path, name): """ _real_is_proc_exist """ path = os.path.realpath(os.path.abspath(path)) cmd = 'ps -ef|grep %s|grep -v "^grep "|grep -v "^vim "|grep -v "^less "|\ grep -v "^vi "|grep -v "^cat "|grep -v "^more "|grep -v "^tail "|\ awk \'{print $2}\'' % (name) ret = cup.shell.ShellExec().run(cmd, 10) pids = ret['stdout'].strip().split('\n') if len(pids) == 0 or len(pids) == 1 and len(pids[0]) == 0: return False for pid in pids: for sel_path in ["cwd", "exe"]: cmd = 'ls -l /proc/%s/%s|awk \'{print $11}\' ' % (pid, sel_path) ret = cup.shell.ShellExec().run(cmd, 10) pid_path = ret['stdout'].strip().strip() if pid_path.find(path) == 0: # print '%s is exist: %s' % (name, path) return True return False return _real_is_proc_exist(path, name) # for compatibility. Do not delete this line: is_proc_exist = is_process_running def _kill_child(pid, sign): cmd = 'ps -ef|grep %s|grep -v grep|awk \'{print $2,$3}\'' % (pid) ret = cup.shell.ShellExec().run(cmd, 10) pids = ret['stdout'].strip().split('\n') for proc in pids: p_id = proc.split() if p_id[1] == pid: _kill_child(p_id[0], sign) if p_id[0] == pid: if len(sign) == 0: cup.shell.execshell('kill %s' % pid) elif sign == '9' or sign == '-9': cup.shell.execshell('kill -9 %s' % pid) elif sign == 'SIGSTOP' or sign == '19' or sign == '-19': cup.shell.execshell('kill -19 %s' % pid) elif sign == 'SIGCONT' or sign == '18' or sign == '-18': cup.shell.execshell('kill -18 %s' % pid) else: cup.log.error('sign error') def kill(path, name, sign='', b_kill_child=False): """ will judge if the process is running by calling function (is_process_running), then send kill signal to this process :param path: executable current working direcotry (cwd) :param name: executable name :param sign: kill sign, e.g. 9 for SIGKILL, 15 for SIGTERM :b_kill_child: kill child processes or not. False by default. 
""" path = os.path.realpath(os.path.abspath(path)) # path = os.path.abspath(path) cmd = 'ps -ef|grep %s|grep -v grep|awk \'{print $2}\'' % (name) ret = cup.shell.ShellExec().run(cmd, 10) pids = ret['stdout'].strip().split('\n') for pid in pids: cmd = 'ls -l /proc/%s/cwd|awk \'{print $11}\' ' % (pid) ret = cup.shell.ShellExec().run(cmd, 10) if ret['returncode'] != 0: return False pid_path = ret['stdout'].strip() if pid_path.find(path) == 0 or path.find(pid_path) == 0: if b_kill_child is True: _kill_child(pid, sign) if len(sign) == 0: cup.shell.execshell('kill %s' % pid) elif sign == '9' or sign == '-9': cup.shell.execshell('kill -9 %s' % pid) elif sign == 'SIGSTOP' or sign == '19' or sign == '-19': cup.shell.execshell('kill -19 %s' % pid) elif sign == 'SIGCONT' or sign == '18' or sign == '-18': cup.shell.execshell('kill -18 %s' % pid) else: cup.log.error('sign error') return True def backup_file(srcpath, filename, dstpath, label=None): """ Backup srcpath/filename to dstpath/filenamne.label. If label is None, cup will use time.strftime('%H:%M:S') :dstpath: will create the folder if no existence """ if label is None: label = time.strftime('%H:%M:%S') if not os.path.exists(dstpath): os.makedirs(dstpath) shutil.copyfile( srcpath + '/' + filename, dstpath + '/' + filename + '.' + label ) def backup_folder(srcpath, foldername, dstpath, label=None): """ same to backup_file except it's a FOLDER not a FILE. """ if label is None: label = time.strftime('%H:%M:%S') if not os.path.exists(dstpath): os.makedirs(dstpath) os.rename( '%s/%s' % (srcpath, foldername), '%s/%s' % (dstpath, foldername + '.' + label) ) def is_path_contain_file(dstpath, dstfile, recursive=False, follow_link=False): """ use contains_file instead. 
Kept still for compatibility purpose """ return contains_file(dstpath, dstfile, recursive, follow_link) def contains_file(dstpath, expected_name, recursive=False, follow_link=False): """ judge if the dstfile is in dstpath :param dstpath: search path :param dstfile: file :param recursive: search recursively or not. False by default. :return: return True on success, False otherwise """ path = os.path.normpath(dstpath) fpath = os.path.normpath(expected_name.strip()) fullpath = '{0}/{1}'.format(path, expected_name.strip()) fullpath = os.path.normpath(fullpath) if recursive: for (_, __, fnames) in os.walk(path, followlinks=follow_link): for filename in fnames: if filename == fpath: return True return False else: if os.path.exists(fullpath): return True else: return False def is_port_used(port): """ judge if the port is used or not (It's not 100% sure as next second, some other process may steal the port as soon after this function returns) :platform: linux only (netstat command used inside) :param port: expected port :return: return True if the port is used, False otherwise """ @decorators.needlinux def __is_port_used(port): """internal func""" cmd = "netstat -nl | grep ':%s '" % (port) ret = cup.shell.ShellExec().run(cmd, 10) if 0 != ret['returncode']: return False stdout = ret['stdout'].strip() if 0 == len(stdout): return False else: return True return __is_port_used(port) def is_process_used_port(process_path, port): """ judge if a process is using the port :param process_path: process current working direcotry (cwd) :return: Return True if process matches """ # find the pid from by port cmd = "netstat -nlp | grep ':%s '|awk -F ' ' '{print $7}'|\ cut -d \"/\" -f1" % (port) ret = cup.shell.ShellExec().run(cmd, 10) if 0 != ret['returncode']: return False stdout = ret['stdout'].strip() if 0 == len(stdout): return False dst_pid = stdout.strip() # check the path path = os.path.abspath(process_path) for sel_path in ['exe', 'cwd']: cmd = 'ls -l /proc/%s/%s|awk \'{print 
$11}\' ' % (dst_pid, sel_path) ret = cup.shell.ShellExec().run(cmd, 10) pid_path = ret['stdout'].strip().strip() if 0 == pid_path.find(path): return True return False class Asynccontent(object): """ make a Argcontent to async_run u have to del it after using it """ def __init__(self): self.cmd = None self.timeout = None self.pid = None self.ret = None self.child_list = [] self.__cmdthd = None self.__monitorthd = None self.__subpro = None class ShellExec(object): # pylint: disable=R0903 """ For shell command execution. :: from cup import shell shellexec = shell.ShellExec() # timeout=None will block the execution until it finishes shellexec.run('/bin/ls', timeout=None) # timeout>=0 will open non-blocking mode # The process will be killed if the cmd timeouts shellexec.run(cmd='/bin/ls', timeout=100) """ def __init__(self): self._subpro = None self._subpro_data = None def __kill_process(self, pid): os.kill(pid, signal.SIGKILL) def kill_all_process(self, async_content): """ to kill all process """ for pid in async_content.child_list: self.__kill_process(pid) def get_async_run_status(self, async_content): """ get the command's status """ try: from cup.res import linux async_process = linux.Process(async_content.pid) res = async_process.get_process_status() except err.NoSuchProcess: res = "process is destructor" return res def get_async_run_res(self, async_content): """ if the process is still running the res shoule be None,None,0 """ return async_content.ret def async_run(self, cmd, timeout): """ async_run return a dict {uuid:pid} self.argcontent{cmd,timeout,ret,cmdthd,montor} timeout:returncode:999 cmd is running returncode:-999 """ def _signal_handle(): """ signal setup """ signal.signal(signal.SIGPIPE, signal.SIG_DFL) def _target(argcontent): argcontent.__subpro = subprocess.Popen( argcontent.cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=_signal_handle) from cup.res import linux parent = linux.Process(argcontent.__subpro.pid) children = 
parent.children(True) ret_dict = [] for process in children: ret_dict.append(process) argcontent.child_list = ret_dict def _monitor(start_time, argcontent): while(int(time.mktime(datetime.datetime.now().timetuple())) - int(start_time) < int(argcontent.timeout)): time.sleep(1) if argcontent.__subpro.poll() is not None: self._subpro_data = argcontent.__subpro.communicate() argcontent.ret['returncode'] = argcontent.__subpro.returncode argcontent.ret['stdout'] = self._subpro_data[0] argcontent.ret['stderr'] = self._subpro_data[1] return str_warn = ( 'Shell "%s"execution timout:%d. To kill it' % (argcontent.cmd, argcontent.timeout) ) argcontent.__subpro.terminate() argcontent.ret['returncode'] = 999 argcontent.ret['stderr'] = str_warn for process in argcontent.child_list: self.__kill_process(process) del argcontent.child_list[:] argcontent = Asynccontent() argcontent.cmd = cmd argcontent.timeout = timeout argcontent.ret = { 'stdout': None, 'stderr': None, 'returncode': -999 } argcontent.__cmdthd = threading.Thread(target=_target, args=(argcontent,)) argcontent.__cmdthd.start() start_time = int(time.mktime(datetime.datetime.now().timetuple())) argcontent.__cmdthd.join(0.1) argcontent.pid = argcontent.__subpro.pid argcontent.__monitorthd = threading.Thread(target=_monitor, args=(start_time, argcontent)) argcontent.__monitorthd.start() #this join should be del if i can make if quicker in Process.children argcontent.__cmdthd.join(0.5) return argcontent def run(self, cmd, timeout): """ refer to the class description :param timeout: If the cmd is not returned after [timeout] seconds, the cmd process will be killed. If timeout is None, will block there until the cmd execution returns :return: returncode == 0 means success, while 999 means timeout { 'stdout' : 'Success', 'stderr' : None, 'returncode' : 0 } E.g. 
:: import cup shelltool = cup.shell.ShellExec() print shelltool.run('/bin/ls', timeout=1) """ def _signal_handle(): """ signal setup """ signal.signal(signal.SIGPIPE, signal.SIG_DFL) def _target(cmd): self._subpro = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=_signal_handle ) self._subpro_data = self._subpro.communicate() ret = { 'stdout': None, 'stderr': None, 'returncode': 0 } cmdthd = threading.Thread(target=_target, args=(cmd, )) cmdthd.start() cmdthd.join(timeout) if cmdthd.isAlive() is True: str_warn = ( 'Shell "%s"execution timout:%d. To kill it' % (cmd, timeout) ) warnings.warn(str_warn, RuntimeWarning) self._subpro.terminate() ret['returncode'] = 999 ret['stderr'] = str_warn else: self._subpro.wait() times = 0 while self._subpro.returncode is None and times < 10: time.sleep(1) times += 1 ret['returncode'] = self._subpro.returncode assert type(self._subpro_data) == tuple, \ 'self._subpro_data should be a tuple' ret['stdout'] = self._subpro_data[0] ret['stderr'] = self._subpro_data[1] return ret def _do_execshell(cmd, b_printcmd=True, timeout=None): """ do execshell """ if timeout is not None and timeout < 0: raise cup.err.ShellException( 'timeout should be None or >= 0' ) if b_printcmd is True: print 'To exec cmd:%s' % cmd shellexec = ShellExec() return shellexec.run(cmd, timeout) def execshell(cmd, b_printcmd=True, timeout=None): """ 执行shell命令,返回returncode """ return _do_execshell( cmd, b_printcmd=b_printcmd, timeout=timeout)['returncode'] def execshell_withpipe(cmd): """ Deprecated. Use ShellExec instead """ res = os.popen(cmd) return res def execshell_withpipe_ex(cmd, b_printcmd=True): """ Deprecated. Recommand using ShellExec. 
""" strfile = '/tmp/%s.%d.%d' % ( 'shell_env.py', int(os.getpid()), random.randint(100000, 999999) ) os.mknod(strfile) cmd = cmd + ' 1>' + strfile + ' 2>/dev/null' os.system(cmd) if True == b_printcmd: print cmd fphandle = open(strfile, 'r') lines = fphandle.readlines() fphandle.close() os.unlink(strfile) return lines def execshell_withpipe_str(cmd, b_printcmd=True): """ Deprecated. Recommand using ShellExec. """ return ''.join(execshell_withpipe_ex(cmd, b_printcmd)) def execshell_withpipe_exwitherr(cmd, b_printcmd=True): """ Deprecated. Recommand using ShellExec. """ strfile = '/tmp/%s.%d.%d' % ( 'shell_env.py', int(os.getpid()), random.randint(100000, 999999) ) cmd = cmd + ' >' + strfile cmd = cmd + ' 2>&1' os.system(cmd) if b_printcmd: print cmd fhandle = open(strfile, 'r') lines = fhandle.readlines() fhandle.close() os.unlink(strfile) return lines def is_proc_alive(procname, is_whole_word=False, is_server_tag=False, filters=False): """ Deprecated. Recommand using cup.oper.is_proc_exist """ # print procName if is_whole_word: cmd = "ps -ef|grep -w '%s'$ |grep -v grep" % procname else: cmd = "ps -ef|grep -w '%s' |grep -v grep" % procname if is_server_tag: cmd += '|grep -vwE "vim |less |vi |tail |cat |more "' if filters: if isinstance(filters, str): cmd += "|grep -v '%s'" % filters elif isinstance(filters, list): for _, task in enumerate(filters): cmd += "|grep -v '%s'" % task cmd += '|wc -l' rev = execshell_withpipe_str(cmd, False) if int(rev) > 0: return True else: return False def forkexe_shell(cmd): """ fork a new process to execute cmd (os.system(cmd)) """ try: pid = os.fork() if pid > 0: return except OSError: sys.exit(1) # os.chdir("/") os.setsid() # os.umask(0) try: pid = os.fork() if pid > 0: sys.exit(0) except OSError: sys.exit(1) os.system(cmd) def md5file(filename): """ compute md5 hex value of a file, return with a string (hex-value) """ if os.path.exists(filename) is False: raise IOError('No such file: %s' % filename) with open(filename, 'rb') as 
fhandle: md5obj = hashlib.md5() while True: strtmp = fhandle.read(131072) # read 128k one time if len(strtmp) <= 0: break md5obj.update(strtmp) return md5obj.hexdigest() def kill9_byname(strname): """ kill -9 process by name """ fd_pid = os.popen("ps -ef | grep -v grep |grep %s \ |awk '{print $2}'" % (strname)) pids = fd_pid.read().strip().split('\n') fd_pid.close() for pid in pids: os.system("kill -9 %s" % (pid)) def kill_byname(strname): """ kill process by name """ fd_pid = os.popen("ps -ef | grep -v grep |grep %s \ |awk '{print $2}'" % (strname)) pids = fd_pid.read().strip().split('\n') fd_pid.close() for pid in pids: os.system("kill -s SIGKILL %s" % (pid)) def del_if_exist(path, safemode=True): """ delete the path if it exists, cannot delete root / under safemode """ if safemode and path == '/': raise IOError('Cannot delete root path /') if os.path.lexists(path) is False: return -1 if os.path.isdir(path): shutil.rmtree(path) elif os.path.isfile(path) or os.path.islink(path): os.unlink(path) else: raise IOError('Does not support deleting the type 4 the path') def rmtree(path, ignore_errors=False, onerror=None, safemode=True): """ safe rmtree. safemode, by default is True, which forbids: 1. not allowing rmtree root "/" """ if safemode: if os.path.normpath(os.path.abspath(path)) == '/': raise err.ShellException('cannot rmtree root / under safemode') if os.path.isfile(path): return os.unlink(path) else: return shutil.rmtree(path, ignore_errors, onerror) def shell_diff(srcfile, dstfile): """ shell diff two files, return 0 if it's the same. """ cmd = 'diff %s %s' % (srcfile, dstfile) return os.system(cmd) def get_pid(process_path, grep_string): """ will return immediately after find the pid which matches 1. ps -ef|grep %s|grep -v grep|grep -vE "^[vim|less|vi|tail|cat|more] " '|awk '{print $2}' 2. workdir is the same as ${process_path} :param process_path: process that runs on :param grep_string: ps -ef|grep ${grep_string} :return: return None if not found. 
Otherwise, return the pid """ cmd = ( 'ps -ef|grep \'%s\'|grep -v grep|grep -vwE "vim |less |vi |tail |cat |more "' '|awk \'{print $2}\'' ) % (grep_string) ret = cup.shell.ShellExec().run(cmd, 10) pids = ret['stdout'].strip().split('\n') if len(pids) == 0 or len(pids) == 1 and len(pids[0]) == 0: return None for pid in pids: for sel_path in ["cwd", "exe"]: cmd = 'ls -l /proc/%s/%s|awk \'{print $11}\' ' % (pid, sel_path) ret = cup.shell.ShellExec().run(cmd, 10) pid_path = ret['stdout'].strip().strip() if pid_path.find(process_path) == 0: return pid return None # end linux functionalities }} # vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
[ "maguannan@baidu.com" ]
maguannan@baidu.com
510b53b63d95c89119527ad6b488ba97bd782baf
8862a4405df395c88ec79167c580648462668e71
/Test.py
1f7e7949335f34c1935043dbe10dd188b3b63bd8
[]
no_license
prateeksarangi/Rough_Code_Practice
f0fca186ea7d75000650901231e7a6ae91b603c1
7d6014f06d8c3296f56ef6767a0783c5e4490011
refs/heads/master
2022-12-23T14:50:22.182635
2021-03-04T04:18:17
2021-03-04T04:18:17
197,002,033
0
0
null
2022-12-13T09:40:25
2019-07-15T13:15:48
JavaScript
UTF-8
Python
false
false
482
py
import math import os import random import re import sys # Complete the maxMin function below. def maxMin(k, arr): arr.sort() return arr[k-1] - arr[0] if __name__ == '__main__': #fptr = open(os.environ['OUTPUT_PATH'], 'w') n = int(input()) k = int(input()) arr = [] for _ in range(n): arr_item = int(input()) arr.append(arr_item) result = maxMin(k, arr) #fptr.write(str(result) + '\n') #fptr.close() print(result)
[ "sarangiprateek80@gmail.com" ]
sarangiprateek80@gmail.com
a860c888aaa57bcc2ac829ceec0b974d2dcca974
05a3858cbedb092a188357a2a896020b98f65e99
/database/db_hero.py
e2a521c8799270baa3ba981e1730fd020e6e5f73
[]
no_license
694035434/Official-game-website
89d7dc0b151d2fd2fca9ab7f43870e88153be38c
a0245df98a5466db2fabaf82dd0d51f383bbee01
refs/heads/master
2022-07-12T19:17:36.082586
2020-05-15T16:21:44
2020-05-15T16:21:44
264,222,252
0
0
null
null
null
null
UTF-8
Python
false
false
1,239
py
from database.db_util import Db_util class Hero(): def __init__(self): self.db=Db_util() self.conn=self.db.conn self.cursor=self.db.cursor def insert(self,name,img,skill,story): sql="insert into t_hero(h_name,h_photo,h_skill,h_story) VALUES ('%s','%s','%s','%s')"%(name,img,skill,story) v = self.cursor.execute(sql) self.conn.commit() self.db.close() return v def hero_select(self): sql = "SELECT * FROM t_hero" self.cursor.execute(sql) result = self.cursor.fetchall() # for r1 in result: self.conn.commit() self.db.close() return result def hero_select_one(self,id): sql = "SELECT * FROM t_hero WHERE h_id=%d"%(int(id)) self.cursor.execute(sql) result = self.cursor.fetchall() # for r1 in result: self.conn.commit() self.db.close() return result def select_limit(self): sql="select * from t_hero limit 4" self.cursor.execute(sql) result = self.cursor.fetchall() # for r1 in result: self.conn.commit() self.db.close() return result
[ "694035434@qq.com" ]
694035434@qq.com
71eda8ebf13d74adada1b21721f5e0965ad1f24c
5a64f117df8580cda2990db2e6c3c7f22a1783db
/menes/trunk/cyazon/cyazon.py
f388cd0c437294ba156d5168f15fc00a77e44af9
[]
no_license
xcode2010/saurik-svn-mirror
5fa87e01a052ed1171c716fe121dc1a8b4d6c7dd
42aaa83d383dc85faa51733d7da3d2bc3c82de9e
refs/heads/master
2021-12-12T12:15:03.477067
2017-01-06T10:44:46
2017-01-06T10:44:46
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,740
py
# Cyazon - Python Amazon FPS Client # Copyright (C) 2009 Jay Freeman (saurik) # Redistribution and use in source and binary # forms, with or without modification, are permitted # provided that the following conditions are met: # # 1. Redistributions of source code must retain the # above copyright notice, this list of conditions # and the following disclaimer. # 2. Redistributions in binary form must reproduce the # above copyright notice, this list of conditions # and the following disclaimer in the documentation # and/or other materials provided with the # distribution. # 3. The name of the author may not be used to endorse # or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import base64, hashlib, hmac, urllib, urllib2 from datetime import datetime from xml.dom import minidom def undom(dom): children = dom.childNodes if len(children) == 0: return None elif len(children) == 1 and children[0].localName == None: return children[0].nodeValue else: node = {} for child in dom.childNodes: name = child.localName if name != None: value = undom(child) #node.setdefault(name, []).append(value) node[name] = value return node def parse(file): return undom(minidom.parse(file)) def sign_(set, secret): return base64.b64encode(hmac.new(secret, ''.join( item[2] + str(item[1]) for item in sorted(( item[0].lower(), item[1], item[0] ) for item in set.items()) ), hashlib.sha1).digest()) def sign(set, secret): return sign_(set, secret) class AWSError(Exception): def __init__(self, code, message): self.code = code self.message = message def __str__(self): return str(self.code) + ': ' + str(self.message) def merge_(params, **args): for item in args.items(): if isinstance(item[1], dict): super = item[0] for sub in item[1].items(): params[super + '.' + sub[0]] = sub[1] else: params[item[0]] = str(item[1]) class connect: def __init__(self, access, secret): self.access = access self.secret = secret def sign(self, set): return sign_(set, self.secret) def pcb(self, ref, limit, next, **args): params = { 'callerKey': self.access, 'callerReference': ref, 'globalAmountLimit': limit, 'pipelineName': 'MultiUse', 'returnURL': next, 'version': '2009-01-09', } merge_(params, **args) params['awsSignature'] = sign(params, self.secret) return 'https://authorize.payments.amazon.com/cobranded-ui/actions/start?' + urllib.urlencode(params) #return 'https://authorize.payments-sandbox.amazon.com/cobranded-ui/actions/start?' 
+ urllib.urlencode(params) def fps(self, action, **args): params = { 'Action': action, 'AWSAccessKeyId': self.access, 'SignatureVersion': 1, 'Timestamp': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'), 'Version': '2008-09-17', } merge_(params, **args) params['Signature'] = sign(params, self.secret) try: #url = urllib2.urlopen('https://fps.sandbox.amazonaws.com/?' + urllib.urlencode(params)) url = urllib2.urlopen('https://fps.amazonaws.com/?' + urllib.urlencode(params)) except urllib2.HTTPError, e: try: dom = parse(e) finally: e.close() err = dom['Response']['Errors']['Error'] raise AWSError(err['Code'], err['Message']) try: dom = parse(url) finally: url.close() response = dom[action + 'Response'] name = action + 'Result' return response[name] if name in response else None
[ "286674783@qq.com" ]
286674783@qq.com
a4fe8badfa3fa74eb867fa7cf3b3eea6a75102cb
b094155ee50d0ddd8b8703955a968d8bed77b25c
/appprincipal/vendas/urls.py
1e1d649c2aa8a638435b0763ac1f2a61c7130ac9
[]
no_license
Netovoski/padariadjango
704b1f3340625ac81e5f9a6d629ec39a3b68c33d
f4850f02f2ab4de45f6b8d5fa7c1b36fd2eca407
refs/heads/master
2023-02-14T16:34:41.203758
2020-12-23T13:55:01
2020-12-23T13:55:01
290,487,895
2
0
null
2020-08-26T12:18:07
2020-08-26T12:18:07
null
UTF-8
Python
false
false
767
py
from django.views.generic import View, TemplateView, CreateView, UpdateView, ListView, DeleteView from django.urls import path from . import views from appprincipal.vendas.views import * app_name = 'vendas' urlpatterns = [ path('addvenda/', views.venda_create, name='add_venda'), path('novavenda/<int:idvenda2>', NovavendaView.as_view(), name ="nova_venda"), path('novavenda/excluir/<pk>',views.DeletarUnid, name='deletarunid'), path('minhasvendas/', CompraListView.as_view(), name='minhasvendas'), path('minhasvendas/excluir/<pk>', VendaDeleteView.as_view(), name='deletarvenda'), #path('compras/<pk>', views.finalizaCompra, name = 'compra_produtos'), path('compras/', FinalizaCompraListView.as_view(), name = 'compra_produtos'), ]
[ "netovoski@gmail.com" ]
netovoski@gmail.com
d6246ef699c47ce3d1bbcf801835f3fac4236d8f
3395a234e7c80d011607e79c49cd48bf516f256b
/dependencies/jedi/third_party/typeshed/third_party/2and3/werkzeug/test.pyi
764b76d8e78bf0f973af8fbcc504bf8dfd79edde
[ "MIT", "Apache-2.0" ]
permissive
srusskih/SublimeJEDI
67329b72e184bc9584843968dcc534a002c797a1
95c185d778425c04536d53517b0e3fe6dedf8e59
refs/heads/master
2023-08-24T11:30:37.801834
2022-08-30T09:04:17
2022-08-30T09:04:17
6,241,108
669
125
MIT
2022-08-30T09:04:18
2012-10-16T08:23:57
Python
UTF-8
Python
false
false
5,957
pyi
import sys from wsgiref.types import WSGIEnvironment from typing import Any, Generic, Optional, Text, Tuple, Type, TypeVar, overload from typing_extensions import Literal if sys.version_info < (3,): from urllib2 import Request as U2Request from cookielib import CookieJar else: from urllib.request import Request as U2Request from http.cookiejar import CookieJar def stream_encode_multipart(values, use_tempfile: int = ..., threshold=..., boundary: Optional[Any] = ..., charset: Text = ...): ... def encode_multipart(values, boundary: Optional[Any] = ..., charset: Text = ...): ... def File(fd, filename: Optional[Any] = ..., mimetype: Optional[Any] = ...): ... class _TestCookieHeaders: headers: Any def __init__(self, headers): ... def getheaders(self, name): ... def get_all(self, name, default: Optional[Any] = ...): ... class _TestCookieResponse: headers: Any def __init__(self, headers): ... def info(self): ... class _TestCookieJar(CookieJar): def inject_wsgi(self, environ): ... def extract_wsgi(self, environ, headers): ... class EnvironBuilder: server_protocol: Any wsgi_version: Any request_class: Any charset: Text path: Any base_url: Any query_string: Any args: Any method: Any headers: Any content_type: Any errors_stream: Any multithread: Any multiprocess: Any run_once: Any environ_base: Any environ_overrides: Any input_stream: Any content_length: Any closed: Any def __init__(self, path: str = ..., base_url: Optional[Any] = ..., query_string: Optional[Any] = ..., method: str = ..., input_stream: Optional[Any] = ..., content_type: Optional[Any] = ..., content_length: Optional[Any] = ..., errors_stream: Optional[Any] = ..., multithread: bool = ..., multiprocess: bool = ..., run_once: bool = ..., headers: Optional[Any] = ..., data: Optional[Any] = ..., environ_base: Optional[Any] = ..., environ_overrides: Optional[Any] = ..., charset: Text = ...): ... form: Any files: Any @property def server_name(self): ... @property def server_port(self): ... def __del__(self): ... 
def close(self): ... def get_environ(self): ... def get_request(self, cls: Optional[Any] = ...): ... class ClientRedirectError(Exception): ... # Response type for the client below. # By default _R is Tuple[Iterable[Any], Union[Text, int], datastructures.Headers] _R = TypeVar('_R') class Client(Generic[_R]): application: Any response_wrapper: Optional[Type[_R]] cookie_jar: Any allow_subdomain_redirects: Any def __init__(self, application, response_wrapper: Optional[Type[_R]] = ..., use_cookies: bool = ..., allow_subdomain_redirects: bool = ...): ... def set_cookie(self, server_name, key, value: str = ..., max_age: Optional[Any] = ..., expires: Optional[Any] = ..., path: str = ..., domain: Optional[Any] = ..., secure: Optional[Any] = ..., httponly: bool = ..., charset: Text = ...): ... def delete_cookie(self, server_name, key, path: str = ..., domain: Optional[Any] = ...): ... def run_wsgi_app(self, environ, buffered: bool = ...): ... def resolve_redirect(self, response, new_location, environ, buffered: bool = ...): ... @overload def open(self, *args, as_tuple: Literal[True], **kwargs) -> Tuple[WSGIEnvironment, _R]: ... @overload def open(self, *args, as_tuple: Literal[False] = ..., **kwargs) -> _R: ... @overload def open(self, *args, as_tuple: bool, **kwargs) -> Any: ... @overload def get(self, *args, as_tuple: Literal[True], **kw) -> Tuple[WSGIEnvironment, _R]: ... @overload def get(self, *args, as_tuple: Literal[False] = ..., **kw) -> _R: ... @overload def get(self, *args, as_tuple: bool, **kw) -> Any: ... @overload def patch(self, *args, as_tuple: Literal[True], **kw) -> Tuple[WSGIEnvironment, _R]: ... @overload def patch(self, *args, as_tuple: Literal[False] = ..., **kw) -> _R: ... @overload def patch(self, *args, as_tuple: bool, **kw) -> Any: ... @overload def post(self, *args, as_tuple: Literal[True], **kw) -> Tuple[WSGIEnvironment, _R]: ... @overload def post(self, *args, as_tuple: Literal[False] = ..., **kw) -> _R: ... 
@overload def post(self, *args, as_tuple: bool, **kw) -> Any: ... @overload def head(self, *args, as_tuple: Literal[True], **kw) -> Tuple[WSGIEnvironment, _R]: ... @overload def head(self, *args, as_tuple: Literal[False] = ..., **kw) -> _R: ... @overload def head(self, *args, as_tuple: bool, **kw) -> Any: ... @overload def put(self, *args, as_tuple: Literal[True], **kw) -> Tuple[WSGIEnvironment, _R]: ... @overload def put(self, *args, as_tuple: Literal[False] = ..., **kw) -> _R: ... @overload def put(self, *args, as_tuple: bool, **kw) -> Any: ... @overload def delete(self, *args, as_tuple: Literal[True], **kw) -> Tuple[WSGIEnvironment, _R]: ... @overload def delete(self, *args, as_tuple: Literal[False] = ..., **kw) -> _R: ... @overload def delete(self, *args, as_tuple: bool, **kw) -> Any: ... @overload def options(self, *args, as_tuple: Literal[True], **kw) -> Tuple[WSGIEnvironment, _R]: ... @overload def options(self, *args, as_tuple: Literal[False] = ..., **kw) -> _R: ... @overload def options(self, *args, as_tuple: bool, **kw) -> Any: ... @overload def trace(self, *args, as_tuple: Literal[True], **kw) -> Tuple[WSGIEnvironment, _R]: ... @overload def trace(self, *args, as_tuple: Literal[False] = ..., **kw) -> _R: ... @overload def trace(self, *args, as_tuple: bool, **kw) -> Any: ... def create_environ(*args, **kwargs): ... def run_wsgi_app(app, environ, buffered: bool = ...): ...
[ "srusskih@users.noreply.github.com" ]
srusskih@users.noreply.github.com
ec0a0bef48d97751f1051798d55b81d8ca117f5c
94899dff297d3985c70c6ea9e897130ca3ef6527
/DataProcessing/interest_similarity_scores.py
7f6fb7feb0025365eb55baf3ceb8bc3a352ad1d7
[ "MIT" ]
permissive
fenekku/Masters
a6516430752c7dac1f0b6a1a18db0f74b86bfaa7
32ce3f9ae22893c7777262cfc7aa215345d68ad5
refs/heads/master
2020-06-06T02:01:34.855231
2014-11-12T16:11:55
2014-11-12T16:11:55
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,393
py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Generate similarity interest score matrix """ import numpy as np from sklearn.metrics.pairwise import cosine_similarity from helpers import repositories_interested_by def get_interest_similarity_scores(u_rs, nb_rs, state, similarity="cos"): """Return ndarray of the maximum `similarity` scores of each `nb_rs` wrt the repositories `uidx` is interested in at `state`. """ if len(u_rs) == 0: #training time equal to validation time -> rare as it implies #several actions occurred at the same second #ignore them for now (only 8 of them) return np.array([]) r_u_csr = state.transpose().astype(np.bool).astype(np.int).tocsr() if similarity == "cos": cos_sim = cosine_similarity(r_u_csr[nb_rs,:], r_u_csr[u_rs,:]) max_similarity_scores = cos_sim.max(axis=1) return max_similarity_scores if __name__ == "__main__": pass # from argparse import ArgumentParser # import time # import sys # from nose.tools import eq_, ok_ # sys.path.append(abspath(join("..", "Utilities"))) # from paths import VU_TO_I_FN, PROCESSED_DATA_DIR, RECOMMENDATION_TIMES_FN # from paths import TIMED_INTERESTS_FN, VR_TO_I_FN # from indexer import Indexer # from general import get_matrix_before_t # argparser = ArgumentParser() # argparser.add_argument('version', help="dataset version") # args = argparser.parse_args() # dataset_dir = join(PROCESSED_DATA_DIR, "test", args.version) # u_to_i = Indexer(join(dataset_dir, VU_TO_I_FN)) # print "u_to_i", u_to_i # r_to_i = Indexer(join(dataset_dir, VR_TO_I_FN)) # print "r_to_i", r_to_i # real_user = 1 # uidx = u_to_i[real_user] # print "{} (real)-> {} (idx)-> {} (mm idx)".format(real_user, uidx, uidx+1) # past = time.time() # rt = np.load(join(dataset_dir,RECOMMENDATION_TIMES_FN)) # print "rt[{}] = {}".format(uidx,rt[uidx]) # state = get_matrix_before_t(mmread(join(dataset_dir,TIMED_INTERESTS_FN)).transpose(), # rt[uidx]).tocsr() # s = get_interest_similarity_scores(uidx, state, dataset_dir, similarity="cos") # print "sim for 
uidx {}".format(uidx) # print s # eq_(s.nnz, 8) # print "Wall clock time for ASIM: {:.3f} s".format(time.time() - past) # print "Tests pass"
[ "guillaume.viger@mail.mcgill.ca" ]
guillaume.viger@mail.mcgill.ca
df6b6f8f850dfdbf0072fbe2a9ec784ec46c1b31
a2a535e005dc73a7baf27595c517e2c31e9483d1
/dags/sparkdag.py
5b5e6b618904df33e63337d63b1387da7ea9263d
[ "Apache-2.0" ]
permissive
d3vzer0/airflow-training-skeleton
f35a7f0f6ca26e91e7d742e109b8d1185ecce3c7
938a4035fbbc6cd4af8e9618c1c6df7b9f8929be
refs/heads/master
2020-12-26T05:19:49.071978
2020-02-14T15:31:58
2020-02-14T15:31:58
237,397,813
0
0
Apache-2.0
2020-01-31T09:25:18
2020-01-31T09:25:18
null
UTF-8
Python
false
false
2,081
py
# -*- coding: utf-8 -*- import airflow import datetime from datetime import timedelta from airflow.models import DAG from airflow.contrib.operators.postgres_to_gcs_operator import PostgresToGoogleCloudStorageOperator from airflow.contrib.operators.dataproc_operator import DataprocClusterCreateOperator from airflow.contrib.operators.dataproc_operator import DataprocClusterDeleteOperator from airflow.contrib.operators.dataproc_operator import DataProcPySparkOperator from operators.http_to_gcs import HttpToGcsOperator import pendulum from datetime import datetime args = { 'owner': 'JDreijer', 'start_date': datetime(2019, 11, 26), 'end_date': datetime(2019, 11, 28), 'schedule_interval': '@daily', 'depends_on_past': False } dag = DAG( dag_id='sparkdag', default_args=args, ) t1 = PostgresToGoogleCloudStorageOperator( task_id='from_pg_to_gcs', postgres_conn_id='postgres_default', sql='select * from land_registry_price_paid_uk WHERE transfer_date = \'{{ ds }}\'', bucket='spark_bucket_jd', filename='postgres-transfers-{{ ds }}.csv', google_cloud_storage_conn_id='google_cloud_storage_default', dag=dag ) t2 = HttpToGcsOperator( task_id='from_api_to_gcs', endpoint='history?start_at={{ yesterday_ds }}&end_at={{ ds }}&symbols=EUR&base=GBP', gcs_path='api-exchangerate-{{ ds }}.json', gcs_bucket='spark_bucket_jd', http_conn_id='exchange_rates_api', dag=dag ) t3 = DataprocClusterCreateOperator( task_id='create_dataproc', cluster_name='analyse-pricing-{{ ds }}', project_id='afspfeb3-07be9fd3ffa2687ea1891', num_workers=2, zone='europe-west4-a', dag=dag ) t4 = DataProcPySparkOperator( dag=dag, task_id='run_spark_job', main='gs://spark_bucket_jd/build_statistics.py', cluster_name='analyse-pricing-{{ ds }}', job_name='analyse-pricing' ) tx = DataprocClusterDeleteOperator( dag=dag, task_id='delete_dataproc', cluster_name='analyse-pricing-{{ ds }}', project_id='afspfeb3-07be9fd3ffa2687ea1891' ) [t1, t2] >> t3 >> t4 >> tx
[ "jdreijer@schubergphilis.com" ]
jdreijer@schubergphilis.com
5596faf42b17bba57856e691b2314449fe742aa5
2986d9712ffd124f38a7026fc419aedd6cf8fb6b
/main/migrations/0004_auto_20191130_2302.py
e0f1243324a73f6f261517fe22958d1d2aad1919
[]
no_license
leandroReyes/proyectodjango
a400206fa5e1c242b04e49e91cd887f5351e9e68
add2ab6aeddbe89264472e147630284ebba6c772
refs/heads/master
2023-04-26T15:50:03.546801
2019-12-08T03:03:13
2019-12-08T03:03:13
226,537,469
0
0
null
2023-04-21T20:41:31
2019-12-07T15:46:50
JavaScript
UTF-8
Python
false
false
539
py
# Generated by Django 2.2.7 on 2019-11-30 23:02 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('main', '0003_auto_20191130_2301'), ] operations = [ migrations.AlterField( model_name='paciente', name='edad', field=models.IntegerField(), ), migrations.AlterField( model_name='pninno', name='estatura', field=models.FloatField(), ), ]
[ "leandro.reyes.castillo@gmail.com" ]
leandro.reyes.castillo@gmail.com
7989a8fa946a7e31f630ae9c36202f2e4bb708f5
b2b88e602e7354c0d1d32a1f27fa148862884128
/gad.py
2f95325690fb91b3e0e8077bd92e445384fa0e09
[]
no_license
pramod-Paratabadi/Gender_AgePrediction
897f5c0fc865524b8d21b37d1b3049281926100f
2794231548e28b1df23783f3736a81829a5f7966
refs/heads/master
2022-04-26T01:32:09.604919
2020-04-27T03:23:00
2020-04-27T03:23:00
259,193,266
0
0
null
null
null
null
UTF-8
Python
false
false
2,626
py
import cv2 import math import argparse def highlightFace(net, frame, conf_threshold=0.7): frameOpencvDnn=frame.copy() frameHeight=frameOpencvDnn.shape[0] frameWidth=frameOpencvDnn.shape[1] blob=cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False) net.setInput(blob) detections=net.forward() faceBoxes=[] for i in range(detections.shape[2]): confidence=detections[0,0,i,2] if confidence>conf_threshold: x1=int(detections[0,0,i,3]*frameWidth) y1=int(detections[0,0,i,4]*frameHeight) x2=int(detections[0,0,i,5]*frameWidth) y2=int(detections[0,0,i,6]*frameHeight) faceBoxes.append([x1,y1,x2,y2]) cv2.rectangle(frameOpencvDnn, (x1,y1), (x2,y2), (0,255,0), int(round(frameHeight/150)), 8) return frameOpencvDnn,faceBoxes # parser=argparse.ArgumentParser() # parser.add_argument('--image') # args=parser.parse_args() faceProto="opencv_face_detector.pbtxt" faceModel="opencv_face_detector_uint8.pb" ageProto="age_deploy.prototxt" ageModel="age_net.caffemodel" genderProto="gender_deploy.prototxt" genderModel="gender_net.caffemodel" MODEL_MEAN_VALUES=(78.4263377603, 87.7689143744, 114.895847746) ageList=['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)'] genderList=['Male','Female'] faceNet=cv2.dnn.readNet(faceModel,faceProto) ageNet=cv2.dnn.readNet(ageModel,ageProto) genderNet=cv2.dnn.readNet(genderModel,genderProto) video=cv2.VideoCapture("sp.jpg") padding=20 while cv2.waitKey(1)<0: hasFrame,frame=video.read() if not hasFrame: cv2.waitKey() break resultImg,faceBoxes=highlightFace(faceNet,frame) if not faceBoxes: print("No face detected") for faceBox in faceBoxes: face=frame[max(0,faceBox[1]-padding): min(faceBox[3]+padding,frame.shape[0]-1),max(0,faceBox[0]-padding) :min(faceBox[2]+padding, frame.shape[1]-1)] blob=cv2.dnn.blobFromImage(face, 1.0, (227,227), MODEL_MEAN_VALUES, swapRB=False) genderNet.setInput(blob) genderPreds=genderNet.forward() gender=genderList[genderPreds[0].argmax()] print(f'Gender: {gender}') 
ageNet.setInput(blob) agePreds=ageNet.forward() age=ageList[agePreds[0].argmax()] print(f'Age: {age[1:-1]} years') cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,255,255), 2, cv2.LINE_AA) cv2.imshow("Detecting age and gender", resultImg) cv2.waitKey()
[ "pramodp151999@gmail.com" ]
pramodp151999@gmail.com
8d99e844f456c608e8c0f84c06b8644e99e75ce6
2b20f132cbcb6f14f13ef3412302fb6926a506d9
/format_to_pandas.py
e32d9d0077abbf12178f1800159e3e2e9b439ea1
[ "Apache-2.0" ]
permissive
nfrumkin/forecast-prometheus
a200a140cf7705fcb8a9acf4f7a1c3f13e679c75
fae241ec4303992ed06df67cbbd8118622e9750b
refs/heads/master
2020-03-26T09:18:37.156900
2018-08-23T20:33:00
2018-08-23T20:33:00
144,744,292
119
26
null
null
null
null
UTF-8
Python
false
false
6,699
py
import json
import pandas as pd
import fnmatch
import os
import bz2
import pickle
import argparse
import gc


def load_files(files, file_format):
    """Read metric files and convert them to pandas DataFrames.

    Each file holds a JSON list of packets shaped like
    ``{"metric": {...}, "y": [[timestamp, value], ...]}``.  Packets with the
    same metric metadata are concatenated into a single DataFrame with
    columns ``ds`` (datetime) and ``y`` (numeric).

    Returns a dict mapping ``str(metric-metadata) -> DataFrame``.
    """
    dfs = {}
    for file in files:
        # ".json" files are plain text; anything else is assumed bz2.
        if file_format == ".json":
            f = open(file, 'rb')
        else:
            f = bz2.BZ2File(file, 'rb')
        try:
            jsons = json.load(f)
        finally:
            f.close()
        # Iterate through packets in the file.
        for pkt in jsons:
            df = pd.DataFrame.from_dict(pkt["y"])
            df = df.rename(columns={0: "ds", 1: "y"})
            df["ds"] = pd.to_datetime(df["ds"], unit='s')
            df = df.sort_values(by=["ds"])
            # Non-numeric samples become NaN and are dropped.
            df["y"] = pd.to_numeric(df["y"], errors='coerce')
            df = df.dropna()
            md = str(pkt["metric"])
            # FIX: DataFrame.append was deprecated (removed in pandas 2.0);
            # use pd.concat and an explicit membership test instead of a
            # bare try/except.
            if md in dfs:
                dfs[md] = pd.concat([dfs[md], df], ignore_index=True)
            else:
                dfs[md] = df
    return dfs


def collapse_to_unique(dfs_master, dfs_new):
    """Merge *dfs_new* into *dfs_master* keyed by metric metadata.

    Series whose metadata already exists in *dfs_master* are appended to it
    (in place); the rest are returned separately.

    Returns ``(dfs_master, dfs_remaining)``.
    """
    dfs_remaining = {}
    for md, df in dfs_new.items():
        if md in dfs_master:
            # FIX: pd.concat replaces the removed DataFrame.append.
            dfs_master[md] = pd.concat([dfs_master[md], df],
                                       ignore_index=True)
        else:
            dfs_remaining[md] = df
    return dfs_master, dfs_remaining


def save_checkpoint(pds, file):
    """Pickle *pds* to *file* (a ``.pkl`` suffix is added if missing).

    Returns the filename actually written.
    """
    if file[-4:] != ".pkl":
        file = file + ".pkl"
    with open(file, "wb") as f:
        pickle.dump(pds, f)
    return file


def load_checkpoint(file):
    """Load and return the object pickled in *file*."""
    with open(file, "rb") as f:
        return pickle.load(f)


def convert_to_pandas(files, file_format, batch_size):
    """Load *files* in batches, checkpointing to disk between batches.

    Phase 1 converts each batch to DataFrames and saves it as
    ``raw_<i>.pkl``.  Phase 2 re-reads the raw checkpoints and folds them
    into ``collapsed_<i>.pkl`` files holding unique time series.
    """
    checkpoints = []
    # Split the file list into batches of batch_size.
    batches = [files[batch_size * i:batch_size * (i + 1)]
               for i in range(int(len(files) / batch_size) + 1)]
    print("num_batches", len(batches))
    i = 0
    for batch in batches:
        print(i)
        i += 1
        # Convert this batch and checkpoint it so memory stays bounded.
        pds_new = load_files(batch, file_format)
        cp = save_checkpoint(pds_new, "raw_" + str(i))
        checkpoints.append(cp)
        gc.collect()

    pds = []
    # Fold raw checkpoints into a collection of unique time series,
    # one collapsed checkpoint file at a time.
    collapsed_fs = []
    i = 0
    for cp in checkpoints:
        i += 1
        print(i)
        pds_new = load_checkpoint(cp)
        print(i)
        # Merge what we can into every existing collapsed checkpoint.
        for f in collapsed_fs:
            pds = load_checkpoint(f)
            pds, pds_new = collapse_to_unique(pds, pds_new)
            save_checkpoint(pds, f)
            gc.collect()
        # Whatever is left is genuinely new metadata: start a new file.
        if len(pds_new) > 0:
            f_new = save_checkpoint(pds_new, "collapsed_" + str(i))
            print("Generated ", f_new)
            collapsed_fs.append(f_new)
        print(i)
        gc.collect()
    return pds


def retrieve_filenames(path, file_format):
    """Return full paths of files under *path* ending in *file_format*."""
    filenames = []
    for file in os.listdir(path):
        if fnmatch.fnmatch(file, "*" + file_format):
            filenames.append(path + file)
    return filenames


def read_input(data_folder, metric, file_format, batch_size):
    """Locate the data files for *metric* and convert them to DataFrames.

    The metric type is auto-detected from the folder layout: a quantile/
    sub-folder means a summary, bucket/ means a histogram, otherwise a
    plain counter/gauge.  Only the main series (quantile/bucket/flat) is
    converted; count/ and sum/ sub-series are not used downstream.
    """
    folder = data_folder + metric + "/"
    files = os.listdir(folder)
    if "quantile" in files:
        # Summary metric: the actual series live under quantile/.
        filenames = retrieve_filenames(folder + "/quantile/", file_format)
    elif "bucket" in files:
        # Histogram metric: the actual series live under bucket/.
        filenames = retrieve_filenames(folder + "/bucket/", file_format)
    else:
        # Counter/gauge: files sit directly in the metric folder.
        filenames = retrieve_filenames(folder, file_format)

    return convert_to_pandas(filenames, file_format, batch_size)


def combine_checkpoints(master_file):
    """Merge all collapsed_*.pkl checkpoints into ``<master_file>.pkl``.

    Temporary raw_*.pkl / collapsed_*.pkl files created during conversion
    are deleted; an unreadable collapsed checkpoint is skipped (and kept on
    disk), matching the original best-effort behaviour.
    """
    df = {}
    for file in os.listdir():
        if fnmatch.fnmatch(file, "collapsed_*.pkl"):
            try:
                with open(file, "rb") as f:
                    dfs = pickle.load(f)
                df.update(dfs)
            except (OSError, pickle.UnpicklingError, EOFError):
                continue
            # FIX: os.remove instead of shelling out with os.system("rm ...").
            os.remove(file)
        elif fnmatch.fnmatch(file, "raw_*.pkl"):
            os.remove(file)
    with open(master_file + ".pkl", "wb") as f:
        pickle.dump(df, f)


def main():
    # NOTE: relies on the module-level `args` bound in the __main__ guard.
    print("Formatting Data")
    pd_frames = read_input(args.input, args.metric, args.format,
                           args.batch_size)
    print("Conversion successful")
    master_file = args.output + args.metric
    combine_checkpoints(master_file)
    print("Saved data:", master_file)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="format time series data into an array of pandas dataframes. input folder architecture: input folder must contain a folder with the metric name. Inside the metric folder will be sum/, count/, quant/, or bucket/ according to the metric_type. ex: data/metric_name/files. data/ is input directory")
    parser.add_argument("--metric", type=str, help='metric name', required=True)
    parser.add_argument("--format", default='.json.bz2', help='choose input file format (.json.bz2 or .json)')
    parser.add_argument("-i", "--input", default='', help='input directory')
    parser.add_argument("-o", "--output", default='', help='output directory')
    parser.add_argument("--batch_size", default=1, type=int, help="number of data files to process at once. use this flag if handling big dataset (recommended: 20)")
    args = parser.parse_args()
    main()
[ "nfrumkin@redhat.com" ]
nfrumkin@redhat.com
3d11424b10d24e306887deedfdff84244709c173
e71a769c99648e99b14166000b4784c9dd573f5c
/Section 2 Django Introduction Project/first/settings.py
d7785d11e23b8bcef743488fb458e6aded039c3e
[ "BSD-3-Clause" ]
permissive
kmranrg/WebApp_BackendAPI_MobileApps_React_Django
70850be07f28c27aaf4828adf8995eeb463ceff4
d7e2d0f68ed3dda0127cb001a2e6d01327119eb9
refs/heads/master
2023-02-07T23:48:12.646350
2020-06-20T15:18:32
2020-06-20T15:18:32
245,768,863
1
2
BSD-3-Clause
2023-01-26T21:00:43
2020-03-08T07:10:59
Python
UTF-8
Python
false
false
3,272
py
""" Django settings for first project. Generated by 'django-admin startproject' using Django 3.0.3. For more information on this file, see https://docs.djangoproject.com/en/3.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.0/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'ak1(ow9*2*z-mghv8dvdl9lbrpb#tgos5#l(6)dci%b$dnj!f-' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', 'rest_framework.authtoken', 'demo', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'first.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': ['templates'], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'first.wsgi.application' # Database # https://docs.djangoproject.com/en/3.0/ref/settings/#databases DATABASES = { 'default': 
{ 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.AllowAny', ) } # Password validation # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.0/howto/static-files/ STATIC_URL = '/static/'
[ "kmranrg@gmail.com" ]
kmranrg@gmail.com
265743f910dbb4abac5c1ec1331465c3b608f7a6
5a8eeb361feb99fdc54341c338b07fff93740050
/viirs/horidist/colorbar.py
163c4b3ffdecf9d37bd3ee065f2f9accb6a127c1
[]
no_license
gss2002/Team
2a85a2a0a9b4d1671451cf187a844a83e4952e3a
5ceb4da52245a3d6eb658fba67fd893b9792f5e4
refs/heads/master
2021-01-21T07:39:29.253192
2016-04-18T21:30:27
2016-04-18T21:30:27
null
0
0
null
null
null
null
UTF-8
Python
false
false
953
py
from pylab import * from numpy import * from matplotlib.colors import LinearSegmentedColormap class nlcmap(LinearSegmentedColormap): """A nonlinear colormap""" name = 'nlcmap' def __init__(self, cmap, levels): self.cmap = cmap # @MRR: Need to add N for backend self.N = cmap.N self.monochrome = self.cmap.monochrome self.levels = asarray(levels, dtype='float64') self._x = (self.levels-self.levels.min()) / (self.levels.max()-self.levels.min()) self._y = linspace(0.0, 1.0, len(self.levels)) #@MRR Need to add **kw for 'bytes' def __call__(self, xi, alpha=1.0, **kw): """docstring for fname""" # @MRR: Appears broken? # It appears something's wrong with the # dimensionality of a calculation intermediate #yi = stineman_interp(xi, self._x, self._y) yi = interp(xi, self._x, self._y) return self.cmap(yi, alpha)
[ "karrey05@gmail.com" ]
karrey05@gmail.com
5478f0ed6e84d36a081197e9e17ed5b0e151144f
7fa08c93ff0caa4c86d4fa1727643331e081c0d0
/brigid_api_client/models/__init__.py
116e0784876b77021f16b674f520d34edb77dc10
[ "LicenseRef-scancode-warranty-disclaimer", "MIT" ]
permissive
caltechads/brigid-api-client
760768c05280a4fb2f485e27c05f6ae24fbb7c6f
3e885ac9e7b3c00b8a9e0cc1fb7b53b468d9e10a
refs/heads/master
2023-03-23T03:11:02.446720
2021-03-13T00:47:03
2021-03-13T00:47:03
338,424,261
0
0
null
null
null
null
UTF-8
Python
false
false
7,175
py
""" Contains all the data models used in inputs/outputs """ from .action_response import ActionResponse from .action_response_errors import ActionResponseErrors from .application import Application from .applications_list_expand import ApplicationsListExpand from .applications_retrieve_expand import ApplicationsRetrieveExpand from .aws_account import AWSAccount from .aws_accounts_list_expand import AwsAccountsListExpand from .aws_accounts_retrieve_expand import AwsAccountsRetrieveExpand from .aws_clusters_list_expand import AwsClustersListExpand from .aws_clusters_retrieve_expand import AwsClustersRetrieveExpand from .aws_services_list_expand import AwsServicesListExpand from .aws_services_retrieve_expand import AwsServicesRetrieveExpand from .aws_tasks_list_expand import AwsTasksListExpand from .aws_tasks_retrieve_expand import AwsTasksRetrieveExpand from .aws_vpcs_list_expand import AwsVpcsListExpand from .aws_vpcs_retrieve_expand import AwsVpcsRetrieveExpand from .awsecs_cluster import AWSECSCluster from .awsecs_service import AWSECSService from .awsecs_task import AWSECSTask from .awsvpc import AWSVPC from .deployment import Deployment from .deployments_list_expand import DeploymentsListExpand from .deployments_retrieve_expand import DeploymentsRetrieveExpand from .docker_image_build import DockerImageBuild from .docker_image_builds_list_expand import DockerImageBuildsListExpand from .docker_image_builds_retrieve_expand import DockerImageBuildsRetrieveExpand from .ecosystem import Ecosystem from .ecosystems_list_expand import EcosystemsListExpand from .ecosystems_retrieve_expand import EcosystemsRetrieveExpand from .ecs_service_deploy import ECSServiceDeploy from .ecs_service_deploys_list_expand import EcsServiceDeploysListExpand from .ecs_service_deploys_retrieve_expand import EcsServiceDeploysRetrieveExpand from .ecs_task_deploy import ECSTaskDeploy from .ecs_task_deploys_list_expand import EcsTaskDeploysListExpand from .ecs_task_deploys_retrieve_expand 
import EcsTaskDeploysRetrieveExpand from .environment import Environment from .environments_list_expand import EnvironmentsListExpand from .environments_retrieve_expand import EnvironmentsRetrieveExpand from .organization import Organization from .organizations_list_expand import OrganizationsListExpand from .organizations_retrieve_expand import OrganizationsRetrieveExpand from .paginated_application_list import PaginatedApplicationList from .paginated_aws_account_list import PaginatedAWSAccountList from .paginated_awsecs_cluster_list import PaginatedAWSECSClusterList from .paginated_awsecs_service_list import PaginatedAWSECSServiceList from .paginated_awsecs_task_list import PaginatedAWSECSTaskList from .paginated_awsvpc_list import PaginatedAWSVPCList from .paginated_deployment_list import PaginatedDeploymentList from .paginated_docker_image_build_list import PaginatedDockerImageBuildList from .paginated_ecosystem_list import PaginatedEcosystemList from .paginated_ecs_service_deploy_list import PaginatedECSServiceDeployList from .paginated_ecs_task_deploy_list import PaginatedECSTaskDeployList from .paginated_environment_list import PaginatedEnvironmentList from .paginated_organization_list import PaginatedOrganizationList from .paginated_person_type_list import PaginatedPersonTypeList from .paginated_pipeline_invocation_list import PaginatedPipelineInvocationList from .paginated_pipeline_list import PaginatedPipelineList from .paginated_release_list import PaginatedReleaseList from .paginated_site_user_list import PaginatedSiteUserList from .paginated_software_list import PaginatedSoftwareList from .paginated_step_invocation_list import PaginatedStepInvocationList from .paginated_step_list import PaginatedStepList from .paginated_step_type_list import PaginatedStepTypeList from .paginated_team_list import PaginatedTeamList from .paginated_test_result_list import PaginatedTestResultList from .patched_application import PatchedApplication from .patched_aws_account 
import PatchedAWSAccount from .patched_awsecs_cluster import PatchedAWSECSCluster from .patched_awsecs_service import PatchedAWSECSService from .patched_awsecs_task import PatchedAWSECSTask from .patched_awsvpc import PatchedAWSVPC from .patched_deployment import PatchedDeployment from .patched_docker_image_build import PatchedDockerImageBuild from .patched_ecosystem import PatchedEcosystem from .patched_ecs_service_deploy import PatchedECSServiceDeploy from .patched_ecs_task_deploy import PatchedECSTaskDeploy from .patched_environment import PatchedEnvironment from .patched_organization import PatchedOrganization from .patched_person_type import PatchedPersonType from .patched_pipeline import PatchedPipeline from .patched_pipeline_invocation import PatchedPipelineInvocation from .patched_release import PatchedRelease from .patched_site_user import PatchedSiteUser from .patched_software import PatchedSoftware from .patched_step import PatchedStep from .patched_step_invocation import PatchedStepInvocation from .patched_step_type import PatchedStepType from .patched_team import PatchedTeam from .patched_test_result import PatchedTestResult from .person_type import PersonType from .pipeines_list_expand import PipeinesListExpand from .pipeines_retrieve_expand import PipeinesRetrieveExpand from .pipeline import Pipeline from .pipeline_invocation import PipelineInvocation from .pipeline_invocations_list_expand import PipelineInvocationsListExpand from .pipeline_invocations_retrieve_expand import PipelineInvocationsRetrieveExpand from .pipeline_step_invocations_list_expand import PipelineStepInvocationsListExpand from .pipeline_step_invocations_retrieve_expand import ( PipelineStepInvocationsRetrieveExpand, ) from .pipeline_steps_list_expand import PipelineStepsListExpand from .pipeline_steps_retrieve_expand import PipelineStepsRetrieveExpand from .release import Release from .release_import import ReleaseImport from .release_import_all import ReleaseImportAll from 
.release_import_all_response import ReleaseImportAllResponse from .release_import_all_response_errors import ReleaseImportAllResponseErrors from .releases_list_expand import ReleasesListExpand from .releases_retrieve_expand import ReleasesRetrieveExpand from .schema_retrieve_format import SchemaRetrieveFormat from .schema_retrieve_response_200 import SchemaRetrieveResponse_200 from .service_enum import ServiceEnum from .site_user import SiteUser from .site_users_list_expand import SiteUsersListExpand from .site_users_retrieve_expand import SiteUsersRetrieveExpand from .software import Software from .software_import import SoftwareImport from .software_list_expand import SoftwareListExpand from .software_retrieve_expand import SoftwareRetrieveExpand from .status_enum import StatusEnum from .step import Step from .step_invocation import StepInvocation from .step_type import StepType from .team import Team from .teams_list_expand import TeamsListExpand from .teams_retrieve_expand import TeamsRetrieveExpand from .test_result import TestResult from .test_results_list_expand import TestResultsListExpand from .test_results_retrieve_expand import TestResultsRetrieveExpand
[ "cmalek@caltech.edu" ]
cmalek@caltech.edu
ad39b4047c556d1d07c6e8d63f416c3a80fc3184
97aa47340e99f7be364f27cba87e499d942eab43
/find_factorial.py
9d137b5c4cc6e3b31248b88d7307a774fa3a810f
[]
no_license
eryilmazysf/assignments-
cbe0d0d761a0a3da819c456ea0d9accb86175a35
c1b3084b39ea72ae14fdc4c564d94c26ca198806
refs/heads/master
2022-12-11T00:22:59.427632
2020-09-02T15:26:12
2020-09-02T15:26:12
277,168,892
0
0
null
null
null
null
UTF-8
Python
false
false
372
py
print(""" ************************** FİND MY FACTORİAL VALUE to quit enter "q" ************************** """) while True: number=int(input("please enter number to find fatorial value:")) if number=="q": print("see you") break factorial = 1 for i in range(2,number+1): factorial*=i print(factorial)
[ "yusuferyilmaz1819@gmail.com" ]
yusuferyilmaz1819@gmail.com