Dataset schema (column, dtype, min, max):

    commit          stringlengths    40    40
    subject         stringlengths    1     3.25k
    old_file        stringlengths    4     311
    new_file        stringlengths    4     311
    old_contents    stringlengths    0     26.3k
    lang            stringclasses    3 values
    proba           float64          0     1
    diff            stringlengths    0     7.82k
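The sample rows below list their fields in this column order: commit, subject, old_file, new_file, old_contents (the pre-change file, with its newlines collapsed onto one line), lang, proba, diff. The diff values appear to be character-offset hunks whose inserted and deleted text percent-encodes special characters (%0A for a newline, %25 for a literal percent sign), so a hunk becomes readable with a plain URL-unquote. A minimal sketch, using the hunk from the cbsyst "version bump" row below:

    from urllib.parse import unquote

    # Hunk text copied verbatim from the cbsyst row; %0A encodes the trailing newline.
    hunk = "@@ -87,7 +87,11 @@ 0.3. -7 +8-dev '%0A"

    # unquote() turns the percent escapes back into literal characters,
    # leaving an ordinary context/-/+ hunk around the version string.
    print(unquote(hunk))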
1b625ee89032e65e3510286d47a810a893f71e82
Fix a bug in how getPlatform is accessed
TUI/Main.py
TUI/Main.py
#!/usr/bin/env python """Telescope User Interface. This is the main routine that calls everything else. 2003-02-27 ROwen First version with history. Modified to use the new Hub authorization 2003-03-20 ROwen Added DIS 2003-03-25 ROwen Moved TCC widgets into TCC subdirectory; modified to load TUI windows from TUIWindow.py and to auto-load windows from TCC, Inst and Misc directories 2003-04-04 ROwen Fixed auto-load code to be platform-independent. 2003-04-22 ROwen Modified to not auto-load window modules whose file name begins with ".". 2003-06-09 ROwen Modified to use TUIModel. 2003-06-18 ROwen Modified to print a full traceback for unexpected errors; modified to exclude SystemExit and KeyboardInterrupt when testing for general exceptions. 2003-12-17 ROwen Modified to auto load windows from all of the TCC package (instead of specific sub-packages) and also from TUISharedAdditions and TUIUserAdditions. 2004-01-23 ROwen Modified to not rely on modules being loaded from the same dir as this file. This simplifies generating a Mac standalone app. Modified to load *all* windows in TUI, rather than searching specific directories. Improved error handling of loadWindows: - if TUI cannot be loaded, fail - reject module names with "." in them (both changes help debug problems with making standalone apps). 2004-02-05 ROwen Changed the algorithm for finding user additions. 2004-02-06 ROwen Adapted to RO.OS.walkDirs->RO.OS.findFiles. 2004-02-17 ROwen Changed to call buildMenus instead of buildAutoMenus in the "None.Status" toplevel. . 2004-03-03 ROwen Modified to print the version number during startup. 2004-03-09 ROwen Bug fix: unix code was broken. 2004-05-17 ROwen Modified to be runnable by an external script (e.g. runtui.py). Modified to print version to log rather than stdout. 2004-07-09 ROwen Modified to use TUI.TUIPaths 2004-10-06 ROwen Modified to use TUI.MenuBar. 2005-06-16 ROwen Modified to use improved KeyDispatcher.logMsg. 2005-07-22 ROwen Modified to hide tk's console window if present. 2005-08-01 ROwen Modified to use TUI.LoadStdModules, a step towards allowing TUI code to be run from a zip file. 2005-08-08 ROwen Moved loadWindows and findWindowsModules to WindowModuleUtil.py 2005-09-22 ROwen Modified to use TUI.TUIPaths.getAddPaths instead of getTUIPaths. 2006-10-25 ROwen Modified to not send dispatcher to BackgroundTasks. 2007-01-22 ROwen Modified to make sure sys.executable is absolute, as required for use with pyinstaller 1.3. 2007-12-20 ROwen Import and configure matplotlib here and stop configuring it elsewhere. This works around a problem in matplotlib 0.91.1: "use" can't be called after "import matplotlib.backends". 2009-08-06 ROwen Stopped setting matplotlib numerix parameter; it is obsolete as of matplotlib 0.99.0. 2009-11-05 ROwen Fix matplotlib warning by calling use before loading any TUI modules. 2010-03-12 ROwen Changed to use Models.getModel. 2010-05-05 ROwen Configure twisted.internet to use Tk right away. Formerly that was done in the TUI model, but by then various parts of twisted were imported so this seems safer. 2010-05-10 ROwen Fix ticket #825: main tk window visible (broken in the 2010-05-05 changes). 2010-05-21 ROwen Undo the changes of 2010-05-05 and 2010-05-10 since it broke test code. 2010-11-17 ROwen Suppress numpy division warnings. 2010-11-18 ROwen Disabled all numpy warnings to suppress "Warning: invalid value encountered in divide" (simply disabling divide warnings did not do it). 
2012-07-18 ROwen Modified to use RO 3.0 and optionally communicate using Twisted framework. 2013-07-19 ROwen Modified to print some info to stdout (e.g. the log) on startup. Modified to only show the version name, not version date, in the log at startup. """ import os import sys import time import Tkinter import numpy numpy.seterr(all="ignore") # suppress "Warning: invalid value encountered in divide" import matplotlib matplotlib.use("TkAgg") # controls the background of the axis label regions (which default to gray) matplotlib.rc("figure", facecolor="white") matplotlib.rc("axes", titlesize="medium") # default is large, which is too big matplotlib.rc("legend", fontsize="medium") # default is large, which is too big import RO.Comm.Generic RO.Comm.Generic.setFramework("tk") import TUI.BackgroundTasks import TUI.LoadStdModules import TUI.MenuBar import TUI.TUIPaths import TUI.Models import TUI.WindowModuleUtil import TUI.Version # hack for pyinstaller 1.3 sys.executable = os.path.abspath(sys.executable) def runTUI(): """Run TUI. """ # Hide the Tk root; must do this before setting up preferences (which is done by the tui model). tkRoot = Tkinter.Tk() tkRoot.withdraw() # if console exists, hide it try: tkRoot.tk.call("console", "hide") except Tkinter.TclError: pass # create and obtain the TUI model tuiModel = TUI.Models.getModel("tui") # set up background tasks backgroundHandler = TUI.BackgroundTasks.BackgroundKwds() # get locations to look for windows addPathList = TUI.TUIPaths.getAddPaths() # add additional paths to sys.path sys.path += addPathList TUI.LoadStdModules.loadAll() # load additional windows modules for winPath in addPathList: TUI.WindowModuleUtil.loadWindows( path = winPath, tlSet = tuiModel.tlSet, logFunc = tuiModel.logMsg, ) # add the main menu TUI.MenuBar.MenuBar() tuiModel.logMsg( "TUI Version %s: ready to connect" % (TUI.Version.VersionName,) ) startTimeStr = time.strftime("%Y-%m-%dT%H:%M:%S") platformStr = TUI.TUIModel.getPlatform() sys.stdout.write("TUI %s running on %s started %s\n" % (TUI.Version.VersionName, platformStr, startTimeStr)) tuiModel.reactor.run() if __name__ == "__main__": runTUI()
Python
0.001122
@@ -4938,16 +4938,60 @@ .Models%0A +from TUI.Models.TUIModel import getPlatform%0A import T @@ -6262,21 +6262,8 @@ r = -TUI.TUIModel. getP
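Decoded, the two hunks above import getPlatform directly and drop the module-qualified call; the old code called TUI.TUIModel.getPlatform() even though only TUI.Models is imported in this file. A before/after fragment (not a runnable module):

    # before
    import TUI.Models
    platformStr = TUI.TUIModel.getPlatform()

    # after
    import TUI.Models
    from TUI.Models.TUIModel import getPlatform
    platformStr = getPlatform()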
d48a85f62d982af3fc16dd921ac13a28640e39d7
version bump
cbsyst/__init__.py
cbsyst/__init__.py
from cbsyst.cbsyst import * from cbsyst.helpers import data_out, Bunch __version__ = '0.3.7'
Python
0.000001
@@ -87,7 +87,11 @@ 0.3. -7 +8-dev '%0A
cf67c8fc98048014cf2dfce7b2f4fa437ed92500
Check if user object is set before calling is_authenticated on it.
session_csrf/__init__.py
session_csrf/__init__.py
"""CSRF protection without cookies.""" import functools import hashlib from django.conf import settings from django.core.cache import cache from django.middleware import csrf as django_csrf from django.utils import crypto from django.utils.cache import patch_vary_headers ANON_COOKIE = getattr(settings, 'ANON_COOKIE', 'anoncsrf') ANON_TIMEOUT = getattr(settings, 'ANON_TIMEOUT', 60 * 60 * 2) # 2 hours. ANON_ALWAYS = getattr(settings, 'ANON_ALWAYS', False) PREFIX = 'sessioncsrf:' # This overrides django.core.context_processors.csrf to dump our csrf_token # into the template context. def context_processor(request): # Django warns about an empty token unless you call it NOTPROVIDED. return {'csrf_token': getattr(request, 'csrf_token', 'NOTPROVIDED')} def prep_key(key): """ In case a bogus request comes in with a large or wrongly formatted massive anoncsrf cookie value, memcache will raise a MemcachedKeyLengthError or MemcachedKeyCharacterError. We hash the key here in order to have a predictable length and character set. """ prefixed = PREFIX + key return hashlib.md5(prefixed).hexdigest() class CsrfMiddleware(object): # csrf_processing_done prevents checking CSRF more than once. That could # happen if the requires_csrf_token decorator is used. def _accept(self, request): request.csrf_processing_done = True def _reject(self, request, reason): return django_csrf._get_failure_view()(request, reason) def process_request(self, request): """ Add a CSRF token to the session for logged-in users. The token is available at request.csrf_token. """ if hasattr(request, 'csrf_token'): return if request.user.is_authenticated(): if 'csrf_token' not in request.session: token = django_csrf._get_new_csrf_key() request.csrf_token = request.session['csrf_token'] = token else: request.csrf_token = request.session['csrf_token'] else: key = None token = '' if ANON_COOKIE in request.COOKIES: key = request.COOKIES[ANON_COOKIE] token = cache.get(prep_key(key), '') if ANON_ALWAYS: if not key: key = django_csrf._get_new_csrf_key() if not token: token = django_csrf._get_new_csrf_key() request._anon_csrf_key = key cache.set(prep_key(key), token, ANON_TIMEOUT) request.csrf_token = token def process_view(self, request, view_func, args, kwargs): """Check the CSRF token if this is a POST.""" if getattr(request, 'csrf_processing_done', False): return # Allow @csrf_exempt views. if getattr(view_func, 'csrf_exempt', False): return if (getattr(view_func, 'anonymous_csrf_exempt', False) and not request.user.is_authenticated()): return # Bail if this is a safe method. if request.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'): return self._accept(request) # The test client uses this to get around CSRF processing. if getattr(request, '_dont_enforce_csrf_checks', False): return self._accept(request) # Try to get the token from the POST and fall back to looking at the # X-CSRFTOKEN header. user_token = request.POST.get('csrfmiddlewaretoken', '') if user_token == '': user_token = request.META.get('HTTP_X_CSRFTOKEN', '') request_token = getattr(request, 'csrf_token', '') # Check that both strings aren't empty and then check for a match. 
if not ((user_token or request_token) and crypto.constant_time_compare(user_token, request_token)): reason = django_csrf.REASON_BAD_TOKEN django_csrf.logger.warning( 'Forbidden (%s): %s' % (reason, request.path), extra=dict(status_code=403, request=request)) return self._reject(request, reason) else: return self._accept(request) def process_response(self, request, response): if hasattr(request, '_anon_csrf_key'): # Set or reset the cache and cookie timeouts. response.set_cookie(ANON_COOKIE, request._anon_csrf_key, max_age=ANON_TIMEOUT, httponly=True, secure=request.is_secure()) patch_vary_headers(response, ['Cookie']) return response def anonymous_csrf(f): """Decorator that assigns a CSRF token to an anonymous user.""" @functools.wraps(f) def wrapper(request, *args, **kw): use_anon_cookie = not (request.user.is_authenticated() or ANON_ALWAYS) if use_anon_cookie: if ANON_COOKIE in request.COOKIES: key = request.COOKIES[ANON_COOKIE] token = cache.get(prep_key(key)) or django_csrf._get_new_csrf_key() else: key = django_csrf._get_new_csrf_key() token = django_csrf._get_new_csrf_key() cache.set(prep_key(key), token, ANON_TIMEOUT) request.csrf_token = token response = f(request, *args, **kw) if use_anon_cookie: # Set or reset the cache and cookie timeouts. response.set_cookie(ANON_COOKIE, key, max_age=ANON_TIMEOUT, httponly=True, secure=request.is_secure()) patch_vary_headers(response, ['Cookie']) return response return wrapper def anonymous_csrf_exempt(f): """Like @csrf_exempt but only for anonymous requests.""" f.anonymous_csrf_exempt = True return f # Replace Django's middleware with our own. def monkeypatch(): from django.views.decorators import csrf as csrf_dec django_csrf.CsrfViewMiddleware = CsrfMiddleware csrf_dec.csrf_protect = csrf_dec.decorator_from_middleware(CsrfMiddleware)
Python
0
@@ -1737,32 +1737,61 @@ turn%0A if +hasattr(request, 'user') and request.user.is_ @@ -3025,16 +3025,46 @@ and not +(hasattr(request, 'user') and request. @@ -3087,16 +3087,17 @@ cated()) +) :%0A @@ -4898,16 +4898,45 @@ = not ( +hasattr(request, 'user') and request. @@ -4958,19 +4958,25 @@ icated() - or +) and not ANON_AL @@ -4979,17 +4979,16 @@ N_ALWAYS -) %0A
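Decoded, each hunk wraps the is_authenticated() call in a hasattr() guard so the middleware and decorator no longer assume that AuthenticationMiddleware has attached request.user. The three call sites end up roughly as (fragments, long line kept as in the patch):

    # CsrfMiddleware.process_request
    if hasattr(request, 'user') and request.user.is_authenticated():
        ...

    # CsrfMiddleware.process_view
    if (getattr(view_func, 'anonymous_csrf_exempt', False) and
            not (hasattr(request, 'user') and request.user.is_authenticated())):
        return

    # anonymous_csrf decorator
    use_anon_cookie = not (hasattr(request, 'user') and request.user.is_authenticated()) and not ANON_ALWAYS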
d52c4340a62802bcd0fcbd68516c5ac66fb10436
Update function name used in the streamtester
ftfy/streamtester/__init__.py
ftfy/streamtester/__init__.py
""" This file defines a general method for evaluating ftfy using data that arrives in a stream. A concrete implementation of it is found in `twitter_tester.py`. """ from __future__ import print_function, unicode_literals from ftfy.fixes import fix_text_encoding from ftfy.chardata import possible_encoding class StreamTester: """ Take in a sequence of texts, and show the ones that will be changed by ftfy. This will also periodically show updates, such as the proportion of texts that changed. """ def __init__(self): self.num_fixed = 0 self.count = 0 def check_ftfy(self, text): """ Given a single text input, check whether `ftfy.fix_text_encoding` would change it. If so, display the change. """ self.count += 1 if not possible_encoding(text, 'ascii'): fixed = fix_text_encoding(text) if text != fixed: # possibly filter common bots before printing print(u'\nText:\t{text}\nFixed:\t{fixed}\n'.format( text=text, fixed=fixed )) self.num_fixed += 1 # Print status updates once in a while if self.count % 100 == 0: print('.', end='', flush=True) if self.count % 10000 == 0: print('\n%d/%d fixed' % (self.num_fixed, self.count))
Python
0
@@ -237,29 +237,24 @@ import fix_ -text_ encoding%0Afro @@ -859,29 +859,24 @@ fixed = fix_ -text_ encoding(tex
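Decoded, both hunks simply drop the text_ segment from the name, tracking the rename of the fixer function in ftfy:

    # before
    from ftfy.fixes import fix_text_encoding
    fixed = fix_text_encoding(text)

    # after
    from ftfy.fixes import fix_encoding
    fixed = fix_encoding(text)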
cd9b16dd5bd9ae41fb8cf7a3f8a2b02dfeb227bf
set db for me
gamechat/gamechat/settings.py
gamechat/gamechat/settings.py
""" Django settings for gamechat project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os import dj_database_url BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '63v10%^prdclwsk*$%=82=0=rhp=p6s@j7p%^$7fp0nm1i4hxa' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] ACCOUNT_ACTIVATION_DAYS = 7 REGISTRATION_AUTO_LOGIN = True # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'chat', 'game_calendar', 'profiles', 'registration', 'sorl.thumbnail', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'gamechat.urls' WSGI_APPLICATION = 'gamechat.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': dj_database_url.config( default='postgres://postgres:@localhost:5432/game_chat_db' ) } EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'smtp.gmail.com' DEFAULT_FROM_EMAIL = 'gamechatsite@gmail.com' EMAIL_PORT = 587 EMAIL_USE_TLS = True EMAIL_HOST_USER = 'gamechatsite@gmail.com' EMAIL_HOST_PASSWORD = 'chatsite123' # 'default': { # 'ENGINE': 'django.db.backends.postgresql_psycopg2', # 'NAME': 'game_chat_db', # 'USER': os.environ.get('USER'), # } # } SECRET_KEY = os.environ.get('SECRET_KEY', 'secret') # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'gamechat/static') ] MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') TEMPLATE_DIRS = ( os.path.join(BASE_DIR, 'gamechat/templates'), os.path.join(BASE_DIR, 'profiles/templates'), os.path.join(BASE_DIR, 'chat/templates'), os.path.join(BASE_DIR, 'game_calendar/templates'), )
Python
0
@@ -1775,16 +1775,18 @@ abases%0A%0A +# DATABASE @@ -1791,16 +1791,18 @@ SES = %7B%0A +# 'def @@ -1832,24 +1832,25 @@ config(%0A +# default= @@ -1841,16 +1841,17 @@ + default= @@ -1905,14 +1905,18 @@ db'%0A +# )%0A +# %7D%0A%0AE @@ -2169,18 +2169,30 @@ ite123'%0A -# +DATABASES = %7B%0A 'def @@ -2200,18 +2200,16 @@ ult': %7B%0A -# @@ -2260,18 +2260,16 @@ copg2',%0A -# @@ -2292,18 +2292,16 @@ at_db',%0A -# @@ -2336,18 +2336,14 @@ '),%0A -# %7D%0A -# %7D%0A%0AS
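Decoded, the hunks comment out the dj_database_url block and un-comment the previously disabled local-Postgres block, so the active setting becomes (indentation reconstructed by hand, so it is approximate):

    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'game_chat_db',
            'USER': os.environ.get('USER'),
        }
    }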
7a786fd031c3faa057256abc5d9cb47618041696
Configure max build age on the monitoring side
checks.d/veneur.py
checks.d/veneur.py
import datetime from urlparse import urljoin import requests # project from checks import AgentCheck class Veneur(AgentCheck): VERSION_METRIC_NAME = 'veneur.deployed_version' BUILDAGE_METRIC_NAME = 'veneur.build_age' MAX_AGE_CHECK_NAME = 'veneur.build_age.fresh' # Check that the build is no more than one week old MAX_DEPLOYMENT_INTERVAL = 604800 def check(self, instance): success = 0 host = instance['host'] try: r = requests.get(urljoin(host, '/version')) sha = r.text success = 1 r = requests.get(urljoin(host, '/builddate')) builddate = datetime.datetime.fromtimestamp(int(r.text)) tdelta = datetime.datetime.now() - builddate if tdelta.seconds > self.MAX_DEPLOYMENT_INTERVAL: self.service_check(self.MAX_AGE_CHECK_NAME, AgentCheck.CRITICAL, message='Build date {0} is too old (build must be no more than {1} seconds old)'.format(builddate.strftime('%Y-%m-%d %H:%M:%S'), self.MAX_DEPLOYMENT_INTERVAL)) except: success = 0 raise finally: self.gauge(self.VERSION_METRIC_NAME, success, tags = ['sha:{0}'.format(sha)]) self.histogram(self.BUILDAGE_METRIC_NAME, tdelta.seconds)
Python
0
@@ -226,154 +226,8 @@ e'%0A%0A - MAX_AGE_CHECK_NAME = 'veneur.build_age.fresh'%0A%0A # Check that the build is no more than one week old%0A MAX_DEPLOYMENT_INTERVAL = 604800%0A%0A%0A @@ -616,336 +616,8 @@ te%0A%0A - if tdelta.seconds %3E self.MAX_DEPLOYMENT_INTERVAL:%0A self.service_check(self.MAX_AGE_CHECK_NAME, AgentCheck.CRITICAL,%0A message='Build date %7B0%7D is too old (build must be no more than %7B1%7D seconds old)'.format(builddate.strftime('%25Y-%25m-%25d %25H:%25M:%25S'), self.MAX_DEPLOYMENT_INTERVAL))%0A%0A
15a1cf906ae01a842e92442b59984953517c28c7
Fix bind_addr check in testing module
cheroot/testing.py
cheroot/testing.py
"""Pytest fixtures and other helpers for doing testing by end-users.""" from contextlib import closing import errno import socket import threading import time import pytest from six.moves import http_client import cheroot.server from cheroot.test import webtest import cheroot.wsgi EPHEMERAL_PORT = 0 NO_INTERFACE = None # Using this or '' will cause an exception ANY_INTERFACE_IPV4 = '0.0.0.0' ANY_INTERFACE_IPV6 = '::' config = { cheroot.wsgi.Server: { 'bind_addr': (NO_INTERFACE, EPHEMERAL_PORT), 'wsgi_app': None, }, cheroot.server.HTTPServer: { 'bind_addr': (NO_INTERFACE, EPHEMERAL_PORT), 'gateway': cheroot.server.Gateway, }, } def cheroot_server(server_factory): """Set up and tear down a Cheroot server instance.""" conf = config[server_factory].copy() bind_port = conf.pop('bind_addr')[-1] for interface in ANY_INTERFACE_IPV6, ANY_INTERFACE_IPV4: try: actual_bind_addr = (interface, bind_port) httpserver = server_factory( # create it bind_addr=actual_bind_addr, **conf ) except OSError: pass else: break threading.Thread(target=httpserver.safe_start).start() # spawn it while not httpserver.ready: # wait until fully initialized and bound time.sleep(0.1) yield httpserver httpserver.stop() # destroy it @pytest.fixture(scope='module') def wsgi_server(): """Set up and tear down a Cheroot WSGI server instance.""" for srv in cheroot_server(cheroot.wsgi.Server): yield srv @pytest.fixture(scope='module') def native_server(): """Set up and tear down a Cheroot HTTP server instance.""" for srv in cheroot_server(cheroot.server.HTTPServer): yield srv class _TestClient(object): def __init__(self, server): self._interface, self._host, self._port = _get_conn_data(server) self._http_connection = self.get_connection() self.server_instance = server def get_connection(self): name = '{interface}:{port}'.format( interface=self._interface, port=self._port, ) return http_client.HTTPConnection(name) def request( self, uri, method='GET', headers=None, http_conn=None, protocol='HTTP/1.1', ): return webtest.openURL( uri, method=method, headers=headers, host=self._host, port=self._port, http_conn=http_conn or self._http_connection, protocol=protocol, ) def __getattr__(self, attr_name): def _wrapper(uri, **kwargs): http_method = attr_name.upper() return self.request(uri, method=http_method, **kwargs) return _wrapper def _probe_ipv6_sock(interface): # Alternate way is to check IPs on interfaces using glibc, like: # github.com/Gautier/minifail/blob/master/minifail/getifaddrs.py try: with closing(socket.socket(family=socket.AF_INET6)) as sock: sock.bind((interface, 0)) except (OSError, socket.error) as sock_err: # In Python 3 socket.error is an alias for OSError # In Python 2 socket.error is a subclass of IOError if sock_err.errno != errno.EADDRNOTAVAIL: raise else: return True return False def _get_conn_data(server): host, port = server.bind_addr interface = webtest.interface(host) if ':' in interface and not _probe_ipv6_sock(interface): interface = '127.0.0.1' if ':' in host: host = interface return interface, host, port def get_server_client(server): """Create and return a test client for the given server.""" return _TestClient(server)
Python
0.000001
@@ -3411,37 +3411,136 @@ -host, port = server.bind_addr +if isinstance(server.bind_addr, tuple):%0A host, port = server.bind_addr%0A else:%0A host, port = server.bind_addr, 0 %0A%0A
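Decoded, the hunk replaces the unconditional tuple unpacking in _get_conn_data with a type check, presumably so the helper also works when bind_addr is a socket path rather than a (host, port) pair:

    if isinstance(server.bind_addr, tuple):
        host, port = server.bind_addr
    else:
        host, port = server.bind_addr, 0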
b2311319bc61f6e84943e801348e3b55b00d410f
remove unnecessary sys.path hack
chroot/__init__.py
chroot/__init__.py
__version__ = '0.9.6' import os from os.path import abspath, normpath, join, exists, basename import sys # pylint: disable=E0611 # python-3.3 not yet supported in pylint if sys.hexversion >= 0x03030000: from socket import sethostname # make sure that we are in sys.path if abspath(normpath(join(__file__, '../..'))) not in sys.path: sys.path.append(abspath(normpath(join(__file__, '../..')))) from chroot.unshare import unshare, CLONE_NEWUTS, CLONE_NEWNS, CLONE_NEWIPC # pylint: disable=F0401 # pylint likely will not see this stuff.. from chroot.base import WithParentSkip from chroot.definitions import DEFAULT_MOUNTS from chroot.exceptions import ChrootError, ChrootMountError from chroot.utils import bind, MountError, getlogger, dictbool class Chroot(WithParentSkip): """Context Manager class that implements running the contents of the with-context in a chroot. This is done by forking, doing some magic on the stack so the contents are not executed in the parent, and executing the context in the forked child. Exceptions are pickled and passed through to the parent. :param path: The path to the image to chroot into. :type path: str :param log: A log object to use for logging. :type log: logging.Logger :param mountpoints: A dictionary defining the mountpoints to use. These can override any of the defaults or add extra mountpoints :type mountpoints: dict :param hostname: The hostname to use in the chroot. If left blank then the basename of the path is used. :type hostname: str """ def __init__(self, path, log=None, mountpoints=None, hostname=None): self.log = getlogger(log, __name__) # TODO: capabilities check as well? # see http://marc.info/?l=python-dev&m=116406900432743 if os.geteuid() != 0: raise ChrootError('Cannot use chroot module when not running as root') elif hostname is not None and not isinstance(hostname, str): raise ChrootError('Hostname parameter passed a non-string object') super(Chroot, self).__init__() self.__unshared = False if not exists(abspath(path)): raise ChrootError('Attempt to chroot into a nonexistent path') self.path = abspath(path) self.mountpoints = DEFAULT_MOUNTS self.mountpoints.update(mountpoints if mountpoints else {}) # flag mount points that require creation and removal for mount, chrmount, opts in ((m, join(self.path, o['dest'].lstrip('/')) if 'dest' in o else join(self.path, m.lstrip('/')), o) for m, o in self.mountpoints.items()): src = mount # expand mountpoints that are environment variables if mount.startswith('$'): src = os.getenv(mount[1:]) if src is None: raise ChrootMountError('Environment variable "{}" is not defined in ' 'the host environment'.format(mount)) self.log.debug('Expanding mountpoint "%s" to "%s"', mount, src) self.mountpoints[src] = opts del self.mountpoints[mount] if '$' in chrmount: chrmount = join(self.path, src.lstrip('/')) if not 'optional' in opts and not exists(chrmount): self.mountpoints[src]['create'] = True if hostname is not None: self.hostname = hostname if sys.hexversion < 0x03030000: self.log.warn('Unable to set hostname on Python < 3.3') else: self.hostname = basename(self.path) def child_setup(self): self.unshare() self.mount() os.chroot(self.path) os.chdir('/') def cleanup(self): # remove mount points that were dynamically created for chrmount in (join(self.path, o['dest'].lstrip('/')) if 'dest' in o else join(self.path, m.lstrip('/')) for m, o in self.mountpoints.items() if 'create' in o): self.log.debug('Removing dynamically created mountpoint "%s"', chrmount) try: if not os.path.isdir(chrmount): os.remove(chrmount) 
chrmount = os.path.dirname(chrmount) os.removedirs(chrmount) # don't fail if leaf directories aren't empty when trying to remove them except OSError: pass except: raise ChrootMountError('Failed to remove chroot mount point "{}"'.format(chrmount)) def unshare(self): """ Use Linux namespaces to add the current process to a new UTS (hostname) namespace, new mount namespace and new IPC namespace. """ unshare(CLONE_NEWUTS | CLONE_NEWNS | CLONE_NEWIPC) # set the hostname in the chroot process to hostname for the chroot if sys.hexversion >= 0x03030000: sethostname(self.hostname) self.__unshared = True def mount(self): """Do the bind mounts for this chroot object. This _must_ be run after unshare.""" if not self.__unshared: raise ChrootMountError('Attempted to run mount method without running unshare method') for mount, chrmount, opts in ((m, join(self.path, o['dest'].lstrip('/')) if 'dest' in o else join(self.path, m.lstrip('/')), o) for m, o in self.mountpoints.items() if not m.startswith('$')): if dictbool(opts, 'optional') and not exists(mount): self.log.debug('Not mounting "%s" as it\'s optional and doesn\'t exist', mount) continue try: kwargs = {k: v for k, v in opts.items() if k != 'dest'} bind(src=mount, dest=chrmount, log=self.log, **kwargs) except MountError as ex: raise ChrootMountError(str(ex)) # vim:et:ts=4:sw=4:tw=120:sts=4:ai:
Python
0.000001
@@ -239,172 +239,8 @@ me%0A%0A -# make sure that we are in sys.path%0Aif abspath(normpath(join(__file__, '../..'))) not in sys.path:%0A sys.path.append(abspath(normpath(join(__file__, '../..'))))%0A%0A from
ef6e0b681c1c7812e9d11fcd2fffd36468c00513
Create custom field : SearchButtonField (#77)
cineapp/widgets.py
cineapp/widgets.py
# -*- coding: utf-8 -*- from wtforms import fields, widgets # Define wtforms widget and field class CKTextAreaWidget(widgets.TextArea): def __call__(self, field, **kwargs): kwargs.setdefault('class_', 'ckeditor') html_string = super(CKTextAreaWidget, self).__call__(field, **kwargs) html_string += ("""<script> CKEDITOR.replace( '%s', { enterMode: CKEDITOR.ENTER_BR } ); </script>""" % field.id) return widgets.HTMLString(html_string) class CKTextAreaField(fields.TextAreaField): widget = CKTextAreaWidget()
Python
0
@@ -558,8 +558,754 @@ idget()%0A +%0A# Widget which returns a complete search bar with a glyphicon button%0Aclass SearchButtonWidget(widgets.SubmitInput):%0A%0A html_params = staticmethod(widgets.html_params)%0A input_type = 'submit'%0A%0A def __call__(self, field, **kwargs):%0A kwargs.setdefault('id', field.id)%0A kwargs.setdefault('type', self.input_type)%0A kwargs.setdefault('value', field.label.text)%0A%0A if 'value' not in kwargs:%0A kwargs%5B'value'%5D = field._value()%0A%0A return widgets.HTMLString('%3Cbutton %25s%3E%3Ci class=%22glyphicon glyphicon-search%22%3E%3C/i%3E%3C/button%3E' %25 self.html_params(name=field.name, **kwargs))%0A%0A# SearchButtonField used for display the previous widget%0Aclass SearchButtonField(fields.BooleanField):%0A%09widget = SearchButtonWidget()%0A
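The appended code is all contained in the hunk above; percent-decoded, with line wrapping and indentation normalized, it reads roughly as:

    # Widget which returns a complete search bar with a glyphicon button
    class SearchButtonWidget(widgets.SubmitInput):

        html_params = staticmethod(widgets.html_params)
        input_type = 'submit'

        def __call__(self, field, **kwargs):
            kwargs.setdefault('id', field.id)
            kwargs.setdefault('type', self.input_type)
            kwargs.setdefault('value', field.label.text)

            if 'value' not in kwargs:
                kwargs['value'] = field._value()

            return widgets.HTMLString(
                '<button %s><i class="glyphicon glyphicon-search"></i></button>'
                % self.html_params(name=field.name, **kwargs))

    # SearchButtonField used for display the previous widget
    class SearchButtonField(fields.BooleanField):
        widget = SearchButtonWidget()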
b54a3fa45cca86fddcd6130e67a306d93a079fff
update Config File parsing to new API
Samples/Python/sample.py
Samples/Python/sample.py
import Ogre import OgreRTShader class SGResolver(Ogre.MaterialManager_Listener): def __init__(self, shadergen): Ogre.MaterialManager_Listener.__init__(self) self.shadergen = shadergen def handleSchemeNotFound(self, idx, name, mat, lod_idx, rend): if name != OgreRTShader.cvar.ShaderGenerator_DEFAULT_SCHEME_NAME: return None def_name = Ogre.cvar.MaterialManager_DEFAULT_SCHEME_NAME succ = self.shadergen.createShaderBasedTechnique(mat.getName(), def_name, name) if not succ: return None self.shadergen.validateMaterial(name, mat.getName()) return mat.getTechnique(1) def main(): root = Ogre.Root("plugins.cfg", "ogre.cfg", "") cfg = Ogre.ConfigFile() cfg.loadDirect("resources.cfg") rgm = Ogre.ResourceGroupManager.getSingleton() for sec in ("Essential", "Popular"): for kind in ("Zip", "FileSystem"): for loc in cfg.getMultiSetting(kind, sec): rgm.addResourceLocation(loc, kind, sec) if not root.restoreConfig(): root.showConfigDialog(Ogre.ConfigDialog()) win = root.initialise(True) OgreRTShader.ShaderGenerator.initialize() shadergen = OgreRTShader.ShaderGenerator.getSingleton() sgres = SGResolver(shadergen) Ogre.MaterialManager.getSingleton().addListener(sgres) rgm.initialiseAllResourceGroups() rs = shadergen.getRenderState(OgreRTShader.cvar.ShaderGenerator_DEFAULT_SCHEME_NAME) rs.addTemplateSubRenderState(shadergen.createSubRenderState(OgreRTShader.cvar.PerPixelLighting_Type)); scn_mgr = root.createSceneManager(Ogre.ST_GENERIC) shadergen.addSceneManager(scn_mgr) scn_mgr.setAmbientLight(Ogre.ColourValue(.1, .1, .1)) light = scn_mgr.createLight("MainLight") light.setPosition(0, 10, 15) cam = scn_mgr.createCamera("myCam") cam.setPosition(0, 0, 15) cam.setNearClipDistance(5) cam.lookAt(0, 0, -1) vp = win.addViewport(cam) vp.setBackgroundColour(Ogre.ColourValue(.3, .3, .3)) ent = scn_mgr.createEntity("Sinbad.mesh") node = scn_mgr.getRootSceneNode().createChildSceneNode() node.attachObject(ent) root.startRendering() if __name__ == "__main__": main()
Python
0
@@ -860,78 +860,98 @@ sec - in (%22Essential%22, %22Popular%22):%0A for kind in (%22Zip%22, %22FileSystem%22 +, settings in cfg.getSettingsBySection().items():%0A for kind, loc in settings.items( ):%0A @@ -965,26 +965,67 @@ -for loc in +rgm.addResourceLocation(loc, kind, sec)%0A%0A arch = cfg.get Mult @@ -1024,45 +1024,40 @@ .get -Multi Setting -(kind, sec):%0A +s(%22General%22).values()%5B0%5D%0A @@ -1080,32 +1080,76 @@ ocation( -loc, kind, sec)%0A +arch + %22/materials/programs/GLSL%22, %22FileSystem%22, %22General%22); %0A if
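Decoded, the resource-location loop switches from the fixed section/kind lists to iterating cfg.getSettingsBySection(), and a GLSL program path derived from the General section is added afterwards. Roughly (whitespace reconstructed):

    for sec, settings in cfg.getSettingsBySection().items():
        for kind, loc in settings.items():
            rgm.addResourceLocation(loc, kind, sec)

    arch = cfg.getSettings("General").values()[0]
    rgm.addResourceLocation(arch + "/materials/programs/GLSL", "FileSystem", "General");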
f5161f202dab424bd0e77688dd8a1e687e0f4df0
disable locking for now
vizier/mqttinterface.py
vizier/mqttinterface.py
import paho.mqtt.client as mqtt import queue import threading import logging # CountDownLatch for some MQTT client checking class _CountDownLatch(): """This class handles some synchronization behind starting the paho MQTT client Attributes: _cv (threading.Condition): Condition variable for waiting on the countdown _count (int): Current count of the latch. Calls to wait return when count reaches 0 """ def __init__(self, count=1): self._cv = threading.Condition() self._count = count def _counted_down(self): """Helper function to determine if the countdown has occured""" return self._count <= 0 def count_down(self): """Thread safe. When the count reaches 0, all waits return""" with self._cv: self._count -= 1 self._count = max(self._count, 0) if(self._count <= 0): self._cv.notify_all() def wait(self, timeout=None): """Thread safe. Waits for the count to reach 0 Args: timeout (double): timeout to wait on latch """ with self._cv: self._cv.wait_for(self._counted_down, timeout=timeout) class MQTTInterface: """This is a wrapper around the Paho MQTT interface with enhanced functionality Attributes: host (str): The MQTT broker's host to which this client connects. port (int): The MQTT broker's port to which this client connects. """ def __init__(self, port=1884, keep_alive=60, host="localhost", logging_config=None): # Set up MQTT client self.host = host self.port = port self.keep_alive = keep_alive # Lock for the various methods self.lock = threading.RLock() self.publish_lock = threading.Lock() # self.client_queue = queue.Queue() self.client = mqtt.Client() self.client.on_message = self._on_message # I shouldn't need a lock for this... self.channels = {} self.callbacks = {} # For logging if(logging_config): logging.configDict(logging_config) self.logger = logging.getLogger(__name__) else: self.logger = logging.getLogger(__name__) self.logger.setLevel(logging.DEBUG) # The callback for when a PUBLISH message is received from the server. def _on_message(self, client, userdata, msg): """Thread safe. Callback handling messages from the client. Either puts the message into a callback or a channel Args: client: Client from which message was recieved userdata: Data about the client msg: MQTT payload """ # Unsubscribe could happen between these statements, so we need the lock with self.lock: if(msg.topic in self.callbacks): # Just pass the byte payload directly to the callback, since the other info is more or less useless self.callbacks[msg.topic](msg.payload) def subscribe_with_callback(self, channel, callback): """Thread safe. Subscribes to a channel with a callback using the underlying MQTT client. All messages to that channel will be passed into the callback Args: channel (str): Channel to which the node subscribes callback (function): Callback function for the topic """ with self.lock: self.callbacks.update({channel: callback}) self.client.subscribe(channel) def subscribe(self, channel): """Thread safe. A subscribe routine that yields a queue to which all subsequent messages to the given topic will be passed Args: channel (str): Channel to which the client will subscribe Returns: A queue containing all future messages from the supplied channel """ # Should be thread safe, since locking is handled in subscribe_with_callback q = queue.Queue() def f(msg): nonlocal q q.put(msg) self.subscribe_with_callback(channel, f) return q def unsubscribe(self, channel): """Thread safe. 
Unsubscribes from a particular channel Args: channel (str): Channel from which the client unsubscribes """ with self.lock: self.client.unsubscribe(channel) self.channels.pop(channel, None) def send_message(self, channel, message): """Thread safe. Sends a message on the MQTT client. Args: channel (str): string (channel on which to send message) message (bytes): Message to be sent. Should be in an encoded bytes format (like UTF-8) """ # TODO: Ensure that this function is actually thread-safe. Don't think it is # TODO: Rename to publish? with self.publish_lock: self.client.publish(channel, message) def start(self, timeout=None): """Handles starting the underlying MQTT client""" cdl = _CountDownLatch(1) # Local function to handle connection to the MQTT server def on_connect(client, userdata, flags, rc): nonlocal cdl self.logger.info('MQTT client successfully connected to broker on host: {0}, port: {1}'.format(self.host, self.port)) cdl.count_down() self.client.on_connect = on_connect # Attempt to connect the client to the specified broker try: self.client.connect(self.host, self.port, self.keep_alive) except Exception as e: self.logger.error('MQTT client could not connect to broker at host: {0}, port: {1}'.format(self.host, self.port)) raise e # Starts MQTT client in background thread. This has to be done before the client will process any messages self.client.loop_start() # Have to start client before we wait on CDL. Client won't process any messages until we start it cdl.wait(timeout=timeout) def stop(self): """Handles stopping the MQTT client""" # Stops MQTT client self.client.loop_stop()
Python
0
@@ -3506,32 +3506,33 @@ %22%22%22%0A%0A +# with self.lock:%0A @@ -3523,36 +3523,32 @@ with self.lock:%0A - self.cal @@ -3582,20 +3582,16 @@ lback%7D)%0A - @@ -4423,32 +4423,34 @@ %22%22%22%0A%0A + # with self.lock: @@ -4442,36 +4442,32 @@ with self.lock:%0A - self.cli @@ -4491,20 +4491,16 @@ hannel)%0A -
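Decoded, the hunks comment out the lock acquisition in subscribe_with_callback and unsubscribe and dedent their bodies, matching the subject line; for example:

    # with self.lock:
    self.callbacks.update({channel: callback})
    self.client.subscribe(channel)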
1ed92c2577cdf8bfcf30310037dc2e802b665f26
jimmy files needs to return unjimmied file if ok
Wrappers/Shelx/Shelxc.py
Wrappers/Shelx/Shelxc.py
#!/usr/bin/env python # Shelxc.py # Copyright (C) 2006 CCLRC, Graeme Winter # # This code is distributed under the BSD license, a copy of which is # included in the root directory of this package. # # 16th November 2006 # # A wrapper for SHELXC from the SHELX phasing package. SHELXC prepares # the data for substructure determination, and needs to know the "names" # of the different data sets, e.g. PEAK INFL LREM HREM NATIVE. # For this to work it is assumed that these will be the dataset (e.g. # wavelength) names provided. # import sys import os import shutil if not os.path.join(os.environ['XIA2CORE_ROOT'], 'Python') in sys.path: sys.path.append(os.path.join(os.environ['XIA2CORE_ROOT'], 'Python')) if not os.environ['XIA2_ROOT'] in sys.path: sys.path.append(os.environ['XIA2_ROOT']) from Driver.DriverFactory import DriverFactory def Shelxc(DriverType = None): '''Create a Shelxc instance based on the DriverType.''' DriverInstance = DriverFactory.Driver(DriverType) class ShelxcWrapper(DriverInstance.__class__): '''A wrapper class for Shelxc.''' def __init__(self): DriverInstance.__class__.__init__(self) self.set_executable('shelxc') # input files self._infl = None self._lrem = None self._peak = None self._hrem = None self._sad = None self._native = None # heavy atom information self._n_sites = 0 # cell and symmetry self._cell = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] self._symmetry = None # naming information self._name = None # control information for shelxd (which will go in through the # .ins file) self._ntry = 20 self._mind = 3.5 # output information self._fa_hkl = None return def _jimmy_file_name(self, file): '''It appears that the maximum length of an input file name is 80 characters - so it it is longer than this copy the file.''' if len(file) < 70: return shutil.copyfile(file, os.path.join(self.get_working_directory(), os.path.split(file)[-1])) return os.path.split(file)[-1] def set_cell(self, cell): self._cell = cell return def set_symmetry(self, symmetry): self._symmetry = symmetry return def set_n_sites(self, n_sites): self._n_sites = n_sites return def set_peak(self, peak): self._peak = self._jimmy_file_name(peak) return def set_infl(self, infl): self._infl = self._jimmy_file_name(infl) return def set_lrem(self, lrem): self._lrem = self._jimmy_file_name(lrem) return def set_hrem(self, hrem): self._hrem = self._jimmy_file_name(hrem) return def set_native(self, native): self._native = self._jimmy_file_name(native) return def set_sad(self, sad): self._sad = self._jimmy_file_name(sad) return def set_name(self, name): self._name = name return def prepare(self): '''Prepare the experimental phasing data.''' self.add_command_line(self._name) self.start() if self._peak: self.input('PEAK %s\n' % self._peak) if self._infl: self.input('INFL %s\n' % self._infl) if self._hrem: self.input('HREM %s\n' % self._hrem) if self._lrem: self.input('LREM %s\n' % self._lrem) if self._sad: self.input('SAD %s\n' % self._sad) if self._native: self.input('NATIVE %s\n' % self._native) self.input('CELL %f %f %f %f %f %f' % tuple(self._cell)) self.input('SPAG %s' % self._symmetry) self.input('FIND %d' % self._n_sites) self.input('NTRY %d' % self._ntry) self.input('MIND %f' % (-1.0 * self._mind)) self.close_wait() # perform checks here for errors... 
self.check_for_errors() for line in self.get_all_output(): if 'Reflections written' in line and 'SHELXD/E' in line: self._fa_hkl = line.split()[5] return def get_fa_hkl(self): return self._fa_hkl return ShelxcWrapper() if __name__ == '__main__': # run a test data_dir = os.path.join(os.environ['X2TD_ROOT'], 'Test', 'UnitTest', 'Interfaces', 'Scaler', 'Unmerged') sc = Shelxc() sc.write_log_file('shelxc.log') sc.set_cell((57.74, 76.93, 86.57, 90.00, 90.00, 90.00)) # sc.set_symmetry('P212121') sc.set_symmetry('P222') sc.set_n_sites(5) sc.set_infl(os.path.join(data_dir, 'TS00_13185_unmerged_INFL.sca')) sc.set_lrem(os.path.join(data_dir, 'TS00_13185_unmerged_LREM.sca')) sc.set_peak(os.path.join(data_dir, 'TS00_13185_unmerged_PEAK.sca')) sc.set_name('TS00') sc.prepare()
Python
0.999797
@@ -2167,32 +2167,37 @@ return + file %0A%0A sh
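Decoded, the fix adds the missing return value in _jimmy_file_name: when the path is already short enough for SHELX the method used to fall through a bare return (yielding None); it now hands back the original name (docstring omitted, whitespace reconstructed):

    def _jimmy_file_name(self, file):
        if len(file) < 70:
            return file    # previously a bare "return"

        shutil.copyfile(file, os.path.join(self.get_working_directory(),
                                           os.path.split(file)[-1]))
        return os.path.split(file)[-1]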
3348796c4af9f1b5c7a6c619e7ae98379fea328d
Version file update
great_expectations/version.py
great_expectations/version.py
import os import subprocess import logging logger = logging.getLogger(__name__) tag = "0.8.0a1" git_directory = os.environ.get("GE_DEV_DIR") def get_git_describe_string(): return subprocess.check_output(["git", "describe"]).decode().strip() def get_git_revision_hash(): return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode() def get_git_revision_short_hash(): return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode() if git_directory is not None: start_dir = os.getcwd() try: os.chdir(git_directory) __version__ = get_git_describe_string() except subprocess.CalledProcessError: logger.warning("Unable to identify version tag using git.") __version__ = tag finally: os.chdir(start_dir) else: logger.debug("Using default version tag.") __version__ = tag
Python
0.000001
@@ -871,9 +871,8 @@ _ = tag%0A -%0A
01ec4fd2e294bcb524c6724d6727da7b1a882f0d
Exit code 2 for normal not running remote status
guild/commands/remote_impl.py
guild/commands/remote_impl.py
# Copyright 2017-2018 TensorHub, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import division import guild.remote from guild import cli from . import remote_support def start(args): remote = remote_support.remote_for_args(args) _remote_op(remote.start, "start", remote, True, args) def stop(args): remote = remote_support.remote_for_args(args) _remote_op(remote.stop, "stop", remote, False, args) def _remote_op(op, desc, remote, default_resp, args): if not args.yes: cli.out("You are about to %s %s" % (desc, remote.name)) if args.yes or cli.confirm("Continue?", default_resp): try: op() except guild.remote.OperationNotSupported as e: cli.error(e) except guild.remote.OperationError as e: cli.error(e) def status(args): remote = remote_support.remote_for_args(args) try: remote.status(args.verbose) except guild.remote.Down as e: cli.error("remote %s is not available (%s)" % (remote.name, e)) except guild.remote.OperationError as e: cli.error(e)
Python
0
@@ -1521,16 +1521,29 @@ i.error( +%0A %22remote @@ -1586,16 +1586,43 @@ name, e) +,%0A exit_status=2 )%0A ex
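Decoded, the hunks reflow the cli.error() call and pass exit_status=2, so a remote that is merely unreachable exits with status 2 rather than the default error code:

    except guild.remote.Down as e:
        cli.error(
            "remote %s is not available (%s)" % (remote.name, e),
            exit_status=2)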
859d5ce6553b7651f05f27adec28e8c4330ca9bb
Add id of node generating the supervisor event
handler/supervisor_to_serf.py
handler/supervisor_to_serf.py
#!/usr/bin/env python import json import sys from utils import serf_event def write_stdout(s): sys.stdout.write(s) sys.stdout.flush() def write_stderr(s): sys.stderr.write(s) sys.stderr.flush() def main(): while True: write_stdout('READY\n') # transition from ACKNOWLEDGED to READY line = sys.stdin.readline() # read header line from stdin headers = dict(x.split(':') for x in line.split()) data = sys.stdin.read(int(headers['len'])) # read the event payload data_dict = dict(x.split(':') for x in data.split()) data_dict['eventname'] = headers['eventname'] serf_event('myevent', json.dumps(data_dict)) write_stdout('RESULT 2\nOK') # transition from READY to ACKNOWLEDGED if __name__ == '__main__': main()
Python
0.000001
@@ -626,16 +626,74 @@ tname'%5D%0A + data_dict%5B'node'%5D = serf('info')%5B'agent'%5D%5B'name'%5D%0A @@ -708,15 +708,18 @@ nt(' -myevent +supervisor ', j
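Decoded, the hunks add the reporting node's name to the payload and rename the emitted event; note that serf() is not among the imports shown in old_contents, so the full commit presumably also touches the utils module:

    data_dict['eventname'] = headers['eventname']
    data_dict['node'] = serf('info')['agent']['name']
    serf_event('supervisor', json.dumps(data_dict))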
20bc11aab936e96109c6d5407b22b0c40e256a54
total score should never be negative
pranger/front/views.py
pranger/front/views.py
# -*- coding: utf-8 -*- from __future__ import print_function, division, absolute_import, unicode_literals from collections import defaultdict from django.views.generic.base import TemplateView from django.views.generic.detail import DetailView from django.views.generic.list import ListView from braces.views import CanonicalSlugDetailMixin from . import models class HomeView(ListView): model = models.Website template_name = 'front/home.html' class WebsiteView(CanonicalSlugDetailMixin, DetailView): model = models.Website template_name = 'front/website.html' context_object_name = 'site' def get_context_data(self, **kwargs): context = super(WebsiteView, self).get_context_data(**kwargs) website = self.object scores = defaultdict(dict) if website.pw_max_length == 0: scores['negative']['MAX_LEN_16'] = -1 elif website.pw_max_length == 1: scores['negative']['MAX_LEN_10'] = -2 elif website.pw_max_length == 3: scores['positive']['MAX_LEN_UNLIMITED'] = 1 if website.alphabet_limited: scores['negative']['ALPHABET_LIMITED'] = -1 if website.eml_registration_plaintext: scores['negative']['PW_IN_REGISTRATION_MAIL'] = -1 if website.eml_recovery_plaintext: scores['negative']['NEW_PW_IN_REMEMBER_MAIL'] = -1 if website.eml_reminder_plaintext: scores['negative']['OWN_PW_IN_REMEMBER_MAIL'] = -3 if website.tls == 0: scores['negative']['TLS_NO'] = -2 elif website.tls == 1: scores['negative']['TLS_SOME'] = -1 elif website.tls == 2: scores['hint']['TLS_ALL'] = 0 elif website.tls == 3: scores['positive']['TLS_FORCED'] = 1 if website.two_factor: scores['positive']['TWO_FACTOR'] = 3 if website.pw_strength_indicator: scores['positive']['SECURITY_WIDGET'] = 1 scores['sum'] = self.calculate(scores['positive'].itervalues(), scores['negative'].itervalues()) context['scores'] = scores return context @staticmethod def calculate(positive, negative): sum_pos = sum(positive) if sum_pos > 6: sum_pos = 6 sum_neg = sum(negative) if sum_neg < -6: sum_neg = -6 return {'positive': sum_pos, 'negative': sum_neg, 'total': sum_neg + sum_pos} class InfoView(TemplateView): template_name = 'front/info.html'
Python
0.999999
@@ -2398,16 +2398,93 @@ eg = -6%0A + total = sum_neg + sum_pos%0A if total %3C 0%0A total = 0%0A @@ -2542,33 +2542,21 @@ total': -sum_neg + sum_pos +total %7D%0A%0A%0Aclas
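Decoded, calculate() now clamps the combined score at zero before building the result. As encoded, the hunk drops the colon after "if total < 0", which would be a syntax error; with the colon restored the tail of the method reads:

    sum_neg = sum(negative)
    if sum_neg < -6:
        sum_neg = -6
    total = sum_neg + sum_pos
    if total < 0:
        total = 0
    return {'positive': sum_pos, 'negative': sum_neg, 'total': total}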
2b0acfcd5a1fd5529ed8abf0af32fd986f3f4534
Add Uses and Provides classes to metadata module
wmtmetadata/metadata.py
wmtmetadata/metadata.py
"""Create the metadata files describing a WMT component.""" import os import warnings import json from wmtmetadata.utils import commonpath indent = 2 class MetadataBase(object): def __init__(self, component): self.filename = None self.data = None self.api = component['api'] self.files = component['files'] self.info = component['info'] self.parameters = component['parameters'] self.provides = component['provides'] self.uses = component['uses'] def write(self): with open(self.filename, 'w') as fp: json.dump(self.data, fp, indent=indent) class Files(MetadataBase): def __init__(self, component): super(Files, self).__init__(component) self.filename = 'files.json' self._prefix = self._get_prefix() self.data = [] for name in self.files: self.data.append(name[len(self._prefix):]) def _get_prefix(self): if len(self.files) == 1: prefix = os.path.dirname(prefix) else: prefix = commonpath(self.files) prefix += os.path.sep return prefix class Info(MetadataBase): def __init__(self, component): super(Info, self).__init__(component) self.filename = 'info.json' self.data = self.info.copy() for key in ['id', 'name', 'class']: self.data[key] = self.api['class'] try: self.data['initialize_args'] = self.api['initialize_args'] except KeyError: warnings.warn('missing initialize_args') self.data['initialize_args'] = ''
Python
0
@@ -64,16 +64,41 @@ port os%0A +import re%0Aimport fnmatch%0A import w @@ -117,16 +117,28 @@ rt json%0A +import yaml%0A from wmt @@ -170,16 +170,53 @@ monpath%0A +from wmtmetadata import metadata_dir%0A %0A%0Aindent @@ -587,16 +587,394 @@ %5B'uses'%5D +%0A self.component_config_file = os.path.join(metadata_dir,%0A self.api%5B'class'%5D,%0A 'wmt.yaml')%0A self.component_config = None%0A%0A def load_component_config(self):%0A with open(self.component_config_file, 'r') as fp:%0A self.component_config = yaml.load(fp) %0A%0A de @@ -2092,8 +2092,1040 @@ '%5D = ''%0A +%0A%0Aclass Ports(MetadataBase):%0A%0A def __init__(self, component):%0A super(Ports, self).__init__(component)%0A self.data = %5B%5D%0A self.load_component_config()%0A%0A def load(self):%0A port_type = type(self).__name__.lower()%0A port = self.component_config.get(port_type, %5B%5D)%0A for name in port:%0A names = %5B%5D%0A for pattern in port%5Bname%5D%5B'exchange_items'%5D:%0A p = re.compile(fnmatch.translate(pattern))%0A names.extend(filter(p.match, getattr(self, port_type)))%0A%0A self.data.append(%7B%0A 'id': name,%0A 'required': port%5Bname%5D%5B'required'%5D,%0A 'exchange_items': names,%0A %7D)%0A%0A%0Aclass Uses(Ports):%0A%0A def __init__(self, component):%0A super(Uses, self).__init__(component)%0A self.filename = 'uses.json'%0A self.load()%0A%0A%0Aclass Provides(Ports):%0A%0A def __init__(self, component):%0A super(Provides, self).__init__(component)%0A self.filename = 'provides.json'%0A self.load()%0A
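The patch also adds re, fnmatch, yaml and metadata_dir imports plus a load_component_config() helper; the headline addition is the Ports base class and its two thin subclasses. Decoded and abridged (whitespace reconstructed):

    class Ports(MetadataBase):

        def __init__(self, component):
            super(Ports, self).__init__(component)
            self.data = []
            self.load_component_config()

        def load(self):
            port_type = type(self).__name__.lower()
            port = self.component_config.get(port_type, [])
            for name in port:
                names = []
                for pattern in port[name]['exchange_items']:
                    p = re.compile(fnmatch.translate(pattern))
                    names.extend(filter(p.match, getattr(self, port_type)))

                self.data.append({
                    'id': name,
                    'required': port[name]['required'],
                    'exchange_items': names,
                })

    class Uses(Ports):

        def __init__(self, component):
            super(Uses, self).__init__(component)
            self.filename = 'uses.json'
            self.load()

    # Provides mirrors Uses, setting filename = 'provides.json' before calling load().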
e3916e6403b8933d9d8896b7289321c45b9990d2
Refactor if clause to check for string in list
wqflask/wqflask/docs.py
wqflask/wqflask/docs.py
import codecs from flask import g from wqflask.database import database_connection class Docs: def __init__(self, entry, start_vars={}): results = None with database_connection() as conn, conn.cursor() as cursor: cursor.execute("SELECT Docs.title, CAST(Docs.content AS BINARY) " "FROM Docs WHERE Docs.entry LIKE %s", (str(entry),)) result = cursor.fetchone() self.entry = entry if result: self.title = result[0] self.content = result[1].decode("utf-8") else: self.title = self.entry.capitalize() self.content = "" self.editable = "false" # ZS: Removing option to edit to see if text still gets vandalized try: if g.user_session.record['user_email_address'] == "zachary.a.sloan@gmail.com" or g.user_session.record['user_email_address'] == "labwilliams@gmail.com": self.editable = "true" except: pass def update_text(start_vars): content = start_vars['ckcontent'] content = content.replace('%', '%%').replace( '"', '\\"').replace("'", "\\'") try: if g.user_session.record['user_email_address'] == "zachary.a.sloan@gmail.com" or g.user_session.record['user_email_address'] == "labwilliams@gmail.com": with database_connection() as conn, conn.cursor() as cursor: cursor.execute("UPDATE Docs SET content=%s WHERE entry=%s", (content, start_vars.get("entry_type"),)) except: pass
Python
0
@@ -1169,17 +1169,16 @@ %22%5C%5C'%22)%0A -%0A try: @@ -1202,33 +1202,37 @@ r_session.record -%5B +.get( 'user_email_addr @@ -1227,37 +1227,38 @@ r_email_address' -%5D == +) in %5B %22zachary.a.sloan @@ -1272,58 +1272,9 @@ com%22 - or g.user_session.record%5B'user_email_address'%5D == +, %22la @@ -1285,32 +1285,33 @@ liams@gmail.com%22 +%5D :%0A wi
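Decoded, the chained equality test in update_text() collapses into a single membership check, and the record lookup moves from indexing to .get() so a missing key no longer raises (wrapped here for readability):

    # before
    if g.user_session.record['user_email_address'] == "zachary.a.sloan@gmail.com" \
            or g.user_session.record['user_email_address'] == "labwilliams@gmail.com":

    # after
    if g.user_session.record.get('user_email_address') in [
            "zachary.a.sloan@gmail.com", "labwilliams@gmail.com"]: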
db38dc62a19ccb0979b22a89a00ca38236fbf098
Use maxsplits to only examine a single event at a time
sseclient.py
sseclient.py
import codecs import re import time import warnings import six import requests # Technically, we should support streams that mix line endings. This regex, # however, assumes that a system will provide consistent line endings. end_of_field = re.compile(r'\r\n\r\n|\r\r|\n\n') class SSEClient(object): def __init__(self, url, last_id=None, retry=3000, session=None, chunk_size=1024, **kwargs): self.url = url self.last_id = last_id self.retry = retry self.chunk_size = chunk_size # Optional support for passing in a requests.Session() self.session = session # Any extra kwargs will be fed into the requests.get call later. self.requests_kwargs = kwargs # The SSE spec requires making requests with Cache-Control: nocache if 'headers' not in self.requests_kwargs: self.requests_kwargs['headers'] = {} self.requests_kwargs['headers']['Cache-Control'] = 'no-cache' # The 'Accept' header is not required, but explicit > implicit self.requests_kwargs['headers']['Accept'] = 'text/event-stream' # Keep data here as it streams in self.buf = u'' self._connect() def _connect(self): if self.last_id: self.requests_kwargs['headers']['Last-Event-ID'] = self.last_id # Use session if set. Otherwise fall back to requests module. requester = self.session or requests self.resp = requester.get(self.url, stream=True, **self.requests_kwargs) self.resp_iterator = self.resp.iter_content(chunk_size=self.chunk_size) # TODO: Ensure we're handling redirects. Might also stick the 'origin' # attribute on Events like the Javascript spec requires. self.resp.raise_for_status() def _event_complete(self): return re.search(end_of_field, self.buf) is not None def __iter__(self): return self def __next__(self): decoder = codecs.getincrementaldecoder( self.resp.encoding)(errors='replace') while not self._event_complete(): try: next_chunk = next(self.resp_iterator) if not next_chunk: raise EOFError() self.buf += decoder.decode(next_chunk) except (StopIteration, requests.RequestException, EOFError) as e: time.sleep(self.retry / 1000.0) self._connect() # The SSE spec only supports resuming from a whole message, so # if we have half a message we should throw it out. head, sep, tail = self.buf.rpartition('\n') self.buf = head + sep continue split = re.split(end_of_field, self.buf) head = split[0] tail = "".join(split[1:]) self.buf = tail msg = Event.parse(head) # If the server requests a specific retry delay, we need to honor it. if msg.retry: self.retry = msg.retry # last_id should only be set if included in the message. It's not # forgotten if a message omits it. if msg.id: self.last_id = msg.id return msg if six.PY2: next = __next__ class Event(object): sse_line_pattern = re.compile('(?P<name>[^:]*):?( ?(?P<value>.*))?') def __init__(self, data='', event='message', id=None, retry=None): self.data = data self.event = event self.id = id self.retry = retry def dump(self): lines = [] if self.id: lines.append('id: %s' % self.id) # Only include an event line if it's not the default already. if self.event != 'message': lines.append('event: %s' % self.event) if self.retry: lines.append('retry: %s' % self.retry) lines.extend('data: %s' % d for d in self.data.split('\n')) return '\n'.join(lines) + '\n\n' @classmethod def parse(cls, raw): """ Given a possibly-multiline string representing an SSE message, parse it and return a Event object. """ msg = cls() for line in raw.splitlines(): m = cls.sse_line_pattern.match(line) if m is None: # Malformed line. Discard but warn. 
warnings.warn('Invalid SSE line: "%s"' % line, SyntaxWarning) continue name = m.group('name') if name == '': # line began with a ":", so is a comment. Ignore continue value = m.group('value') if name == 'data': # If we already have some data, then join to it with a newline. # Else this is it. if msg.data: msg.data = '%s\n%s' % (msg.data, value) else: msg.data = value elif name == 'event': msg.event = value elif name == 'id': msg.id = value elif name == 'retry': msg.retry = int(value) return msg def __str__(self): return self.data
Python
0.000032
@@ -2289,16 +2289,17 @@ _chunk)%0A +%0A @@ -2732,38 +2732,144 @@ -s +# S plit -= re.split(end_of_field, +the complete event (up to the end_of_field) into event_string,%0A # and retain anything after the current complete event in sel @@ -2873,17 +2873,16 @@ self.buf -) %0A @@ -2886,82 +2886,104 @@ -head = split%5B0%5D%0A tail = %22%22.join(split%5B1:%5D)%0A%0A self.buf = tail +# for next time.%0A (event_string, self.buf) = re.split(end_of_field, self.buf, maxsplit=1) %0A @@ -3005,20 +3005,28 @@ t.parse( -head +event_string )%0A%0A
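Decoded, __next__ now splits off exactly one complete event per pass and leaves everything after it buffered for the next call; the comments below are taken from the patch itself:

    # Split the complete event (up to the end_of_field) into event_string,
    # and retain anything after the current complete event in self.buf
    # for next time.
    (event_string, self.buf) = re.split(end_of_field, self.buf, maxsplit=1)

    msg = Event.parse(event_string)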
895dd0343867e81a5129c1aa2c07c431b645d0f2
version 0.5.0
api/info.py
api/info.py
from collections import OrderedDict from rest_framework import viewsets, mixins, response, reverse NAME = 'vsemionov.notes.api' VERSION = '0.4.3' class InfoViewSet(mixins.ListModelMixin, viewsets.GenericViewSet): view_name = 'Info' @staticmethod def _get_user_url(request): return request.user.id and reverse.reverse('user-detail', request=request, args=[request.user.username]) def get_view_name(self): return self.view_name def list(self, request, *args, **kwargs): app = OrderedDict((('name', NAME), ('version', VERSION))) user = OrderedDict((('username', request.user.username), ('url', self._get_user_url(request)))) info = OrderedDict((('app', app), ('user', user))) return response.Response(info)
Python
0.000002
@@ -140,11 +140,11 @@ '0. -4.3 +5.0 '%0A%0A%0A
cee5e2aae5144fd1280240e069895049fe34de96
Update osmfilter.py
osm-atlas-get/osmfilter.py
osm-atlas-get/osmfilter.py
#!/usr/bin/env python # -*- coding: utf-8 -*- #interface for osmconvert+osmfilter def get_args(): p = argparse.ArgumentParser(description='Filter pbf file using osmfilter') p.add_argument('--filter', help='filter string', type=str) p.add_argument('--debug', '-d', help='debug mode', action='store_true') p.add_argument('source', help='source pbf file') p.add_argument('result', help='result pbf file') return p.parse_args() def process(filter, source_filename, result_filename, debug=false): o5m_unfiltered_filename = 'a' o5m_filtered_filename = 'a' print 'pbf to o5m' cmd='osmconvert {filename} -o={o5m_unfiltered_filename}'.format(filename=source_filename, o5m_unfiltered_filename=o5m_unfiltered_filename) if debug: print cmd os.system(cmd) print 'o5m tag filtration' cmd='osmfilter {o5m_unfiltered_filename} --drop-author --keep="{fl}" --out-o5m >{o5m_filtered_filename}'.format(o5m_unfiltered_filename=o5m_unfiltered_filename, fl=filter, o5m_filtered_filename = o5m_filtered_filename) if debug: print cmd os.system(cmd) print 'o5m to pbf' cmd='osmconvert {o5m_filtered_filename} -o={result_filename}'.format(o5m_filtered_filename=o5m_filtered_filename, result_filename = result_filename) if debug: print cmd os.system(cmd) args = get_args() process(filter=args.filter, source_filename = args.source, result_filename = args.result)
Python
0.000001
@@ -77,16 +77,43 @@ filter%0A%0A +import tempfile%0Aimport os%0A%0A def get_ @@ -558,39 +558,189 @@ m_unfiltered -_filename = 'a' + = tempfile.NamedTemporaryFile(suffix=%22.o5m%22)%0A o5m_filtered = tempfile.NamedTemporaryFile(suffix=%22.o5m%22)%0A o5m_unfiltered_filename = o5m_unfiltered.name %0A o5m @@ -764,11 +764,25 @@ e = -'a' +o5m_filtered.name %0A @@ -1967,16 +1967,109 @@ tem(cmd) +%0A %0A os.remove(o5m_unfiltered_filename)%0A os.remove(o5m_filtered_filename) %0A%0Aargs =
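The diff above swaps the hard-coded intermediate filenames for `tempfile.NamedTemporaryFile` objects whose `.name` is handed to the external commands and removed afterwards. A rough sketch of just that file-handling pattern, with the osmconvert/osmfilter invocations left out so it runs anywhere (the suffix is illustrative):

import os
import tempfile

# Create a named temporary file whose path can be passed on a command line,
# then clean it up explicitly, mirroring the os.remove calls the diff adds.
tmp = tempfile.NamedTemporaryFile(suffix='.o5m', delete=False)
print(tmp.name)        # e.g. /tmp/tmpab12cd.o5m, usable as the -o= target
tmp.close()
os.remove(tmp.name)

`delete=False` keeps the explicit `os.remove` as the single point of cleanup in this sketch; the diff itself keeps the default and removes the files by name at the end of `process`.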
e24f7cbbc1495ccefeeb4c17d78b3afcc93208e1
Remove the @skip decorator for the whole class:
test/forward/TestForwardDeclaration.py
test/forward/TestForwardDeclaration.py
"""Test that forward declaration of a data structure gets resolved correctly.""" import os, time import unittest2 import lldb from lldbtest import * @unittest2.skip("rdar://problem/8641483 ./dotest.py -v -t -w forward seg faults") class ForwardDeclarationTestCase(TestBase): mydir = "forward" @unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin") def test_with_dsym_and_run_command(self): """Display *bar_ptr when stopped on a function with forward declaration of struct bar.""" self.buildDsym() self.forward_declaration() # rdar://problem/8546815 # './dotest.py -v -t forward' fails for test_with_dwarf_and_run_command @unittest2.expectedFailure def test_with_dwarf_and_run_command(self): """Display *bar_ptr when stopped on a function with forward declaration of struct bar.""" self.buildDwarf() self.forward_declaration() def forward_declaration(self): """Display *bar_ptr when stopped on a function with forward declaration of struct bar.""" exe = os.path.join(os.getcwd(), "a.out") self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET) # Break inside the foo function which takes a bar_ptr argument. self.expect("breakpoint set -n foo", BREAKPOINT_CREATED, startstr = "Breakpoint created: 1: name = 'foo', locations = 1") self.runCmd("run", RUN_SUCCEEDED) # The stop reason of the thread should be breakpoint. self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT, substrs = ['state is stopped', 'stop reason = breakpoint']) # The breakpoint should have a hit count of 1. self.expect("breakpoint list", BREAKPOINT_HIT_ONCE, substrs = [' resolved, hit count = 1']) # This should display correctly. # Note that the member fields of a = 1 and b = 2 is by design. self.expect("frame variable -t *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY, substrs = ['(struct bar *) bar_ptr = ', '(int) a = 1', '(int) b = 2']) # And so should this. self.expect("expression *bar_ptr", VARIABLES_DISPLAYED_CORRECTLY, substrs = ['(struct bar)', '(int) a = 1', '(int) b = 2']) if __name__ == '__main__': import atexit lldb.SBDebugger.Initialize() atexit.register(lambda: lldb.SBDebugger.Terminate()) unittest2.main()
Python
0.001719
@@ -148,90 +148,8 @@ *%0A%0A -@unittest2.skip(%22rdar://problem/8641483 ./dotest.py -v -t -w forward seg faults%22)%0A clas @@ -493,24 +493,92 @@ laration()%0A%0A + # rdar://problem/8648070%0A # 'expression *bar_ptr' seg faults%0A # rdar:/ @@ -594,16 +594,16 @@ 8546815%0A - # '. @@ -689,23 +689,71 @@ st2. -expectedFailure +skip(%22rdar://problem/8648070 'expression *bar_ptr' seg faults%22) %0A
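The diff above moves a blanket `@unittest2.skip` off the class and swaps an `@expectedFailure` for a targeted `@skip` on one test. The two decorators behave differently at run time; a small self-contained illustration with the standard-library `unittest`, which has carried the same decorators since Python 2.7, whereas the entry uses the `unittest2` backport:

import unittest

class DecoratorDemo(unittest.TestCase):
    @unittest.skip("never executed, reported as a skip")
    def test_skipped(self):
        self.fail("this body does not run")

    @unittest.expectedFailure
    def test_expected_failure(self):
        # Runs, is allowed to fail, and is reported as an expected failure
        # rather than an error; it would be flagged if it suddenly passed.
        self.assertEqual(1, 2)

if __name__ == '__main__':
    unittest.main()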
d4465950937f961d477041e1ff21cd9c24bceada
fix typos
km3modules/hits.py
km3modules/hits.py
"""Hit processing classes. This module defines 2 base classes: HitStatistics (e.g. to count n_doms) and HitSelector (e.g. FirstHits). """ import numpy as np import pandas as pd # noqa from scipy.stats import trimboth from km3pipe.dataclasses import HitSeries from km3pipe import Module class HitStatistics(Module): """Compute stuff on hits. Parameters ---------- key_in: str, default='Hits' Key of the hits to use. key_out: str, default='None' Key to write into. """ def __init__(self, **kwargs): super(HitStatistics, self).__init__(**kwargs) self.key_in = self.get('key_in') or 'Hits' self.key_out = self.get('key_out') or 'SelectedHits' def process(self, blob): """Read Hits, convert to pandas, compute stuff. """ hits = blob[self.key_in] hits = hits.serialise(to='pandas') blob[self.key_out] = self.compute(hits) return blob def compute(self, pd_hits): return class NDoms(HitStatistics): """Count active Doms. Parameters ---------- key_in: str, default='Hits' Key of the hits to use. key_out: str, default='n_doms' Key to write into. """ def __init__(self, **kwargs): super(NDoms, self).__init__(**kwargs) self.key_in = self.get('key_in') or 'Hits' self.key_out = self.get('key_out') or 'n_doms' def compute(self, pd_hits): dus = np.unique(pd_hits['dom_id']) return len(dus) class HitSelector(Module): """Select hits according to a criterion. Defaults to ``return hits``. Parameters ---------- key_in: str, default='Hits' Key of the hits to select. key_out: str, default='SelectedHits' Key to write into. """ def __init__(self, **kwargs): super(HitSelector, self).__init__(**kwargs) self.key_in = self.get('key_in') or 'Hits' self.key_out = self.get('key_out') or 'SelectedHits' def process(self, blob): """Read Hits, call ``process_hits``, store result. """ hits = blob[self.key_in] hits = self.process_hits(hits) blob[self.key_out] = hits return blob def process_hits(self, hits): """Convert to pandas, call ``select_hits``, convert to HitSeries. """ hits = hits.serialise(to='pandas') hits = self.select_hits(hits) return HitSeries.from_table(hits, event_id=None) def select_hits(self, pd_hits): """Defaults to nothing: ``return hits``.""" return pd_hits class FirstHits(HitSelector): """Select first hits on each dom. Parameters ---------- key_in: str, default='Hits' Key of the hits to select. key_out: str, default='FirstHits' Key to write into. """ def __init__(self, **kwargs): super(FirstHits, self).__init__(**kwargs) self.key_in = self.get('key_in') or 'Hits' self.key_out = self.get('key_out') or 'FirstHits' def process_hits(self, hits): # do not convert to pandas, the HitSeries method does the job. return hits.first_hits class TrimmedHits(HitSelector): """Select hits in a percentile range. E.g. pos_z, time, pos_3d. Parameters ---------- which: str, default=None The quantity to run the selection on. If None, just trim the hits in the order in which they appear. Otherwise sort by 'which'. Possible values are [None, 'time', 'pos_z']. trim: float, default=0.1 The amount to trim. Remove from trim to 1-trim. key_in: str, default='Hits' Key of the hits to trim. key_out: str, default='TrimmedHits' Key to write into. 
""" def __init__(self, **kwargs): super(TrimmedHits, self).__init__(**kwargs) self.which = self.get('which') or 'time' self.trim = self.get('trim') or 0.1 self.key_in = self.get('key_in') or 'Hits' self.key_out = self.get('key_out') or 'TrimmedHits' def select_hits(self, pd_hits): if self.which is None: n = len(pd_hits) idx = trimboth(range(n), self.trim) return pd_hits.iloc[idx] if self.which in {'time', 'pos_z'}: # noqa lo = df.quantile(self.trim)[self.which] hi = df.quantile(1 - self.trim)[self.which] return pd_hits.query( '{l} < {w} and {w} <= {h}'.format(l=lo, h=hi, w=self.which)) # if not matching anything above raise KeyError("which: '{}' not understood.".format(self.which))
Python
0.999974
@@ -1739,24 +1739,16 @@ efault=' -Selected Hits'%0A @@ -1960,32 +1960,24 @@ y_out') or ' -Selected Hits'%0A%0A d @@ -2428,38 +2428,38 @@ ies. -from_table(hits, event_id=None +deserialise(hits, fmt='pandas' )%0A%0A @@ -4273,18 +4273,23 @@ lo = -df +pd_hits .quantil @@ -4334,10 +4334,15 @@ i = -df +pd_hits .qua
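`TrimmedHits.select_hits` in the entry above keeps only the hits whose chosen quantity lies between the `trim` and `1 - trim` quantiles. The same quantile-plus-`query` step in isolation, assuming pandas is available (the entry already imports it) and using a throwaway frame with a single `time` column:

import pandas as pd

trim = 0.1
hits = pd.DataFrame({'time': range(100)})

# Quantile bounds, then a query string built the same way as in select_hits.
lo = hits.quantile(trim)['time']
hi = hits.quantile(1 - trim)['time']
kept = hits.query('{l} < time and time <= {h}'.format(l=lo, h=hi))

print(len(kept))   # roughly 80 of the 100 rows survive a 10% trim per side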
fc3be727f7ab6a05da3d4e456482205ebb66b41a
Fix typo in parameter name
flocker/node/agents/functional/test_ebs.py
flocker/node/agents/functional/test_ebs.py
# Copyright Hybrid Logic Ltd. See LICENSE file for details. """ Functional tests for ``flocker.node.agents.ebs`` using an EC2 cluster. """ from uuid import uuid4 from bitmath import Byte from boto.exception import EC2ResponseError from twisted.trial.unittest import SkipTest from eliot.testing import LoggedMessage, capture_logging from ..ebs import _wait_for_volume, BOTO_EC2RESPONSE_ERROR from ..blockdevice import InformationUnavailable from .._logging import ( AWS_CODE, AWS_MESSAGE, AWS_REQUEST_ID, BOTO_LOG_HEADER, ) from ..test.test_blockdevice import make_iblockdeviceapi_tests from ..test.blockdevicefactory import ( InvalidConfig, ProviderType, get_blockdeviceapi_args, get_blockdeviceapi_with_cleanup, get_device_allocation_unit, get_minimum_allocatable_size, ) def ebsblockdeviceapi_for_test(test_case): """ Create an ``EBSBlockDeviceAPI`` for use by tests. """ return get_blockdeviceapi_with_cleanup(test_case, ProviderType.aws) class EBSBlockDeviceAPIInterfaceTests( make_iblockdeviceapi_tests( blockdevice_api_factory=( lambda test_case: ebsblockdeviceapi_for_test( test_case=test_case, ) ), minimum_allocatable_size=get_minimum_allocatable_size(), device_allocation_unit=get_device_allocation_unit(), unknown_blockdevice_id_factory=lambda test: u"vol-00000000", ) ): """ Interface adherence Tests for ``EBSBlockDeviceAPI``. """ def test_foreign_volume(self): """ Test that ``list_volumes`` lists only those volumes belonging to the current Flocker cluster. """ try: cls, kwargs = get_blockdeviceapi_args(ProviderType.aws) except InvalidConfig as e: raise SkipTest(str(e)) ec2_client = kwargs["ec2_client"] requested_volume = ec2_client.connection.create_volume( int(Byte(self.minimum_allocatable_size).to_GiB().value), ec2_client.zone) self.addCleanup(ec2_client.connection.delete_volume, requested_volume.id) _wait_for_volume(requested_volume, u'', u'creating', u'available') self.assertEqual(self.api.list_volumes(), []) def test_foreign_cluster_volume(self): """ Test that list_volumes() excludes volumes belonging to other Flocker clusters. """ blockdevice_api2 = ebsblockdeviceapi_for_test( test_case=self, ) flocker_volume = blockdevice_api2.create_volume( dataset_id=uuid4(), size=self.minimum_allocatable_size, ) self.assert_foreign_volume(flocker_volume) @capture_logging(lambda self, logger: None) def test_boto_ec2response_error(self, logger): """ 1. Test that invalid parameters to Boto's EBS API calls raise the right exception after logging to Eliot. 2. Verify Eliot log output for expected message fields from logging decorator for boto.exception.EC2Exception originating from boto.ec2.connection.EC2Connection. """ # Test 1: Create volume with size 0. # Raises: EC2ResponseError self.assertRaises(EC2ResponseError, self.api.create_volume, dataset_id=uuid4(), size=0,) # Test 2: Set EC2 connection zone to an invalid string. # Raises: EC2ResponseError self.api.zone = u'invalid_zone' self.assertRaises( EC2ResponseError, self.api.create_volume, dataset_id=uuid4(), size=self.minimum_allocatable_size, ) # Validate decorated method for exception logging # actually logged to ``Eliot`` logger. 
expected_message_keys = {AWS_CODE.key, AWS_MESSAGE.key, AWS_REQUEST_ID.key} for logged in LoggedMessage.of_type(logger.messages, BOTO_EC2RESPONSE_ERROR,): key_subset = set(key for key in expected_message_keys if key in logged.message.keys()) self.assertEqual(expected_message_keys, key_subset) @capture_logging(None) def test_boto_request_logging(self, logger): """ Boto is configured to send log events to Eliot when it makes an AWS API request. """ self.api.list_volumes() messages = list( message for message in logger.messages if message.get("message_type") == BOTO_LOG_HEADER ) self.assertNotEqual( [], messages, "Didn't find Boto messages in logged messages {}".format( messages ) ) def test_next_device_in_use(self): """ ``_next_device`` skips devices indicated as being in use. Ideally we'd have a test for this using the public API, but this only occurs if we hit eventually consistent ignorance in the AWS servers so it's hard to trigger deterministically. """ result = self.api._next_device(self.api.compute_instance_id(), [], {u"/dev/sdf"}) self.assertEqual(result, u"/dev/sdg") def test_device_path_information_unavailable(self): """ If ``get_device_path`` doesn't have the OS device path for an attached volume cached then it raises ``InformationUnavailable``. """ # Create and attach the volume using a second instance so that the # first one can't have anything in its cache. another_api = self.blockdevice_api_factory(test_case=self) volume = another_api.create_volume( dataset_isd=uuid4(), size=self.minimum_allocatable_size ) another_api.attach_volume( volume.blockdevice_id, another_api.compute_instance_id() ) exception = self.assertRaises( InformationUnavailable, self.api.get_device_path, volume.blockdevice_id, ) self.assertEqual(volume.blockdevice_id, exception.blockdevice_id)
Python
0.000558
@@ -5892,17 +5892,16 @@ ataset_i -s d=uuid4(
665943c0736cd83662bc8bebe072045f163b28c9
Revise func docstrring
alg_insertion_sort.py
alg_insertion_sort.py
from __future__ import absolute_import from __future__ import print_function from __future__ import division def insertion_sort(nums): """Insertion Sort algortihm. Time complexity: O(n^2). Space complexity: O(1). """ # Starting at pos i >= 1, swap (num[j-1], num[j]), for j=i,i-1,...,1, # if order is not correct. for i in range(1, len(nums)): for j in range(i, -1, -1): if j > 0 and nums[j - 1] > nums[j]: nums[j - 1], nums[j] = nums[j], nums[j - 1] def main(): nums = [54, 26, 93, 17, 77, 31, 44, 55, 20] insertion_sort(nums) print(nums) if __name__ == '__main__': main()
Python
0.000013
@@ -147,17 +147,17 @@ sertion -S +s ort algo
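The entry above swaps adjacent elements inside a fixed `for j in range(i, -1, -1)` loop. The more common textbook formulation uses a `while` loop that stops as soon as the order holds, avoiding the walk all the way to the front once the element is placed; a short sketch of that variant (same idea, not the entry's code):

def insertion_sort_while(nums):
    """Insertion sort with an early-exit inner loop. Time: O(n^2), space: O(1)."""
    for i in range(1, len(nums)):
        j = i
        while j > 0 and nums[j - 1] > nums[j]:
            nums[j - 1], nums[j] = nums[j], nums[j - 1]
            j -= 1

nums = [54, 26, 93, 17, 77, 31, 44, 55, 20]
insertion_sort_while(nums)
print(nums)   # [17, 20, 26, 31, 44, 54, 55, 77, 93]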
7ee2bae6ef9fa083cc83e04e283dc9789d513870
Make Ctrl-C correctly abort `smugcli login`.
smugcli/smugmug_oauth.py
smugcli/smugmug_oauth.py
# SumgMug OAuth client import bottle import rauth import requests import requests_oauthlib import socket import subprocess import threading from urllib import parse import webbrowser from wsgiref.simple_server import make_server OAUTH_ORIGIN = 'https://secure.smugmug.com' REQUEST_TOKEN_URL = OAUTH_ORIGIN + '/services/oauth/1.0a/getRequestToken' ACCESS_TOKEN_URL = OAUTH_ORIGIN + '/services/oauth/1.0a/getAccessToken' AUTHORIZE_URL = OAUTH_ORIGIN + '/services/oauth/1.0a/authorize' API_ORIGIN = 'https://api.smugmug.com' class SmugMugOAuth(object): def __init__(self, api_key): self._service = self._create_service(api_key) def _get_free_port(self): s = socket.socket() s.bind(('', 0)) port = s.getsockname()[1] s.close() return port def request_access_token(self): port = self._get_free_port() state = {'running': True, 'port': port} app = bottle.Bottle() app.route('/', callback=lambda s=state: self._index(s)) app.route('/callback', callback=lambda s=state: self._callback(s)) httpd = make_server('', port, app) def _handle_requests(httpd, state): try: while state['running']: httpd.handle_request() except: pass thread = threading.Thread(target=_handle_requests, args=(httpd, state)) thread.daemon = True try: thread.start() login_url = 'http://localhost:%d/' % port print('Started local server.') print('Visit %s to grant SmugCli access to your SmugMug account.' % login_url) print('Opening page in default browser...') if self._is_cygwin(): try: return_code = subprocess.call(['cygstart', login_url], stdout=subprocess.PIPE, stderr=subprocess.PIPE) success = (return_code == 0) except: success = False else: success = webbrowser.open(login_url) if not success: print('Could not start default browser automatically.') print('Please visit %s to complete login process.' % login_url) while thread.is_alive(): thread.join(1) finally: httpd.server_close() return state['access_token'], state['access_token_secret'] def get_oauth(self, access_token): return requests_oauthlib.OAuth1( self._service.consumer_key, self._service.consumer_secret, resource_owner_key=access_token[0], resource_owner_secret=access_token[1]) def _create_service(self, key): return rauth.OAuth1Service( name='smugcli', consumer_key=key[0], consumer_secret=key[1], request_token_url=REQUEST_TOKEN_URL, access_token_url=ACCESS_TOKEN_URL, authorize_url=AUTHORIZE_URL, base_url=API_ORIGIN + '/api/v2') def _index(self, state): """This route is where our client goes to begin the authorization process.""" (state['request_token'], state['request_token_secret']) = self._service.get_request_token( params={'oauth_callback': 'http://localhost:%d/callback' % state['port']}) auth_url = self._service.get_authorize_url(state['request_token']) auth_url = self._add_auth_params(auth_url, access='Full', permissions='Modify') bottle.redirect(auth_url) def _callback(self, state): """This route is where we receive the callback after the user accepts or rejects the authorization request.""" (state['access_token'], state['access_token_secret']) = self._service.get_access_token( state['request_token'], state['request_token_secret'], params={'oauth_verifier': bottle.request.query['oauth_verifier']}) state['running'] = False return 'Login successful. You may close this window.' 
def _add_auth_params(self, auth_url, access, permissions): parts = parse.urlsplit(auth_url) query = parse.parse_qsl(parts.query, True) query.append(('Access', access)) query.append(('Permissions', permissions)) new_query = parse.urlencode(query, True) return parse.urlunsplit( (parts.scheme, parts.netloc, parts.path, new_query, parts.fragment)) def _is_cygwin(self): try: return_code = subprocess.call(['which', 'cygstart'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) return (return_code == 0) except: return False
Python
0
@@ -63,33 +63,31 @@ ests -%0Aimport requests_oauthlib +_oauthlib%0Aimport signal %0Aimp @@ -115,16 +115,27 @@ process%0A +import sys%0A import t @@ -189,54 +189,8 @@ wser -%0Afrom wsgiref.simple_server import make_server %0A%0AOA @@ -838,19 +838,16 @@ port -%7D%0A app = +, 'app': bot @@ -858,24 +858,34 @@ Bottle() +%7D %0A +state%5B' app +'%5D .route(' @@ -933,19 +933,28 @@ s))%0A +state%5B' app +'%5D .route(' @@ -1013,112 +1013,115 @@ s))%0A +%0A -httpd = make_server('', port, app)%0A%0A def _handle_requests(httpd, state):%0A try:%0A while +def abort(signum, frame):%0A print('SIGINT received, aborting...')%0A state%5B'app'%5D.close()%0A sta @@ -1137,174 +1137,199 @@ ng'%5D -: +=False %0A - httpd.handle_request()%0A except:%0A pass%0A thread = threading.Thread(target=_handle_requests,%0A args=(httpd, state) +sys.exit(1)%0A signal.signal(signal.SIGINT, abort)%0A%0A def _start_web_server():%0A bottle.run(state%5B'app'%5D, port=port)%0A thread = threading.Thread(target=_start_web_server )%0A @@ -1351,16 +1351,17 @@ = True%0A +%0A try: @@ -1578,12 +1578,10 @@ ing -page +%25s in @@ -1595,24 +1595,36 @@ browser...' + %25 login_url )%0A if s @@ -2157,16 +2157,37 @@ while +state%5B'running'%5D and thread.i @@ -2243,21 +2243,21 @@ -httpd.server_ +state%5B'app'%5D. clos @@ -3729,24 +3729,49 @@ rifier'%5D%7D)%0A%0A + state%5B'app'%5D.close()%0A state%5B'r
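`_add_auth_params` in the entry above appends the `Access` and `Permissions` query parameters by round-tripping the URL through `urlsplit`, `parse_qsl`, `urlencode` and `urlunsplit`. The same pattern in isolation, standard library only, with a made-up authorize URL and token:

from urllib import parse

url = 'https://example.com/authorize?oauth_token=abc'

parts = parse.urlsplit(url)
query = parse.parse_qsl(parts.query, True)   # keep blank values, as the entry does
query.append(('Access', 'Full'))
query.append(('Permissions', 'Modify'))

print(parse.urlunsplit((parts.scheme, parts.netloc, parts.path,
                        parse.urlencode(query, True), parts.fragment)))
# https://example.com/authorize?oauth_token=abc&Access=Full&Permissions=Modify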
61759cc90d5c95c5877d1ec8b538279c16101437
add star back in
clastic/routing.py
clastic/routing.py
# -*- coding: utf-8 -*- import re BINDING = re.compile(r'\<(?P<name>[A-Za-z_]\w*)(?P<op>[?+:]*)(?P<type>\w+)*\>') TYPES = {'int': int, 'float': float, 'unicode': unicode, 'str': unicode} _path_seg_tmpl = '(?P<%s>(/[\w%%\d])%s)' _OP_ARITY_MAP = {'': False, # whether or not an op is "multi" '?': False, ':': False, '+': True, '*': True} def build_converter(converter, optional=False, multi=False): if multi: def multi_converter(value): if not value and optional: return [] return [converter(v) for v in value.split('/')[1:]] return multi_converter def single_converter(value): if not value and optional: return None return converter(value.replace('/', '')) return single_converter def collapse_token(text, token=None, sub=None): "Collapses whitespace to spaces by default" if token is None: sub = sub or ' ' return ' '.join(text.split()) else: sub = sub or token return sub.join([s for s in text.split(token) if s]) class RoutePattern(object): def __init__(self, pattern): self.pattern = pattern self.regex, self.converters = self._compile(pattern) def match_url(self, url): ret = {} match = self.regex.match(url) if not match: return None groups = match.groupdict() try: for conv_name, conv in self.converters.items(): ret[conv_name] = conv(groups[conv_name]) except KeyError: return None # TODO except (TypeError, ValueError): return None return ret def _compile(self, pattern): processed = [] var_converter_map = {} for part in pattern.split('/'): match = BINDING.match(part) if not match: processed.append(part) continue parsed = match.groupdict() name, type_name, op = parsed['name'], parsed['type'], parsed['op'] if name in var_converter_map: raise ValueError('duplicate path binding %s' % name) if op: if op == ':': op = '' if not type_name: raise ValueError('%s expected a type specifier' % part) try: converter = TYPES[type_name] except KeyError: raise ValueError('unknown type specifier %s' % type_name) else: converter = unicode try: multi = _OP_ARITY_MAP[op] except KeyError: _tmpl = 'unknown arity operator %r, expected one of %r' raise ValueError(_tmpl % (op, _OP_ARITY_MAP.keys())) var_converter_map[name] = build_converter(converter, multi=multi) path_seg_pattern = _path_seg_tmpl % (name, op) processed[-1] += path_seg_pattern regex = re.compile('/'.join(processed)) return regex, var_converter_map def _main(): rp = RoutePattern('/a/b/<t:int>/thing/<das+int>') d = rp.match_url('/a/b/1/thing/1/2/3/4/') print d d = rp.match_url('/a/b/1/thing/hi/') print d if __name__ == '__main__': _main() """ Routing notes ------------- After being betrayed by Werkzeug routing in too many fashions, and after reviewing many designs, a new routing scheme has been designed. Clastic's existing pattern (inherited from Werkzeug) does have some nice things going for it. Django routes with regexes, which can be semantically confusing, bug-prone, and unspecialized for URLs. Clastic/Werkzeug offer a constrained syntax/grammar that is specialized to URL pattern generation. It aims to be: * Clear * Correct * Validatable The last item is of course the most important. (Lookin at you Werkzeug.) Since Werkzeug's constraining of syntax led to a better system, Clastic's routing took it a step further. Take a look at some examples: 1. '/about/' 2. '/blog/{post_id?int}' 3. '/api/{service}/{path+}' 4. '/polish_maths/{operation:str}/{numbers+float}' 1. Static patterns work as expected. 2. The '?' indicates "zero or one", like regex. The post_id will be converted to an integer. Invalid or missing values yield a value of None into the 0-or-1 binding. 3. 
Bindings are of type 'str' (i.e., string/text/unicode object) by default, so here we have a single-segment, string 'service' binding. We also accept a 'path' binding. '+' means 1-or-more, and the type is string. 4. Here we do some Polish-notation math. The operation comes first. Using an explicit 'str' is ok. Numbers is a repeating path of floats. Besides correctness, there are a couple improvements over Werkzeug. The system does not mix type and arity (Werkzeug's "path" converter was special because it consumed more than one path segment). There are just a few built-in converters, for the convenience of easy type conversion, not full-blown validation. It's always confusing to get a vague 404 when better error messages could have been produced (there are middlewares available for this). (Also, in Werkzeug I found the commonly-used '<path:path>' to be confusing. Which is the variable, which is the converter? {path+} is better ;)) # TODO: should slashes be optional? _shouldn't they_? # TODO: detect invalid URL pattern # TODO: ugly corollary? unicode characters. (maybe) """
Python
0.000002
@@ -88,16 +88,17 @@ %3Cop%3E%5B?+: +* %5D*)(?P%3Ct @@ -3315,16 +3315,172 @@ rint d%0A%0A + d = rp.match_url('/a/b/1/thing/')%0A print d%0A%0A rp = RoutePattern('/a/b/%3Ct:int%3E/thing/%3Cdas*int%3E')%0A d = rp.match_url('/a/b/1/thing/')%0A print d%0A%0A %0Aif __na
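The `_compile` step in the entry above turns each `<name+type>` binding into a named regex group built from `_path_seg_tmpl` plus an arity suffix, and pairs it with a converter that splits the captured path on `/`. Stripped down to a single hand-written binding (the route, the group name and the values are illustrative; single-character segments are used to stay within the template's per-segment character class):

import re

# One '+' (one-or-more) binding named 'numbers', shaped like _path_seg_tmpl.
seg = r'(?P<numbers>(/[\w%\d])+)'
regex = re.compile('/math/add' + seg)

m = regex.match('/math/add/1/2/3')
print(m.group('numbers'))                                      # /1/2/3
print([float(v) for v in m.group('numbers').split('/')[1:]])   # [1.0, 2.0, 3.0]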
1b47086e3ef45b6e668ed330ac017badc0afae96
Add opbeat contrib
defprogramming/settings.py
defprogramming/settings.py
# Django settings for defprogramming project. import os BASE_DIR = os.path.abspath(os.path.dirname(__file__)) ADMINS = () MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db/development.sqlite3'), } } TIME_ZONE = 'America/Chicago' LANGUAGE_CODE = 'en-us' SITE_ID = 1 USE_I18N = True USE_L10N = True MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') STATIC_URL = '/static/' ADMIN_MEDIA_PREFIX = '/static/admin/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), ) STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) SECRET_KEY = 'secret' TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'urls' TEMPLATE_DIRS = ( os.path.join(BASE_DIR, 'templates/'), ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', 'django.contrib.syndication', 'django.contrib.flatpages', 'django.contrib.sitemaps', 'south', 'quotes', 'sorl.thumbnail', 'django_medusa', ) MEDUSA_RENDERER_CLASS = "django_medusa.renderers.DiskStaticSiteRenderer" MEDUSA_MULTITHREAD = False MEDUSA_DEPLOY_DIR = os.path.join(BASE_DIR, 'cache', 'html') DEFAULT_CACHE_TIME = 60 * 15 try: from settings_local import * # NOQA except ImportError: from warnings import warn msg = "You don't have settings_local.py file, using defaults settings." try: # don't work in Python 2.4 or before warn(msg, category=ImportWarning) except NameError: warn(msg)
Python
0
@@ -1673,16 +1673,45 @@ edusa',%0A + 'opbeat.contrib.django',%0A )%0A%0AMEDUS
9bb587c6650c0de64c1ec582c6b3f73e38e300c9
Correct max lines on log clipping
changes/listeners/mail.py
changes/listeners/mail.py
from __future__ import absolute_import, print_function from flask import current_app, render_template from flask_mail import Message, sanitize_address from changes.config import db, mail from changes.constants import Result, Status from changes.models import Build, TestGroup, ProjectOption, LogSource, LogChunk def build_uri(path): return str('{base_uri}/{path}'.format( base_uri=current_app.config['BASE_URI'].rstrip('/'), path=path.lstrip('/'), )) def get_test_failures(build): return sorted([t.name_sha for t in db.session.query( TestGroup.name_sha, ).filter( TestGroup.build_id == build.id, TestGroup.result == Result.failed, TestGroup.num_leaves == 0, )]) def did_cause_breakage(build): """ Compare with parent build (previous build) and confirm if current build provided any change in state (e.g. new failures). """ if build.result != Result.failed: return False parent = Build.query.filter( Build.revision_sha != None, # NOQA Build.patch_id == None, Build.revision_sha != build.revision_sha, Build.date_created < build.date_created, Build.status == Status.finished, ).order_by(Build.date_created.desc()).first() # if theres no parent, this build must be at fault if parent is None: return True if parent.result == Result.passed: return True current_failures = get_test_failures(build) # if we dont have any testgroup failures, then we cannot identify the cause # so we must notify the individual if not current_failures: return True parent_failures = get_test_failures(parent) if parent_failures != current_failures: return True return False def send_notification(build, recipients): # TODO(dcramer): we should send a clipping of a relevant build log test_failures = TestGroup.query.filter( TestGroup.build_id == build.id, TestGroup.result == Result.failed, TestGroup.num_leaves == 0, ).order_by(TestGroup.name.asc()) num_test_failures = test_failures.count() test_failures = test_failures[:25] # TODO(dcramer): we should probably find a better way to do logs primary_log = LogSource.query.filter( LogSource.build_id == build.id, ).order_by(LogSource.date_created.asc()).first() if primary_log: queryset = LogChunk.query.filter( LogChunk.source_id == primary_log.id, ).order_by(LogChunk.offset.desc()) tail = queryset.limit(1).first() log_chunks = list(queryset.filter( LogChunk.offset <= tail.offset, (LogChunk.offset + LogChunk.size) >= max(tail.offset - 5000, 0), ).order_by(LogChunk.offset.asc())) log_clipping = ''.join(l.text for l in log_chunks)[-5000:] # only return the last 25 lines log_clipping = '\n'.join(log_clipping.splitlines()[:-25]) subject = u"Build {result} - {target} ({project})".format( result=unicode(build.result), target=build.target or build.revision_sha or 'Unknown', project=build.project.name, ) for testgroup in test_failures: testgroup.uri = build_uri('/testgroups/{0}/'.format(testgroup.id.hex)) build.uri = build_uri('/builds/{0}/'.format(build.id.hex)) context = { 'build': build, 'total_test_failures': num_test_failures, 'test_failures': test_failures, } if primary_log: context['build_log'] = { 'text': log_clipping, 'name': primary_log.name, 'link': '{0}logs/{1}/'.format(build.uri, primary_log.id.hex), } msg = Message(subject, recipients=recipients, extra_headers={ 'Reply-To': ', '.join(sanitize_address(r) for r in recipients), }) msg.body = render_template('listeners/mail/notification.txt', **context) msg.html = render_template('listeners/mail/notification.html', **context) mail.send(msg) def build_finished_handler(build, **kwargs): # get relevant options options = dict( db.session.query( 
ProjectOption.name, ProjectOption.value ).filter( ProjectOption.project_id == build.project_id, ProjectOption.name.in_([ 'mail.notify-author', 'mail.notify-addresses', 'mail.notify-addresses-revisions', ]) ) ) recipients = [] if options.get('mail.notify-author', '1') == '1': author = build.author if author: recipients.append(u'%s <%s>' % (author.name, author.email)) if options.get('mail.notify-addresses'): recipients.extend( # XXX(dcramer): we dont have option validators so lets assume people # enter slightly incorrect values [x.strip() for x in options['mail.notify-addresses'].split(',')] ) if not build.patch_id: if options.get('mail.notify-addresses-revisions'): recipients.extend( [x.strip() for x in options['mail.notify-addresses-revisions'].split(',')] ) if not recipients: return if not did_cause_breakage(build): return send_notification(build, recipients)
Python
0.000001
@@ -2953,12 +2953,12 @@ s()%5B -: -25 +: %5D)%0A%0A
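The commit above ("Correct max lines on log clipping") hinges on the difference between `splitlines()[:-25]` and `splitlines()[-25:]`: the first drops the last 25 lines, the second keeps only the last 25, which is the intended tail of the build log. A small illustration with synthetic log lines:

text = '\n'.join('line %d' % i for i in range(100))

print(len(text.splitlines()[:-25]))   # 75, everything except the tail
print(len(text.splitlines()[-25:]))   # 25, just the tail (what the fix wants)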
22029728795a850d1b57824c6a91ddd5378f9760
fix some typos
robj/__init__.py
robj/__init__.py
# # Copyright (c) 2010 rPath, Inc. # # This program is distributed under the terms of the MIT License as found # in a file called LICENSE. If it is not present, the license # is always available at http://www.opensource.org/licenses/mit-license.php. # # This program is distributed in the hope that it will be useful, but # without any waranty; without even the implied warranty of merchantability # or fitness for a particular purpose. See the MIT License for full details. # """ rObj REST Client Library This library is primarily intened for use with rPath provied REST APIs, but should be able to interact with other REST based services that follow similar best practices. Example usage: >>> import robj >>> api = robj.connect('http://www.rpath.org/api/') >>> products = api.products >>> print products[0].name """ from robj.glue import HTTPClient as _HTTPClient from robj.lib.log import setupLogging as _setupLogging __ALL__ = ['rObj', 'connect', 'open', ] def rObj(uri, headers=None, maxClients=None, maxConnections=None, logging=True): """ @param uri: URI for connectiong to the root of the desired web service. This may contain user information and must be http or https. @type uri: str @param headers: Any headers that should be included in all requets. @type headers: dict @param maxClients: The maximum number of workers that will be created to handle requets. Works are created as needed, rather than being preallocated. (default: 10) @type maxClients: int @param maxConnections: The maximum number of connections each client thread should cache. Client threads only cache one connection per host. This should only matter if you are talking to multiple hosts. (default: 2) @type maxConnections: int @param logging: Set up a logger. @type logging: boolean """ # Setup logging if requested. if logging: # FIXME: Let people specify log files somehow. _setupLogging() # Instantiate the http client. client = _HTTPClient(uri, headers=headers, maxClients=maxClients, maxConnections=maxConnections) # Get the root rObj robj = client.do_GET('/') return robj connect = open = rObj
Python
0.999999
@@ -533,16 +533,17 @@ ly inten +d ed for u @@ -561,16 +561,17 @@ th provi +d ed REST
1b6b7de39dcb80ff083bd21c6665c0dcaa5200fa
Update last_api_activity in Tooltron add_card_event view.
robocrm/views.py
robocrm/views.py
from django.http import HttpResponse from django.core.exceptions import PermissionDenied, ObjectDoesNotExist from django.contrib.auth import authenticate, login from api.models import APIRequest from django.views.decorators.http import require_POST from projects.models import Project from .models import * def roboauth(request, rfid_tag, mach_num): r = RoboUser.objects.filter(rfid=rfid_tag) if r.count() > 0: us = r[0] else: return HttpResponse("0") auth_machines = us.machines.filter(id=mach_num) if auth_machines.count() > 0 : return HttpResponse("1") else : return HttpResponse("0") def roboauthall(request, rfid_tag): r = RoboUser.objects.filter(rfid=rfid_tag) if r.count() > 0: us = r[0] else: return HttpResponse("0") auth = 0 for mach in us.machines.all(): auth |= 1 << int(mach.id) return HttpResponse(str(auth)) @require_POST def add_card_event(request): if 'username' in request.POST and 'password' in request.POST: user = authenticate(username=request.POST['username'], password=request.POST['password']) if user is not None and user.is_active: login(request, user) tstart = request.POST['tstart'] # TODO: convert to date tend = request.POST['tend'] user_id = request.POST['user_id'] succ = request.POST['succ'] == '1' machine_id = int(request.POST['machine_id']) try: robouser = RoboUser.objects.get(rfid__iexact=user_id) except ObjectDoesNotExist: robouser = None machine = Machine.objects.get(id__exact=machine_id) api_request = APIRequest( endpoint="/rfid/", updater_object=Project.objects.get(name="Tooltron"), user=robouser, success=succ, meta=machine.type, api_client="", ) api_request.save() # Cannot update updated_datetime with tend # because would be overwritten on save however # does not matter because Tooltron pushes # card events every 70ms which a lower resolution # that what tend even provides so update_datetime # being the value when this save() is called is okay api_request.created_datetime = tstart api_request.save() return HttpResponse()
Python
0
@@ -278,16 +278,50 @@ Project%0A +from django.utils import timezone%0A from .mo @@ -1213,17 +1213,21 @@ POST -%5B +.get( 'tstart' %5D # @@ -1222,17 +1222,17 @@ 'tstart' -%5D +) # TODO: @@ -1273,16 +1273,20 @@ POST -%5B +.get( 'tend' -%5D +) %0A u @@ -1302,25 +1302,29 @@ request.POST -%5B +.get( 'user_id'%5D%0A @@ -1320,17 +1320,20 @@ user_id' -%5D +, 0) %0A succ @@ -1350,16 +1350,20 @@ POST -%5B +.get( 'succ' -%5D +) == @@ -1397,17 +1397,21 @@ est.POST -%5B +.get( 'machine @@ -1414,17 +1414,20 @@ hine_id' -%5D +, 1) )%0A%0A try @@ -1591,16 +1591,67 @@ ne_id)%0A%0A + tooltron = Project.objects.get(name=%22Tooltron%22)%0A%0A api_re @@ -1716,44 +1716,16 @@ ect= -Project.objects.get(name=%22T +t ooltron -%22) ,%0A @@ -2188,16 +2188,259 @@ save()%0A%0A + # Since Tooltron (for now) does not use Standard API%0A # manually update it's last_activity field so Officers have the benefit%0A # of easily being able to see if it is working%0A tooltron.last_api_activity = timezone.now()%0A tooltron.save()%0A%0A return
1d1f5003a6493cbd8556b4f16d5a591d1cc2ace2
Update VersionOneAgent3.py
PlatformAgents/com/cognizant/devops/platformagents/agents/alm/versionone/VersionOneAgent3.py
PlatformAgents/com/cognizant/devops/platformagents/agents/alm/versionone/VersionOneAgent3.py
#------------------------------------------------------------------------------- # -*- coding: utf-8 -*- # Copyright 2017 Cognizant Technology Solutions # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. #------------------------------------------------------------------------------- ''' Created on Jul 6, 2017 @author: 463188 ''' # Optimization and Pagination might be required. This is the first cut working agent with incremental fetch from dateutil import parser from ....core.BaseAgent import BaseAgent from urllib.parse import quote import time import json,ast class VersionOneAgent(BaseAgent): def process(self): userid = self.config.get("userid", '') passwd = self.config.get("passwd", '') baseUrl = self.config.get("baseUrl", '') project = self.config.get("project", '') startFrom = self.config.get("startFrom", '') startFrom = parser.parse(startFrom) startFrom = startFrom.strftime('%Y-%m-%dT%H:%M:%S') reqHeaders = { "Content-Type" : "text/xml", "Accept" : "application/json" } responseTemplate = self.config.get('dynamicTemplate', {}).get('responseTemplate', None) hierachiesUrl = baseUrl+"Story?where=SecurityScope.Name='"+project+"'&sel=Name,Number,ChangeDate,Timebox.Name,Scope.Name,Status.Name,Estimate" hierachies = self.getResponse(hierachiesUrl, 'GET', userid, passwd, None,reqHeaders=reqHeaders) for hierarchy in hierachies["Assets"]: injectData = {} data = [] name = hierarchy['Attributes']['Name']['value'] since = self.tracking.get(name,None) if since == None: lastUpdated = startFrom else: since = parser.parse(since) since = since.strftime('%Y-%m-%dT%H:%M:%S') lastUpdated = since date = hierarchy['Attributes']['ChangeDate']['value'] date = parser.parse(date) date = date.strftime('%Y-%m-%dT%H:%M:%S') if since == None or date > since: injectData['storyName']=str(hierarchy['Attributes']['Name']['value']) injectData['id']=str(hierarchy['Attributes']['Number']['value']) injectData['sprintName']=str(hierarchy['Attributes']['Timebox.Name']['value']) injectData['projectName']=str(hierarchy['Attributes']['Scope.Name']['value']) injectData['status']=str(hierarchy['Attributes']['Status.Name']['value']) injectData['estimate']=str(hierarchy['Attributes']['Estimate']['value']) injectData['lastUpdateDate']=date data.append(injectData) fromDateTime=date else: fromDateTime = lastUpdated if len(hierachies)>0 and len(data)!=0: self.tracking[name] = fromDateTime versionOneMetadata = {"dataUpdateSupported" : True,"uniqueKey" : ["id"]} #self.publishToolsData(data) self.publishToolsData(data, versionOneMetadata) self.updateTrackingJson(self.tracking) if __name__ == "__main__": VersionOneAgent()
Python
0
@@ -981,16 +981,17 @@ aseAgent +3 import
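The incremental fetch in the entry above compares timestamps as strings after normalising both sides with `dateutil` parsing and the same `strftime` format, so a plain lexicographic comparison works. In isolation, assuming `python-dateutil` is installed (the agent already imports it) and using made-up timestamps:

from dateutil import parser

fmt = '%Y-%m-%dT%H:%M:%S'
since = parser.parse('2017-07-06 09:30:00').strftime(fmt)
change = parser.parse('2017-07-06T10:15:00Z').strftime(fmt)

print(since, change)    # 2017-07-06T09:30:00 2017-07-06T10:15:00
print(change > since)   # True: for this fixed format, string order is time order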
bd548cc863754533b8a5d6cff21455c91061fbce
Changing the action keys to lowercase
rofi/shutdown.py
rofi/shutdown.py
#!/usr/bin/env python3 import glob import logging import os import sys from rofi import Rofi CURRENT_SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0] LOG_FORMAT = ('[%(asctime)s PID %(process)s ' '%(filename)s:%(lineno)s - %(funcName)s()] ' '%(levelname)s -> \n' '%(message)s\n') # Configure the logging to console. Works from python 3.3+ logging.basicConfig( format=LOG_FORMAT, level=logging.INFO, handlers=[logging.StreamHandler(sys.stdout)] ) ACTIONS = [ ('Switch User', 'dm-tool swith-to-greeter',), ('Logoff', 'i3-msg exit',), ('Shutdown', 'sudo /sbin/shutdown -h now',), ('Restart', 'sudo /sbin/shutdown -r now',) ] if __name__ == "__main__": actions_list = [element[0] for element in ACTIONS] rofi_client = Rofi() selected, keyboard_key = rofi_client.select( 'Choose your destiny', actions_list, rofi_args=['-i'] # case insensitive ) logging.info(f'keyboard_key pressed={keyboard_key}') if keyboard_key == -1: logging.info('cancelled') rofi_client.exit_with_error('Cancelled, nothing to be done.') logging.info(f'selected={selected}') command = ACTIONS[selected][1] logging.info(f'Running command: {command}') # os.system(command)
Python
0.999994
@@ -532,16 +532,16 @@ (' -S +s witch -U +u ser' @@ -578,17 +578,17 @@ ,%0A (' -L +l ogoff', @@ -610,17 +610,17 @@ ,%0A (' -S +s hutdown' @@ -659,17 +659,17 @@ ,%0A (' -R +r estart', @@ -916,54 +916,8 @@ list -,%0A rofi_args=%5B'-i'%5D # case insensitive %0A @@ -1239,13 +1239,11 @@ %7D')%0A + - # os.
ad4b65346c5b38313ef9385984ffd28a329c7376
fix event loop
shadowsocks/eventloop.py
shadowsocks/eventloop.py
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2014 clowwindy # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # from ssloop # https://github.com/clowwindy/ssloop import select from collections import defaultdict __all__ = ['EventLoop', 'MODE_NULL', 'MODE_IN', 'MODE_OUT', 'MODE_ERR', 'MODE_HUP', 'MODE_NVAL'] MODE_NULL = 0x00 MODE_IN = 0x01 MODE_OUT = 0x04 MODE_ERR = 0x08 MODE_HUP = 0x10 MODE_NVAL = 0x20 class EpollLoop(object): def __init__(self): self._epoll = select.epoll() def poll(self, timeout): return self._epoll.poll(timeout) def add_fd(self, fd, mode): self._epoll.register(fd, mode) def remove_fd(self, fd): self._epoll.unregister(fd) def modify_fd(self, fd, mode): self._epoll.modify(fd, mode) class KqueueLoop(object): MAX_EVENTS = 1024 def __init__(self): self._kqueue = select.kqueue() self._fds = {} def _control(self, fd, mode, flags): events = [] if mode & MODE_IN: events.append(select.kevent(fd, select.KQ_FILTER_READ, flags)) if mode & MODE_OUT: events.append(select.kevent(fd, select.KQ_FILTER_WRITE, flags)) for e in events: self._kqueue.control([e], 0) def poll(self, timeout): if timeout < 0: timeout = None # kqueue behaviour events = self._kqueue.control(None, KqueueLoop.MAX_EVENTS, timeout) results = defaultdict(lambda: MODE_NULL) for e in events: fd = e.ident if e.filter == select.KQ_FILTER_READ: results[fd] |= MODE_IN elif e.filter == select.KQ_FILTER_WRITE: results[fd] |= MODE_OUT return results.iteritems() def add_fd(self, fd, mode): self._fds[fd] = mode self._control(fd, mode, select.KQ_EV_ADD) def remove_fd(self, fd): self._control(fd, self._fds[fd], select.KQ_EV_DELETE) del self._fds[fd] def modify_fd(self, fd, mode): self.remove_fd(fd) self.add_fd(fd, mode) class SelectLoop(object): def __init__(self): self._r_list = set() self._w_list = set() self._x_list = set() def poll(self, timeout): r, w, x = select.select(self._r_list, self._w_list, self._x_list, timeout) results = defaultdict(lambda: MODE_NULL) for p in [(r, MODE_IN), (w, MODE_OUT), (x, MODE_ERR)]: for fd in p[0]: results[fd] |= p[1] return results.items() def add_fd(self, fd, mode): if mode & MODE_IN: self._r_list.add(fd) if mode & MODE_OUT: self._w_list.add(fd) if mode & MODE_ERR: self._x_list.add(fd) def remove_fd(self, fd): if fd in self._r_list: self._r_list.remove(fd) if fd in self._w_list: self._w_list.remove(fd) if fd in self._x_list: self._x_list.remove(fd) def modify_fd(self, fd, mode): self.remove_fd(fd) self.add_fd(fd, mode) class EventLoop(object): def __init__(self): if hasattr(select, 'epoll'): 
self._impl = EpollLoop() elif hasattr(select, 'kqueue'): self._impl = KqueueLoop() elif hasattr(select, 'select'): self._impl = SelectLoop() else: raise Exception('can not find any available functions in select ' 'package') self._fd_to_f = defaultdict(list) def poll(self, timeout=None): events = self._impl.poll(timeout) return ((self._fd_to_f[fd], event) for fd, event in events) def add(self, f, mode): fd = f.fileno() self._fd_to_f[fd].append(f) self._impl.add_fd(fd, mode) def remove(self, f): fd = f.fileno() a = self._fd_to_f[fd] if len(a) <= 1: self._fd_to_f[fd] = None else: a.remove(f) self._impl.remove_fd(fd) def modify(self, f, mode): fd = f.fileno() self._impl.modify_fd(fd, mode)
Python
0.000019
@@ -4566,25 +4566,10 @@ f = -defaultdict(list) +%7B%7D %0A%0A @@ -4792,18 +4792,12 @@ %5Bfd%5D -.append(f) + = f %0A @@ -4891,128 +4891,32 @@ -a = self._fd_to_f%5Bfd%5D%0A if len(a) %3C= 1:%0A self._fd_to_f%5Bfd%5D = None%0A else:%0A a.remove(f) +self._fd_to_f%5Bfd%5D = None %0A
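`SelectLoop.poll` in the entry above maps the three readiness sets returned by `select.select` onto the `MODE_*` bitmask. The underlying primitive is easy to see on a local socket pair; a minimal sketch (assumes a platform where `socket.socketpair` is available, i.e. POSIX, or Python 3.5+ on Windows):

import select
import socket

a, b = socket.socketpair()
b.send(b'ping')                    # makes the peer socket readable

r, w, x = select.select([a.fileno()], [a.fileno()], [], 1.0)
print(a.fileno() in r)   # True, would be reported as MODE_IN
print(a.fileno() in w)   # True, an idle socket is writable (MODE_OUT)

a.close()
b.close()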
ea1189790a3a0941c669a981d5a351de57a1b5ce
Use the oc.NWChemJsonReader() class.
docker/nwchem/src/run.py
docker/nwchem/src/run.py
import os import subprocess import jinja2 import json import openchemistry as oc def run_calculation(geometry_file, output_file, params, scratch_dir): # Read in the geometry from the geometry file # This container expects the geometry file to be in .xyz format with open(geometry_file) as f: xyz_structure = f.read() # remove the first two lines in the xyz file # (i.e. number of atom and optional comment) xyz_structure = xyz_structure.split('\n')[2:] xyz_structure = '\n '.join(xyz_structure) # Read the input parameters theory = params.get('theory', 'hf') task = params.get('task', 'energy') basis = params.get('basis', 'cc-pvdz') functional = params.get('functional', 'b3lyp') charge = params.get('charge', 0) multiplicity = params.get('multiplicity', 1) if theory.lower() == 'ks': _theory = functional else: _theory = 'scf' reference = theory.lower() if multiplicity == 1: reference = 'r' + reference else: reference = 'u' + reference optimization = params.get('optimization', None) vibrational = params.get('vibrational', None) charge = params.get('charge', 0) multiplicity = params.get('multiplicity', 1) theory = params.get('theory', 'scf') functional = params.get('functional', 'b3lyp') basis = params.get('basis', 'cc-pvdz') context = { 'task': task, 'theory': _theory, 'reference': reference, 'charge': charge, 'multiplicity': multiplicity, 'basis': basis } context['functional'] = functional # Combine the input parameters and geometry into a concrete input file # that can be executed by the simulation code template_path = os.path.dirname(__file__) jinja2_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path), trim_blocks=True) os.makedirs(scratch_dir, exist_ok=True) os.chdir(scratch_dir) raw_input_file = os.path.join(scratch_dir, 'raw.in') raw_output_file = os.path.join(scratch_dir, 'raw.out') with open(raw_input_file, 'wb') as f: jinja2_env.get_template('nwchem.in.j2').stream(**context, xyz_structure=xyz_structure).dump(f, encoding='utf8') # Execute the code and write to output subprocess.run(["/opt/nwchem/bin/LINUX64/nwchem", raw_input_file, raw_output_file]) # Convert the raw output file generated by the code execution, into the # output format declared in the container description (cjson) with open(raw_output_file) as f: cjson = oc.Psi4Reader(f).read() # Save the calculation parameters in the cjson output for future reference cjson['inputParameters'] = params with open(output_file, 'w') as f: json.dump(cjson, f)
Python
0
@@ -1587,20 +1587,16 @@ s%0A %7D%0A - %0A con @@ -2621,12 +2621,18 @@ oc. -Psi4 +NWChemJson Read
2a984234d6bef4667af9549459e1fd85fb213626
Bump version to v1.14.20
client/__init__.py
client/__init__.py
__version__ = 'v1.14.19' FILE_NAME = 'ok' import os import sys sys.path.insert(0, '') # Add directory in which the ok.zip is stored to sys.path. sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
Python
0
@@ -18,10 +18,10 @@ .14. -19 +20 '%0A%0AF
6832112d69d8751229ca25f8269abcc6d82a3732
fix config filename
kuyruk/__main__.py
kuyruk/__main__.py
from __future__ import absolute_import import os import ast import logging import argparse from kuyruk import __version__, requeue, manager from kuyruk.worker import Worker from kuyruk.master import Master from kuyruk.config import Config logger = logging.getLogger(__name__) def worker(config, args): w = Worker(config, args.queue) w.run() def master(config, args): m = Master(config) m.run() def main(): parser = argparse.ArgumentParser() # Add common options parser.add_argument( '-v', '--version', action='version', version=__version__) parser.add_argument( '--config', help='Python file containing Kuyruk configuration parameters') parser.add_argument( '-d', '--delete-config', action='store_true', help='delete config after loading (used internally)') add_config_options(parser) subparsers = parser.add_subparsers(help='sub-command name') # Parser for the "worker" sub-command parser_worker = subparsers.add_parser('worker', help='run a worker') parser_worker.set_defaults(func=worker) parser_worker.add_argument( '--queue', default='kuyruk', help='consume tasks from') # Parser for the "master" sub-command parser_master = subparsers.add_parser('master', help='run a master') parser_master.set_defaults(func=master) # Parser for the "requeue" sub-command parser_master = subparsers.add_parser('requeue', help='requeue failed tasks') parser_master.set_defaults(func=requeue.run) # Parser for the "manager" sub-command parser_master = subparsers.add_parser('manager', help='run manager') parser_master.set_defaults(func=manager.run) # Parse arguments args = parser.parse_args() config = create_config(args) if args.delete_config: os.unlink(config.filename) # Run the sub-command function args.func(config, args) def add_config_options(parser): """Adds options for overriding values in config.""" config_group = parser.add_argument_group('override values in config') # Add every attribute in Config as command line option for key in sorted(dir(Config), reverse=True): if key.isupper(): config_group.add_argument(to_option(key)) def create_config(args): """Creates Config object and overrides it's values from args.""" config = Config() if args.config: config.from_pyfile(args.config) # Override values in config for key, value in vars(args).iteritems(): if value is not None: key = to_attr(key) if hasattr(Config, key): try: value = ast.literal_eval(value) except ValueError: pass setattr(config, key, value) return config def to_option(attr): return '--%s' % attr.lower().replace('_', '-') def to_attr(option): return option.upper().replace('-', '_') if __name__ == '__main__': main()
Python
0.000031
@@ -1778,70 +1778,8 @@ rgs) -%0A if args.delete_config:%0A os.unlink(config.filename) %0A%0A @@ -2726,16 +2726,75 @@ value)%0A%0A + if args.delete_config:%0A os.unlink(args.config)%0A%0A retu
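The config-override machinery in the entry above maps every upper-case `Config` attribute to a long command-line option and back, and coerces the string values with `ast.literal_eval` where possible. The two name mappings and the coercion, shown on their own with a made-up attribute name:

import ast

def to_option(attr):
    return '--%s' % attr.lower().replace('_', '-')

def to_attr(option):
    return option.upper().replace('-', '_')

print(to_option('MAX_WORKERS'))   # --max-workers
print(to_attr('max-workers'))     # MAX_WORKERS

# literal_eval turns "10" into 10 and "[1, 2]" into a list; plain words raise
# ValueError, which create_config catches and leaves as strings.
print(ast.literal_eval('10'), ast.literal_eval('[1, 2]'))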
8a6b644d35c771b0ec5a756d1f36a24a429b5def
Raise an exception on undefined template variables if in debug mode
antxetamedia/settings.py
antxetamedia/settings.py
import os from django.utils.six import text_type from django.template.base import TemplateSyntaxError BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'd2w#o#(!antcw5e%(#p5*pu(x=zhw60^byh$)ps+4#e8m#-fj!' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] INSTALLED_APPS = [ 'django.contrib.sites', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'grappelli.dashboard', 'grappelli', 'django.contrib.admin', 'adminsortable2', 'ckeditor', 'ckeditor_uploader', 'compressor', 'recurrence', 'kombu.transport.django', 'watson', 'sorl.thumbnail', 'django_filters', 'antxetamedia.frontpage', 'antxetamedia.blobs.apps.BlobsConfig', 'antxetamedia.shows', 'antxetamedia.news.apps.NewsConfig', 'antxetamedia.radio.apps.RadioConfig', 'antxetamedia.projects.apps.ProjectsConfig', 'antxetamedia.schedule', 'antxetamedia.widgets', 'antxetamedia.events.apps.EventsConfig', 'antxetamedia.flatpages', 'antxetamedia.archive', ] MIDDLEWARE_CLASSES = [ 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ] SITE_ID = 1 ROOT_URLCONF = 'antxetamedia.urls' class InvalidString(text_type): def __mod__(self, other): return TemplateSyntaxError("Undefined variable '{}'".format(other)) TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join('antxetamedia/templates')], 'APP_DIRS': True, 'OPTIONS': { 'debug': DEBUG, 'string_if_invalid': InvalidString("%s") if DEBUG else '', 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'antxetamedia.flatpages.context_processors.menu_flatpage_list', ], }, }, ] WSGI_APPLICATION = 'antxetamedia.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'eu' LANGUAGES = [('eu', 'Euskara')] TIME_ZONE = 'Europe/Paris' USE_I18N = True USE_L10N = True USE_TZ = True LOCALE_PATHS = [os.path.join(BASE_DIR, 'antxetamedia/locale')] # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ MEDIA_ROOT = os.path.join(BASE_DIR, '.media') MEDIA_URL = '/media/' STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, '.assets') STATICFILES_DIRS = [os.path.join(BASE_DIR, 'antxetamedia/static')] STATICFILES_FINDERS = [ 'compressor.finders.CompressorFinder', 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ] BROKER_URL = 'django://' CELERY_ALWAYS_EAGER = True COMPRESS_PRECOMPILERS = [('text/x-sass', 'django_libsass.SassCompiler')] CKEDITOR_JQUERY_URL = os.path.join(STATIC_URL, 'bower_components/jquery/dist/jquery.min.js') CKEDITOR_UPLOAD_PATH = 'ckeditor/' 
CKEDITOR_IMAGE_BACKEND = 'pillow' CKEDITOR_CONFIGS = { 'default': { 'extraPlugins': 'iframe,autoembed', 'toolbar': [ ['Format', 'Bold', 'Italic', 'Underline', 'StrikeThrough', '-', 'NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'], ['Image', 'Link', 'Iframe', 'Source'], ['Undo', 'Redo', '-', 'Cut', 'Copy', 'Paste', 'Find', 'Replace', '-', 'Print'], ], } } GRAPPELLI_INDEX_DASHBOARD = 'antxetamedia.dashboard.AntxetamediaDashboard' GRAPPELLI_ADMIN_TITLE = 'Antxetamedia' GRAPPELLI_SWITCH_USER = True GRAPPELLI_CLEAN_INPUT_TYPES = False FRONTPAGE_NEWSPODCASTS = 10 FRONTPAGE_RADIOPODCASTS = 5 FRONTPAGE_EVENTS = 5 NEWSCATEGORIES_COOKIE = 'newscategories' RADIOSHOWS_COOKIE = 'radioshows' SYNC_BLOBS = False
Python
0.000001
@@ -1881,13 +1881,12 @@ r -eturn +aise Tem
c13f78f358b3befe71539804abc80df9179b6bfa
bump to v1.7.6
client/__init__.py
client/__init__.py
__version__ = 'v1.7.5'

FILE_NAME = 'ok'

import os
import sys

sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
Python
0.000001
@@ -17,9 +17,9 @@ 1.7. -5 +6 '%0A%0AF
7687be07369281723bb331e9113bf5cd2ca1bc3b
fix import
analyzerdam/sqlDAM.py
analyzerdam/sqlDAM.py
''' Created on Nov 9, 2011 @author: ppa ''' import sys import logging from sqlalchemy import Column, Integer, String, Float, Sequence, create_engine, and_ from sqlalchemy.orm import sessionmaker, scoped_session from pyStock.models import Quote, Tick from pyStock import Base from analyzerdam.baseDAM import BaseDAM from analyzer.lib.util import splitListEqually LOG=logging.getLogger() class FmSql(Base): __tablename__='fundamental' id=Column(Integer, Sequence('user_id_seq'), primary_key=True) symbol=Column(String(12)) field=Column(String(50)) timeStamp=Column(String(50)) value=Column(Float) def __init__(self, symbol, field, timeStamp, value): ''' constructor ''' self.symbol=symbol self.field=field self.timeStamp=timeStamp self.value=value def __repr__(self): return "<Fundamentals('%s', '%s', '%s', '%s')>".format(self.symbol, self.field, self.timeStamp, self.value) class SqlDAM(BaseDAM): def __init__(self, setting, echo=False): super(SqlDAM, self).__init__() self.echo=echo self.first=True self.engine=None self.ReadSession=None self.WriteSession=None self.writeSession=None if 'db' not in setting: raise Exception("db not specified in setting") self.engine=create_engine(setting['db'], echo=self.echo) def getReadSession(self): ''' return scopted session ''' if self.ReadSession is None: self.ReadSession=scoped_session(sessionmaker(bind=self.engine)) return self.ReadSession def getWriteSession(self): ''' return unscope session, TODO, make it clear ''' if self.WriteSession is None: self.WriteSession=sessionmaker(bind=self.engine) self.writeSession=self.WriteSession() return self.writeSession def read_quotes(self, start, end): ''' read quotes ''' if end is None: end=sys.maxint session=self.getReadSession()() try: rows=session.query(Quote).filter(and_(Quote.symbol == self.symbol, Quote.time >= int(start), Quote.time < int(end))) finally: self.getReadSession.remove() return [self.__sqlToQuote(row) for row in rows] def readTupleQuotes(self, start, end): ''' read quotes as tuple ''' if end is None: end=sys.maxint session=self.getReadSession()() try: rows=session.query(Quote).filter(and_(Quote.symbol == self.symbol, Quote.time >= int(start), Quote.time < int(end))) finally: self.getReadSession().remove() return rows def readBatchTupleQuotes(self, symbols, start, end): ''' read batch quotes as tuple to save memory ''' if end is None: end=sys.maxint ret={} session=self.getReadSession()() try: symbolChunks=splitListEqually(symbols, 100) for chunk in symbolChunks: rows=session.query(Quote.symbol, Quote.time, Quote.close, Quote.volume, Quote.low, Quote.high).filter(and_(Quote.symbol.in_(chunk), Quote.time >= int(start), Quote.time < int(end))) for row in rows: if row.time not in ret: ret[row.time]={} ret[row.time][row.symbol]=self.__sqlToTupleQuote(row) finally: self.getReadSession().remove() return ret def read_tuple_ticks(self, start, end): ''' read ticks as tuple ''' if end is None: end=sys.maxint session=self.getReadSession()() try: rows=session.query(Tick).filter(and_(Tick.symbol == self.symbol, Tick.time >= int(start), Tick.time < int(end))) finally: self.getReadSession().remove() return [self.__sqlToTupleTick(row) for row in rows] def read_ticks(self, start, end): ''' read ticks ''' if end is None: end=sys.maxint session=self.getReadSession()() try: rows=session.query(Tick).filter(and_(Tick.symbol == self.symbol, Tick.time >= int(start), Tick.time < int(end))) finally: self.getReadSession().remove() return [self.__sqlToTick(row) for row in rows] def write_quotes(self, quotes): ''' write quotes ''' if 
self.first: Base.metadata.create_all(self.engine, checkfirst=True) self.first=False session=self.getWriteSession() session.add_all([self.__quoteToSql(quote) for quote in quotes]) def write_ticks(self, ticks): ''' write ticks ''' if self.first: Base.metadata.create_all(self.engine, checkfirst=True) self.first=False session=self.getWriteSession() session.add_all([self.__tickToSql(tick) for tick in ticks]) def commit(self): ''' commit changes ''' session=self.getWriteSession() session.commit() def write_fundamental(self, keyTimeValueDict): ''' write fundamental ''' if self.first: Base.metadata.create_all(self.__getEngine(), checkfirst=True) self.first=False sqls=self._fundamentalToSqls(keyTimeValueDict) session=self.Session() try: session.add_all(sqls) finally: self.Session.remove() def read_fundamental(self): rows=self.__getSession().query(FmSql).filter(and_(FmSql.symbol == self.symbol)) return self._sqlToFundamental(rows) def _sqlToFundamental(self, rows): keyTimeValueDict={} for row in rows: if row.field not in keyTimeValueDict: keyTimeValueDict[row.field]={} keyTimeValueDict[row.field][row.timeStamp]=row.value return keyTimeValueDict def _fundamentalToSqls(self, keyTimeValueDict): ''' convert fundament dict to sqls ''' sqls=[] for key, timeValues in keyTimeValueDict.iteritems(): for timeStamp, value in timeValues.iteritems(): sqls.append(FmSql(self.symbol, key, timeStamp, value)) return sqls
Python
0.000001
@@ -301,19 +301,12 @@ yzer +. dam -.baseDAM imp
bfce2efa821d83b83468858a068c9de7d96acab1
Make connect to heat match python-heatclient shell method
dib.py
dib.py
#!/usr/bin/env python import json import yaml import os import heatclient.client import keystoneclient.v2_0.client import uuid def parse(template): return yaml.safe_load(template) def get_identity_client(username, password, tenant_name): auth_url = os.environ['OS_AUTH_URL'] return keystoneclient.v2_0.client.Client(username=username, password=password, tenant_name=tenant_name, auth_url=auth_url) def get_heat_client(username=None, password=None, tenant_name=None): keystone = get_identity_client(username, password, tenant_name) token = keystone.auth_token try: endpoint = keystone.service_catalog.url_for( service_type='orchestration', endpoint_type='publicURL') except keystoneclient.exceptions.EndpointNotFound: return None else: return heatclient.client.Client('1', endpoint, token=token, username=username, password=password) hc = get_heat_client(os.environ['OS_USERNAME'], os.environ['OS_PASSWORD'], os.environ['OS_TENANT_NAME']) dib_template = open('dib.yaml', 'r') dib_yaml = parse(dib_template) template_file = open('OpenShift.yaml', 'r') template_yaml = parse(template_file) for res in template_yaml['resources']: if template_yaml['resources'][res]['type'] == 'OS::Nova::Server': # build install script content = '#!/bin/bash\n\nset -e\ninstall-packages heat-cfntools\n' content = content + 'install -D /tmp/in_target.d/install.d/metadata /var/lib/heat-cfntools/cfn-init-data\n\n' content = content + '/usr/bin/cfn-init\n\n' yaml_content_dib = {'/elements/dibt/install.d/30-dibt': {'content': content, 'mode': '00755', 'owner': 'root', 'group': 'root'}} yaml_content_metadata = {'/elements/dibt/install.d/metadata': {'content': json.dumps(template_yaml['resources'][res]['Metadata'])}} files = {} files.update(yaml_content_dib) files.update(yaml_content_metadata) for res in dib_yaml['resources']: if dib_yaml['resources'][res]['type'] == 'OS::Nova::Server': dib_yaml['resources'][res]['Metadata']['AWS::CloudFormation::Init']['config']['files'] = files # print yaml.dump(dib_yaml) # create dib stacks for all server resources in this stack stack_name = 'dib' + str(uuid.uuid4()).replace('-', '_') kwargs ={'stack_name': stack_name, 'template': yaml.dump(dib_yaml), 'parameters': {'os_username': os.environ['OS_USERNAME'], 'os_password': os.environ['OS_PASSWORD'], 'os_tenant_name': os.environ['OS_TENANT_NAME'], 'os_auth_url': os.environ['OS_AUTH_URL'], 'dib_image_name': stack_name, 'key_name': 'goofy'}} hc.stacks.create(**kwargs)
Python
0.000002
@@ -54,26 +54,53 @@ os%0A +from keystoneclient.v2_0 import -heat client -. + as ks_ clie @@ -109,36 +109,65 @@ %0Aimport -keystoneclient.v2_0. +heatclient%0Afrom heatclient import client as heat_ client%0Ai @@ -240,69 +240,149 @@ e)%0A%0A -def get_identity_client(username, password, tenant_name):%0A +keystone = ks_client.Client(username=os.environ%5B'OS_USERNAME'%5D, password=os.environ%5B'OS_PASSWORD'%5D, tenant_name=os.environ%5B'OS_TENANT_NAME'%5D, aut @@ -386,19 +386,17 @@ auth_url - = += os.envir @@ -416,463 +416,272 @@ RL'%5D -%0A%0A return keystoneclient.v2_0.client.Client(username=username,%0A password=password,%0A tenant_name=tenant_name,%0A auth_url=auth_url)%0A%0Adef get_heat_client(username=None, password=None, tenant_name= +)%0Akwargs = %7B%0A 'token': keystone.auth_token,%0A 'insecure': False,%0A 'timeout': 600,%0A 'ca_file': None,%0A 'cert_file': None -): +, %0A - keystone = get_identity_client(username, password, tenant_name)%0A token = keystone.auth_token%0A try:%0A +'key_file': None,%0A 'tenant_id': '',%0A 'username': os.environ%5B'OS_USERNAME'%5D,%0A 'password': os.environ%5B'OS_PASSWORD'%5D%0A%7D%0A endp @@ -720,30 +720,16 @@ url_for( -%0A service_ @@ -749,29 +749,16 @@ ration', -%0A endpoin @@ -781,363 +781,55 @@ L')%0A - except keystoneclient.exceptions.EndpointNotFound:%0A return None%0A else:%0A return heatclient.client.Client('1',%0A endpoint,%0A token=token,%0A username=username,%0A password=password)%0A%0Ahc = get_heat_client(os.environ%5B'OS_USERNAME'%5D, os.environ%5B'OS_PASSWORD'%5D, os.environ%5B'OS_TENANT_NAME'%5D +hc = heat_client.Client('1', endpoint, **kwargs )%0A%0Ad @@ -1987,51 +1987,8 @@ es%0A%0A -# print yaml.dump(dib_yaml)%0A%0A @@ -2233,24 +2233,17 @@ - +%09 'parame @@ -2304,32 +2304,25 @@ - +%09 'os_pas @@ -2368,32 +2368,25 @@ - +%09 'os_ten @@ -2438,32 +2438,25 @@ - +%09 'os_aut @@ -2506,29 +2506,22 @@ - +%09 - 'dib_ima @@ -2554,32 +2554,25 @@ - +%09 'key_na @@ -2584,16 +2584,124 @@ 'goofy'%7D +,%0A 'timeout_mins': '6000',%0A 'disable_rollback': True %7D%0A
b67fff75b01c2fc8fff7f019991e658b8cb39561
Fix chipseq resource logic when peak calling is not requested.
bcbio/chipseq/peaks.py
bcbio/chipseq/peaks.py
"""High level parallel SNP and indel calling using multiple variant callers. """ import os import copy from bcbio.log import logger from bcbio import bam, utils from bcbio.pipeline import config_utils from bcbio.pipeline import datadict as dd from bcbio.chipseq import macs2 from bcbio.provenance import do from bcbio.distributed.transaction import file_transaction def get_callers(): from bcbio.chipseq import macs2 return {"macs2": macs2.run} def peakcall_prepare(data, run_parallel): """Entry point for doing peak calling""" caller_fns = get_callers() to_process = [] for sample in data: mimic = copy.copy(sample[0]) callers = dd.get_peakcaller(sample[0]) if not isinstance(callers, list): callers = [callers] for caller in callers: if caller in caller_fns: mimic["peak_fn"] = caller name = dd.get_sample_name(mimic) mimic = _check(mimic, data) if mimic: to_process.append(mimic) else: logger.info("Skipping peak calling. No input sample for %s" % name) if to_process: after_process = run_parallel("peakcalling", to_process) data = _sync(data, after_process) return data def calling(data): """Main function to parallelize peak calling.""" chip_bam = dd.get_work_bam(data) input_bam = data.get("work_bam_input", None) caller_fn = get_callers()[data["peak_fn"]] name = dd.get_sample_name(data) out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), data["peak_fn"], name )) # chip_bam = _prepare_bam(chip_bam, dd.get_variant_regions(data), data['config']) # input_bam = _prepare_bam(input_bam, dd.get_variant_regions(data), data['config']) out_file = caller_fn(name, chip_bam, input_bam, dd.get_genome_build(data), out_dir, dd.get_chip_method(data), data["config"]) data["peaks_file"] = out_file return [[data]] def _prepare_bam(bam_file, bed_file, config): if not bam_file or not bed_file: return bam_file out_file = utils.append_stem(bam_file, '_filter') samtools = config_utils.get_program("samtools", config) if not utils.file_exists(out_file): with file_transaction(out_file) as tx_out: cmd = "{samtools} view -bh -L {bed_file} {bam_file} > {tx_out}" do.run(cmd.format(**locals()), "Clean %s" % bam_file) return out_file def _sync(original, processed): """ Add output to data if run sucessfully. For now only macs2 is available, so no need to consider multiple callers. """ for original_sample in original: original_sample[0]["peaks_file"] = [] for processs_sample in processed: if dd.get_sample_name(original_sample[0]) == dd.get_sample_name(processs_sample[0]): if utils.file_exists(processs_sample[0]["peaks_file"]): original_sample[0]["peaks_file"].append(processs_sample[0]["peaks_file"]) return original def _check(sample, data): """Get input sample for each chip bam file.""" if dd.get_chip_method(sample).lower() == "atac": return [sample] if dd.get_phenotype(sample) == "input": return None for origin in data: if dd.get_batch(sample) in dd.get_batch(origin[0]) and dd.get_phenotype(origin[0]) == "input": sample["work_bam_input"] = dd.get_work_bam(origin[0]) return [sample] return [sample] def _get_multiplier(samples): """Get multiplier to get jobs only for samples that have input """ to_process = 1.0 to_skip = 0 for sample in samples: if dd.get_phenotype(sample[0]) == "chip": to_process += 1.0 elif dd.get_chip_method(sample[0]).lower() == "atac": to_process += 1.0 else: to_skip += 1.0 mult = (to_process - to_skip) / len(samples) if mult < 0: mult = 1 / len(samples) return mult
Python
0
@@ -3984,16 +3984,17 @@ f mult %3C += 0:%0A @@ -4033,12 +4033,20 @@ return m -ult +ax(mult, 1) %0A
0872bb820c75a522c3947c372b0ebd63b9086c54
[clamav] TCP port connection must be an integer
misp_modules/modules/expansion/clamav.py
misp_modules/modules/expansion/clamav.py
import base64 import io import json import logging import sys import zipfile import clamd from typing import Optional from pymisp import MISPEvent, MISPObject log = logging.getLogger("clamav") log.setLevel(logging.DEBUG) sh = logging.StreamHandler(sys.stdout) sh.setLevel(logging.DEBUG) fmt = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(message)s" ) sh.setFormatter(fmt) log.addHandler(sh) moduleinfo = { "full_name": "ClamAV", "version": "0.1", "author": "Jakub Onderka", "description": "Submit file to ClamAV", "module-type": ["expansion"] } moduleconfig = ["connection"] mispattributes = { "input": ["attachment", "malware-sample"], "format": "misp_standard" } def create_response(software: str, signature: Optional[str] = None) -> dict: misp_event = MISPEvent() if signature: av_signature_object = MISPObject("av-signature") av_signature_object.add_attribute("signature", signature) av_signature_object.add_attribute("software", software) misp_event.add_object(av_signature_object) event = json.loads(misp_event.to_json()) results = {key: event[key] for key in ('Attribute', 'Object') if (key in event and event[key])} return {"results": results} def connect_to_clamav(connection_string: str) -> clamd.ClamdNetworkSocket: if connection_string.startswith("unix://"): return clamd.ClamdUnixSocket(connection_string.replace("unix://", "")) elif ":" in connection_string: host, port = connection_string.split(":") return clamd.ClamdNetworkSocket(host, port) else: raise Exception("ClamAV connection string is invalid") def handler(q=False): if q is False: return False request = json.loads(q) connection_string: str = request["config"].get("connection") if not connection_string: return {"error": "No ClamAV connection string provided"} attribute = request.get("attribute") if not attribute: return {"error": "No attribute provided"} attribute_type = attribute.get("type") if not attribute_type: return {"error": "No attribute type provided"} if attribute_type not in mispattributes["input"]: return {"error": "Invalid attribute type provided, expected 'malware-sample' or 'attachment'"} attribute_data = attribute.get("data") if not attribute_data: return {"error": "No attribute data provided"} try: clamav = connect_to_clamav(connection_string) software_version = clamav.version() except Exception: logging.exception("Could not connect to ClamAV") return {"error": "Could not connect to ClamAV"} try: data = base64.b64decode(attribute_data, validate=True) except Exception: logging.exception("Provided data is not valid base64 encoded string") return {"error": "Provided data is not valid base64 encoded string"} if attribute_type == "malware-sample": try: with zipfile.ZipFile(io.BytesIO(data)) as zipf: data = zipf.read(zipf.namelist()[0], pwd=b"infected") except Exception: logging.exception("Could not extract malware sample from ZIP file") return {"error": "Could not extract malware sample from ZIP file"} try: status, reason = clamav.instream(io.BytesIO(data))["stream"] except Exception: logging.exception("Could not send attribute data to ClamAV. Maybe file is too big?") return {"error": "Could not send attribute data to ClamAV. 
Maybe file is too big?"} if status == "ERROR": return {"error": "ClamAV returned error message: {}".format(reason)} elif status == "OK": return {"results": {}} elif status == "FOUND": return create_response(software_version, reason) else: return {"error": "ClamAV returned invalid status {}: {}".format(status, reason)} def introspection(): return mispattributes def version(): moduleinfo["config"] = moduleconfig return moduleinfo
Python
0.999322
@@ -1592,13 +1592,18 @@ st, +int( port) +) %0A
d9ee5286e3134a71a1e2f19f24c01fe4c30bdf6a
add domains
misp_modules/modules/expansion/onyphe.py
misp_modules/modules/expansion/onyphe.py
import json # -*- coding: utf-8 -*- import json try: from onyphe import Onyphe except ImportError: print("pyonyphe module not installed.") misperrors = {'error': 'Error'} mispattributes = {'input': ['ip-src', 'ip-dst', 'hostname', 'domains'], 'output': ['hostname', 'domain', 'ip-src', 'ip-dst','url']} # possible module-types: 'expansion', 'hover' or both moduleinfo = {'version': '1', 'author': 'Sebastien Larinier @sebdraven', 'description': 'Query on Onyphe', 'module-type': ['expansion', 'hover']} # config fields that your code expects from the site admin moduleconfig = ['apikey'] def handler(q=False): if q is False: return False request = json.loads(q) if not request.get('config') and not (request['config'].get('apikey')): misperrors['error'] = 'Onyphe authentication is missing' return misperrors api = Onyphe(request['config'].get('apikey')) if not api: misperrors['error'] = 'Onyphe Error instance api' ip = '' if request.get('ip-src'): ip = request['ip-src'] elif request.get('ip-dst'): ip = request['ip-dst'] else: misperrors['error'] = "Unsupported attributes type" return misperrors return handle_expansion(api, ip, misperrors) def handle_expansion(api, ip, misperrors): result = api.ip(ip) if result['status'] == 'nok': misperrors['error'] = result['message'] return misperrors categories = list(set([item['@category'] for item in result['results']])) result_filtered = {"results": []} urls_pasties = [] asn_list = [] os_list = [] for r in result['results']: if r['@category'] == 'pastries': if r['@type'] == 'pastebin': urls_pasties.append('https://pastebin.com/raw/%s' % r['key']) elif r['@category'] == 'synscan': asn_list.append(r['asn']) os_list.append(r['os']) result_filtered['results'].append({'types': ['url'], 'values': urls_pasties, 'categories': ['External analysis']}) result_filtered['results'].append({'types': ['AS'], 'values': list(set(asn_list)), 'categories': ['Network activity']}) result_filtered['results'].append({'types': ['target-machine'], 'values': list(set(os_list)), 'categories': ['Targeting data']}) return result_filtered def introspection(): return mispattributes def version(): moduleinfo['config'] = moduleconfig return moduleinfo
Python
0.000001
@@ -1640,24 +1640,75 @@ s_list = %5B%5D%0A + domains_resolver = %5B%5D%0A domains_forward = %5B%5D%0A for r in @@ -1727,16 +1727,16 @@ ults'%5D:%0A - @@ -1986,28 +1986,343 @@ os_ -list.append(r%5B'os'%5D) +target = r%5B'os'%5D%0A if os_target != 'Unknown':%0A os_list.append(r%5B'os'%5D)%0A elif r%5B'@category'%5D == 'resolver' and r%5B'@type'%5D =='reverse':%0A domains_resolver.append(r%5B'reverse'%5D)%0A elif r%5B'@category'%5D == 'resolver' and r%5B'@type'%5D =='forward':%0A domains_forward.append(r%5B'forward'%5D)%0A %0A @@ -2472,24 +2472,25 @@ nalysis'%5D%7D)%0A +%0A result_f @@ -2770,32 +2770,32 @@ (set(os_list)),%0A - @@ -2851,16 +2851,599 @@ data'%5D%7D) +%0A%0A result_filtered%5B'results'%5D.append(%7B'types': %5B'domains'%5D,%0A 'values': list(set(domains_resolver)),%0A 'categories': %5B'Network activity'%5D,%0A 'comments': %5B'resolver to %25s' %25 ip%5D%7D)%0A%0A result_filtered%5B'results'%5D.append(%7B'types': %5B'domains'%5D,%0A 'values': list(set(domains_resolver)),%0A 'categories': %5B'Network activity'%5D,%0A 'comments': %5B'forward to %25s' %25 ip%5D%7D) %0A ret
76133ace8bcd48a3226cdcd15f8e7103f295f4b7
Update yeti.py
misp_modules/modules/expansion/yeti.py
misp_modules/modules/expansion/yeti.py
import json try: import pyeti except ImportError: print("pyeti module not installed.") from pymisp import MISPEvent, MISPObject misperrors = {'error': 'Error'} mispattributes = {'input': ['ip-src', 'ip-dst', 'hostname', 'domain'], 'format': 'misp_standard' } # possible module-types: 'expansion', 'hover' or both moduleinfo = {'version': '1', 'author': 'Sebastien Larinier @sebdraven', 'description': 'Query on yeti', 'module-type': ['expansion', 'hover']} moduleconfig = ['apikey', 'url'] class Yeti(): def __init__(self, url, key,attribute): self.misp_mapping = {'Ip': 'ip-dst', 'Domain': 'domain', 'Hostname': 'hostname', 'Url': 'url'} self.yeti_client = pyeti.YetiApi(url=url, api_key=key) self.attribute = attribute self.misp_event = MISPEvent() self.misp_event.add_attribute(**attribute) def search(self, value): obs = self.yeti_client.observable_search(value=value) if obs: return obs[0] def get_neighboors(self, obs_id): neighboors = self.yeti_client.neighbors_observables(obs_id) if neighboors and 'objs' in neighboors: for n in neighboors['objs']: yield n def get_tags(self, value): obs = self.search(value) if obs: for t in obs['tags']: yield t def get_entity(self, obs_id): companies = self.yeti_client.observable_to_company(obs_id) actors = self.yeti_client.observable_to_actor(obs_id) campaigns = self.yeti_client.observable_to_campaign(obs_id) exploit_kit = self.yeti_client.observable_to_exploitkit(obs_id) exploit = self.yeti_client.observable_to_exploit(obs_id) ind = self.yeti_client.observable_to_indicator(obs_id) res = [] res.extend(companies) res.extend(actors) res.extend(campaigns) res.extend(exploit) res.extend(exploit_kit) res.extend(ind) for r in res: yield r['name'] def parse_yeti_result(self): obs = self.search(self.attribute['value']) values = [] types = [] for obs_to_add in self.get_neighboors(obs['id']): object_misp = self.get_object(obs_to_add) if object_misp: self.misp_event.add_object(object_misp) print('Event MISP %s' % self.misp_event.to_json()) def get_result(self): event = json.loads(self.misp_event.to_json()) results = {key: event[key] for key in ('Attribute', 'Object')} return results def get_object(self, obj_to_add): if (obj_to_add['type'] == 'Ip' and self.attribute in ['hostname','domain']) or\ (obj_to_add['type'] in ('Hostname', 'Domain') and self.attribute['type'] in ('ip-src', 'ip-dst')): domain_ip_object = MISPObject('domain-ip') domain_ip_object.add_attribute(self.__get_relation(obj_to_add), obj_to_add['value']) domain_ip_object.add_attribute('ip', self.attribute['value']) domain_ip_object.add_reference(self.attribute['uuid'], 'related_to') return domain_ip_object def __get_relation(self, obj_yeti): typ_attribute = self.misp_mapping[obj_yeti['type']] attr_misp = {'value': obj_yeti['value']} if typ_attribute == 'ip-src' or typ_attribute == 'ip-dst': return 'ip' elif 'domain' == typ_attribute: return 'domain' elif 'hostname' == typ_attribute: return 'domain' return attr_misp def handler(q=False): if q is False: return False apikey = None yeti_url = None yeti_client = None request = json.loads(q) attribute = request['attribute'] if attribute['type'] not in mispattributes['input']: return {'error': 'Unsupported attributes type'} if 'config' in request and 'url' in request['config']: yeti_url = request['config']['url'] if 'config' in request and 'apikey' in request['config']: apikey = request['config']['apikey'] if apikey and yeti_url: yeti_client = Yeti(yeti_url, apikey, attribute) if yeti_client: yeti_client.parse_yeti_result() return yeti_client.get_result() else: 
misperrors['error'] = 'Yeti Config Error' return misperrors def version(): moduleinfo['config'] = moduleconfig return moduleinfo def introspection(): return mispattributes
Python
0
@@ -2400,66 +2400,9 @@ sp)%0A - print('Event MISP %25s' %25 self.misp_event.to_json()) +%0A %0A @@ -2549,16 +2549,51 @@ ject')%7D%0A + print('results '%25 results)%0A
6e229622ff3bfce3750e4fe14bbd0d5b5d5a8d80
Fix typo
metafunctions/tests/test_concurrent.py
metafunctions/tests/test_concurrent.py
import operator import os from unittest import mock import functools import colors from metafunctions.tests.util import BaseTestCase from metafunctions.util import node from metafunctions.util import bind_call_state from metafunctions.util import highlight_current_function from metafunctions.util import concurrent from metafunctions.util import mmap from metafunctions.util import store, recall from metafunctions.util import star from metafunctions.concurrent import ConcurrentMerge from metafunctions import operators from metafunctions.core import CallState from metafunctions.exceptions import ConcurrentException, CompositionError, CallError class TestUnit(BaseTestCase): def test_basic(self): ab = a + b cab = ConcurrentMerge(ab) self.assertEqual(cab('_'), '_a_b') @mock.patch('metafunctions.util.highlight_current_function') def test_exceptions(self, mock_h): mock_h.side_effect = functools.partial(highlight_current_function, use_color=True) @node def fail(x): if not x: 1 / 0 return x - 1 cmp = ConcurrentMerge(fail - fail) with self.assertRaises(ConcurrentException) as e: cmp(0) self.assertIsInstance(e.exception.__cause__, ZeroDivisionError) self.assertEqual(e.exception.__cause__.args[0], f'division by zero \n\nOccured in the following function: ' f'concurrent({colors.red("->fail<-")} - fail)') def test_consistent_meta(self): ''' Every function in the pipeline recieves the same meta. ''' @node @bind_call_state def f(call_state, x): self.assertIs(call_state._meta_entry, cmp) return 1 @node() @bind_call_state def g(call_state, x): self.assertIs(call_state._meta_entry, cmp) return 1 @node @bind_call_state def h(call_state, x): self.assertIs(call_state._meta_entry, cmp) return 1 @node @bind_call_state def i(call_state, x): self.assertIs(call_state._meta_entry, cmp) return 1 cmp = ConcurrentMerge(h + f + f / h + i - g) self.assertEqual(cmp(1), 3) self.assertEqual(cmp(1, call_state=cmp.new_call_state()), 3) # how do pretty tracebacks work in multiprocessing? def test_call(self): c = concurrent(a+b) self.assertEqual(c('_'), '_a_b') self.assertEqual(c('-', '_'), '-a_b') with self.assertRaises(CallError): c('_', '_', '_') @node def d(): return 'd' abd = concurrent(a & b & d) self.assertEqual(abd('-', '_'), ('-a', '_b', 'd')) def test_concurrent(self): c = concurrent(a + b) self.assertIsInstance(c, ConcurrentMerge) self.assertEqual(c('_'), '_a_b') def test_not_concurrent(self): #can only upgrade FunctionMerges with self.assertRaises(CompositionError): concurrent(a) with self.assertRaises(CompositionError): concurrent(a | b) def test_str_repr(self): cab = ConcurrentMerge(a + b) cmap = concurrent(mmap(a)) self.assertEqual(repr(cab), f'ConcurrentMerge({operator.add}, ({repr(a)}, {repr(b)}))') self.assertEqual(str(cab), f'concurrent(a + b)') self.assertEqual(str(cmap), f'concurrent(mmap(a))') def test_basic_map(self): # We can upgrade maps to run in parallel banana = 'bnn' | concurrent(mmap(a)) | ''.join str_concat = operators.concat | node(''.join) batman = concurrent(mmap(a, operator=str_concat)) self.assertEqual(banana(), 'banana') self.assertEqual(batman('nnnn'), 'nananana') def test_multi_arg_map(self): @node def f(*args): return args m = concurrent(mmap(f)) with self.assertRaises(CompositionError): #Because star returns a simple function, we can't upgrade it. starmap = concurrent(star(mmap(f))) #we have to wrap concurrent in star instead. 
starmap = star(concurrent(mmap(f))) mapstar = concurrent(mmap(star(f))) self.assertEqual(m([1, 2, 3], [4, 5, 6]), ((1, 4), (2, 5), (3, 6))) self.assertEqual(m([1, 2, 3]), ((1, ), (2, ), (3, ))) with self.assertRaises(TypeError): self.assertEqual(starmap([1, 2, 3])) self.assertEqual(starmap([[1, 2, 3]]), m([1, 2, 3])) cmp = ([1, 2, 3], [4, 5, 6]) | starmap self.assertEqual(cmp(), ((1, 4), (2, 5), (3, 6))) cmp = ([1, 2, 3], [4, 5, 6]) | mapstar self.assertEqual(cmp(), ((1, 2, 3), (4, 5, 6))) def test_call_state(self): # Call state should be usable in concurrent chains chain_a = a | b | store('ab') chain_b = b | a | store('ba') cmp = concurrent(chain_a & chain_b) state = CallState() self.assertEqual(cmp('_', call_state=state), ('_ab', '_ba')) self.assertDictEqual(state.data, {'ab': '_ab', 'ba': '_ba'}) # If call_state.data contains something that isn't pickleable, fail gracefully bad = [lambda: None] | store('o') cmp = concurrent(bad & bad) with self.assertRaises(ConcurrentException): cmp() def test_unpickleable_exception(self): # Don't let child processes crash, even if they do weird things like raise unpickleable # exceptions @node def f(): class BadException(Exception): pass raise BadException() cmp = concurrent(f+f) with self.assertRaises(ConcurrentException): cmp() ### Simple Sample Functions ### @node def a(x): return x + 'a' @node def b(x): return x + 'b'
Python
0.999999
@@ -5404,25 +5404,24 @@ test_unpickl -e able_excepti
76b087986aa90967918ec52b459a857c11743203
Update patterns
module/plugins/hoster/ZippyshareCom.py
module/plugins/hoster/ZippyshareCom.py
# -*- coding: utf-8 -*- import re from os import path from urllib import unquote from urlparse import urljoin from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo class ZippyshareCom(SimpleHoster): __name__ = "ZippyshareCom" __type__ = "hoster" __version__ = "0.60" __pattern__ = r'(?P<HOST>http://www\d{0,2}\.zippyshare\.com)/v(?:/|iew\.jsp.*key=)(?P<KEY>\d+)' __description__ = """Zippyshare.com hoster plugin""" __license__ = "GPLv3" __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] NAME_PATTERN = r'("/[di]/\d+/".+?"/|<title>Zippyshare.com - )(?P<N>.+?)("|</title>)' SIZE_PATTERN = r'>Size:.+?">(?P<S>[\d.,]+) (?P<U>[\w^_]+)' OFFLINE_PATTERN = r'>File does not exist on this server<' COOKIES = [(".zippyshare.com", "ziplocale", "en")] def setup(self): self.multiDL = True self.chunkLimit = -1 self.resumeDownload = True def handleFree(self): url = self.get_link() self.download(url) def getFileInfo(self): info = super(ZippyshareCom, self).getFileInfo() self.pyfile.name = info['name'] = unquote(info['name']) return info def get_checksum(self): try: a1, a2 = map(int, re.search(r'\(\'downloadB\'\).omg = (\d+)%(\d+)', self.html).groups()) c1, c2 = map(int, re.search(r'\(\'downloadB\'\).omg\) \* \((\d+)%(\d+)', self.html).groups()) b = (a1 % a2) * (c1 % c2) except: self.error(_("Unable to calculate checksum")) else: return b + 18 def get_link(self): checksum = self.get_checksum() p_url = path.join("d", self.info['KEY'], str(checksum), self.pyfile.name) dl_link = urljoin(self.info['HOST'], p_url) return dl_link getInfo = create_getInfo(ZippyshareCom)
Python
0
@@ -589,19 +589,22 @@ r'(%22 -/%5Bdi%5D/%5Cd+/%22 +%5Cd%7B6,%7D/%22%5B %5D*%5C+ .+?%22 @@ -1248,16 +1248,218 @@ try:%0A + m = re.search(r'%5C+%5B %5D*%5C((%5Cd+)%5B %5D*%5C%25%5B %5D*(%5Cd+)%5B %5D*%5C+%5B %5D*(%5Cd+)%5B %5D*%5C%25%5B %5D*(%5Cd+)%5C)%5B %5D*%5C+', self.html)%0A if m:%0A a1, a2, c1, c2 = map(int, m.groups())%0A else:%0A @@ -1543,32 +1543,36 @@ html).groups())%0A + c1, @@ -1653,32 +1653,33 @@ html).groups())%0A +%0A b =
da5b65005b2b241708d3a3f4d26756e7d90fe40d
Update DepositfilesCom.py
module/plugins/hoster/DepositfilesCom.py
module/plugins/hoster/DepositfilesCom.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import re from urllib import unquote from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo from module.plugins.internal.CaptchaService import ReCaptcha class DepositfilesCom(SimpleHoster): __name__ = "DepositfilesCom" __type__ = "hoster" __pattern__ = r"http://[\w\.]*?(depositfiles\.com|dfiles\.eu)(/\w{1,3})?/files/[\w]+" __version__ = "0.45" __description__ = """Depositfiles.com Download Hoster""" __author_name__ = ("spoob", "zoidberg") __author_mail__ = ("spoob@pyload.org", "zoidberg@mujmail.cz") FILE_SIZE_PATTERN = r': <b>(?P<S>[0-9.]+)&nbsp;(?P<U>[kKMG])i?B</b>' FILE_NAME_PATTERN = r'<script type="text/javascript">eval\( unescape\(\'(?P<N>.*?)\'' FILE_OFFLINE_PATTERN = r'<span class="html_download_api-not_exists"></span>' FILE_URL_REPLACEMENTS = [(r"\.com(/.*?)?/files", ".com/en/files"), (r"\.html$", "")] FILE_NAME_REPLACEMENTS = [(r'\%u([0-9A-Fa-f]{4})', lambda m: unichr(int(m.group(1), 16))), (r'.*<b title="(?P<N>[^"]+).*', "\g<N>" )] RECAPTCHA_PATTERN = r"Recaptcha.create\('([^']+)'" DOWNLOAD_LINK_PATTERN = r'<form id="downloader_file_form" action="(http://.+?\.(dfiles\.eu|depositfiles\.com)/.+?)" method="post"' def handleFree(self): self.html = self.load(self.pyfile.url, post={"gateway_result": "1"}, cookies=True) if re.search(self.FILE_OFFLINE_PATTERN, self.html): self.offline() if re.search(r'File is checked, please try again in a minute.', self.html) is not None: self.logInfo("DepositFiles.com: The file is being checked. Waiting 1 minute.") self.setWait(61) self.wait() self.retry() wait = re.search(r'html_download_api-limit_interval\">(\d+)</span>', self.html) if wait: wait_time = int(wait.group(1)) self.logInfo("%s: Traffic used up. Waiting %d seconds." % (self.__name__, wait_time)) self.setWait(wait_time) self.wantReconnect = True self.wait() self.retry() wait = re.search(r'>Try in (\d+) minutes or use GOLD account', self.html) if wait: wait_time = int(wait.group(1)) self.logInfo("%s: All free slots occupied. Waiting %d minutes." 
% (self.__name__, wait_time)) self.setWait(wait_time * 60, False) wait = re.search(r'Please wait (\d+) sec', self.html) if wait: self.setWait(int(wait.group(1))) found = re.search(r"var fid = '(\w+)';", self.html) if not found: self.retry(wait_time=5) params = {'fid': found.group(1)} self.logDebug("FID: %s" % params['fid']) captcha_key = '6LdRTL8SAAAAAE9UOdWZ4d0Ky-aeA7XfSqyWDM2m' found = re.search(self.RECAPTCHA_PATTERN, self.html) if found: captcha_key = found.group(1) self.logDebug("CAPTCHA_KEY: %s" % captcha_key) self.wait() recaptcha = ReCaptcha(self) for i in range(5): self.html = self.load("http://depositfiles.com/get_file.php", get=params) if '<input type=button value="Continue" onclick="check_recaptcha' in self.html: if not captcha_key: self.parseError('Captcha key') if 'response' in params: self.invalidCaptcha() params['challenge'], params['response'] = recaptcha.challenge(captcha_key) self.logDebug(params) continue found = re.search(self.DOWNLOAD_LINK_PATTERN, self.html) if found: if 'response' in params: self.correctCaptcha() link = unquote(found.group(1)) self.logDebug("LINK: %s" % link) break else: self.parseError('Download link') else: self.fail('No valid captcha response received') try: self.download(link, disposition=True) except: self.retry(wait_time=60) def handlePremium(self): if '<span class="html_download_api-gold_traffic_limit">' in self.html: self.logWarning("Download limit reached") self.retry(25, 3600, "Download limit reached") elif 'onClick="show_gold_offer' in self.html: self.account.relogin(self.user) self.retry() link = unquote( re.search('<div id="download_url">\s*<a href="(http://.+?\.depositfiles.com/.+?)"', self.html).group(1)) self.download(link, disposition=True) getInfo = create_getInfo(DepositfilesCom)
Python
0
@@ -336,16 +336,18 @@ = r%22http +s? ://%5B%5Cw%5C.
3ce75ad5f3e0178394e9d496327c2e11bb74c6ac
save schedule to SQL
app/data.py
app/data.py
from .models import Settings from app import db def get_query(db_model): try: q = db.session.query(db_model).order_by(db_model.index.desc()).first() except AttributeError: try: q = db.session.query(db_model).order_by(db_model.index.desc()).first() except AttributeError: return None return q def get_last_row(db_model=Settings): q = get_query(db_model) d = {} for col in q.__table__.columns._data.keys(): d[col] = getattr(q,col) return d def get_value(column,db_model=Settings): q = get_query(db_model) try: return getattr(q,column) except AttributeError: return None except TypeError: return None def change_setting(name,value): d = get_last_row(Settings) if name in d.keys(): d[name] = value del d['index'] del d['timestamp'] db.session.add(Settings(**d)) db.session.commit()
Python
0
@@ -927,12 +927,68 @@ on.commit()%0A +%0Adef parse_timetable(input_json):%0A %0A%0A%0A return None
e77bc141c8e2564509f093059e61cfb98be79e56
Add module docstring to __init__
lamana/__init__.py
lamana/__init__.py
# -----------------------------------------------------------------------------
import lamana.input_
import lamana.distributions
import lamana.constructs
import lamana.theories
import lamana.output_
#from lamana.models import *
#import lamana.ratios
#import lamana.predictions
#import lamana.gamuts

__title__ = 'lamana'
__version__ = '0.4.12-dev' # PEP 440 style
__author__ = 'P. Robinson II'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015, P. Robinson II'
Python
0.000001
@@ -73,16 +73,134 @@ -------%0A +'''The main init file that stores the package version number.'''%0A# __version__ is used by find_version() in setup.py%0A%0A import l
2cd901a3975691eb06f695f5e352c0bc46c923a0
Bump version to 0.4.11
lamana/__init__.py
lamana/__init__.py
# ----------------------------------------------------------------------------- import lamana.input_ import lamana.distributions import lamana.constructs import lamana.theories import lamana.output_ #from lamana.models import * #import lamana.ratios #import lamana.predictions #import lamana.gamuts __title__ = 'lamana' __version__ = '0.4.11.dev0' # PEP 440 style ##__version__ = '0.4.11-dev' __author__ = 'P. Robinson II' __license__ = 'BSD' __copyright__ = 'Copyright 2015, P. Robinson II'
Python
0
@@ -417,12 +417,8 @@ 4.11 --dev '%0A__
ef6596637df513fd3c59b1d45916a41a4e82506f
Update yeti.py
misp_modules/modules/expansion/yeti.py
misp_modules/modules/expansion/yeti.py
import json try: import pyeti except ImportError: print("pyeti module not installed.") from pymisp import MISPEvent, MISPObject misperrors = {'error': 'Error'} mispattributes = {'input': ['ip-src', 'ip-dst', 'hostname', 'domain'], 'format': 'misp_standard' } # possible module-types: 'expansion', 'hover' or both moduleinfo = {'version': '1', 'author': 'Sebastien Larinier @sebdraven', 'description': 'Query on yeti', 'module-type': ['expansion', 'hover']} moduleconfig = ['apikey', 'url'] class Yeti(): def __init__(self, url, key,attribute): self.misp_mapping = {'Ip': 'ip-dst', 'Domain': 'domain', 'Hostname': 'hostname', 'Url': 'url'} self.yeti_client = pyeti.YetiApi(url=url, api_key=key) self.attribute = attribute self.misp_event = MISPEvent() self.misp_event.add_attribute(**attribute) def search(self, value): obs = self.yeti_client.observable_search(value=value) if obs: return obs[0] def get_neighboors(self, obs_id): neighboors = self.yeti_client.neighbors_observables(obs_id) if neighboors and 'objs' in neighboors: for n in neighboors['objs']: yield n def get_tags(self, value): obs = self.search(value) if obs: for t in obs['tags']: yield t def get_entity(self, obs_id): companies = self.yeti_client.observable_to_company(obs_id) actors = self.yeti_client.observable_to_actor(obs_id) campaigns = self.yeti_client.observable_to_campaign(obs_id) exploit_kit = self.yeti_client.observable_to_exploitkit(obs_id) exploit = self.yeti_client.observable_to_exploit(obs_id) ind = self.yeti_client.observable_to_indicator(obs_id) res = [] res.extend(companies) res.extend(actors) res.extend(campaigns) res.extend(exploit) res.extend(exploit_kit) res.extend(ind) for r in res: yield r['name'] def parse_yeti_result(self): obs = self.search(self.attribute['value']) values = [] types = [] for obs_to_add in self.get_neighboors(obs['id']): object_misp_domain_ip = self.__get_object_domain_ip(obs_to_add) if object_misp_domain_ip: self.misp_event.add_object(object_misp_domain_ip) # object_misp_url = self.__get_object_url(obs_to_add) # if object_misp_url: # self.misp_event.add_object(object_misp_url) def get_result(self): event = json.loads(self.misp_event.to_json()) results = {key: event[key] for key in ('Attribute', 'Object')} print('results %s' % results) return results def __get_object_domain_ip(self, obj_to_add): if (obj_to_add['type'] == 'Ip' and self.attribute['type'] in ['hostname','domain']) or\ (obj_to_add['type'] in ('Hostname', 'Domain') and self.attribute['type'] in ('ip-src', 'ip-dst')): domain_ip_object = MISPObject('domain-ip') domain_ip_object.add_attribute(self.__get_relation(obj_to_add), obj_to_add['value']) domain_ip_object.add_attribute(self.__get_relation(self.attribute, is_yeti_object=False), self.attribute['value']) domain_ip_object.add_reference(self.attribute['uuid'], 'related_to') return domain_ip_object def __get_object_url(self, obj_to_add): if (obj_to_add['type'] == 'Url' and self.attribute['type'] in ['hostname', 'domain', 'ip-src', 'ip-dst']) or ( obj_to_add['type'] in ('Hostname', 'Domain', 'Ip') and self.attribute['type'] == 'url' ): url_object = MISPObject('Url') obj_relation = self.__get_relation(obj_to_add) if obj_relation: print(obj_relation) print(obj_to_add['value']) url_object.add_attribute('url', obj_to_add['value']) obj_relation = self.__get_relation(self.attribute) if obj_relation: print(obj_relation) url_object.add_attribute(self.__get_relation(self.attribute), self.attribute['value']) url_object.add_reference(self.attribute['uuid'], 'related_to') print(url_object) return 
url_object def __get_relation(self, obj, is_yeti_object=True): if is_yeti_object: type_attribute = self.misp_mapping[obj['type']] else: type_attribute = obj['type'] if type_attribute == 'ip-src' or type_attribute == 'ip-dst': return 'ip' elif 'domain' == type_attribute: return 'domain' elif 'hostname' == type_attribute: return 'domain' elif type_attribute == 'url': return type_attribute def handler(q=False): if q is False: return False apikey = None yeti_url = None yeti_client = None request = json.loads(q) attribute = request['attribute'] if attribute['type'] not in mispattributes['input']: return {'error': 'Unsupported attributes type'} if 'config' in request and 'url' in request['config']: yeti_url = request['config']['url'] if 'config' in request and 'apikey' in request['config']: apikey = request['config']['apikey'] if apikey and yeti_url: yeti_client = Yeti(yeti_url, apikey, attribute) if yeti_client: yeti_client.parse_yeti_result() return {'results': yeti_client.get_result()} else: misperrors['error'] = 'Yeti Config Error' return misperrors def version(): moduleinfo['config'] = moduleconfig return moduleinfo def introspection(): return mispattributes
Python
0
@@ -2449,18 +2449,16 @@ - # object_ @@ -2513,18 +2513,16 @@ - # if obje @@ -2549,10 +2549,8 @@ - # @@ -4077,21 +4077,28 @@ tribute( -'url' +obj_relation , obj_to
edaaaf23bc13996bf571946128f206013045efbb
Resolve comilation issue for darwin-framework-tool on M1 (#21761)
scripts/build/build_darwin_framework.py
scripts/build/build_darwin_framework.py
#!/usr/bin/env -S python3 -B # Copyright (c) 2022 Project Matter Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from subprocess import PIPE, Popen def run_command(command): returncode = -1 command_log = b'' print("Running {}".format(command)) with Popen(command, cwd=os.getcwd(), stdout=PIPE, stderr=PIPE) as process: for line in process.stdout: command_log += line for line in process.stderr: command_log += line process.wait() returncode = process.returncode with open(args.log_path, "wb") as f: f.write(command_log) return returncode def build_darwin_framework(args): abs_path = os.path.abspath(args.out_path) if not os.path.exists(abs_path): os.mkdir(abs_path) command = ['xcodebuild', '-scheme', args.target, '-sdk', 'macosx', '-project', args.project_path, '-derivedDataPath', abs_path] command_result = run_command(command) print("Build Framework Result: {}".format(command_result)) exit(command_result) if __name__ == "__main__": parser = argparse.ArgumentParser( description="Build the Matter Darwin framework") parser.add_argument( "--project_path", default="src/darwin/Framework/Matter.xcodeproj", help="Set the project path", required=True, ) parser.add_argument( "--out_path", default="/tmp/macos_framework_output", help="Output lpath for framework", required=True, ) parser.add_argument("--target", default="Matter", help="Name of target to build", required=True) parser.add_argument("--log_path", help="Output log file destination", required=True) args = parser.parse_args() build_darwin_framework(args)
Python
0
@@ -669,16 +669,32 @@ , Popen%0A +import platform%0A %0A%0Adef ru @@ -1330,16 +1330,25 @@ mand = %5B +%0A 'xcodebu @@ -1352,16 +1352,24 @@ ebuild', +%0A '-schem @@ -1371,16 +1371,24 @@ scheme', +%0A args.ta @@ -1396,16 +1396,32 @@ get, - '-sdk', +%0A '-sdk',%0A 'ma @@ -1426,16 +1426,24 @@ macosx', +%0A '-proje @@ -1446,16 +1446,24 @@ roject', +%0A args.pr @@ -1473,16 +1473,24 @@ ct_path, +%0A '-deriv @@ -1501,16 +1501,24 @@ taPath', +%0A abs_pat @@ -1518,16 +1518,86 @@ abs_path +,%0A %22PLATFORM_PREFERRED_ARCH=%7B%7D%22.format(platform.machine())%0A %5D%0A co
35d39957d1a4fd847509384dab429092a39715e3
Load pre-trained GloVe
distance.py
distance.py
# coding: utf-8 # Author: Hussein AL-NATSHEH <hussein.al-natsheh@ish-lyon.cnrs.fr> # License: BSD 3 clause # 2016 import pandas as pd import argparse import numpy as np def load_data(dataset, verbose=0): if dataset == "sts": #Load STS data (combined 2012-2014 and cleaned) data = pd.read_csv('data/sts_gs_all.csv', dtype={'Score': np.float32}) if verbose == 2: print data.shape print data.head(n=10) elif verbose == 1: print data.shape X = data.as_matrix(columns=["Sent1", "Sent2"]) y = data.as_matrix(columns=["Score"]) return X, y if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dataset", required=True, type=str) parser.add_argument("--verbose", default=1, type=int) args = parser.parse_args() X, y = load_data (args.dataset, args.verbose)
Python
0
@@ -219,13 +219,13 @@ == -%22sts%22 +'sts' :%0A%09%09 @@ -554,16 +554,269 @@ n X, y%0A%0A +def load_glove(filepath, verbose=0):%0A%09data = pd.read_csv(filepath, sep=' ', compression='gzip', skiprows=9, index_col=0, header=None, encoding='utf-8')%0A%09if verbose == 2:%0A%09%09print data.shape%0A%09%09print data.head(n=10)%0A%09elif verbose == 1:%0A%09%09print data.shape%0A%0A if __nam @@ -914,21 +914,21 @@ t%22, -required=True +default='sts' , ty @@ -993,16 +993,102 @@ pe=int)%0A + parser.add_argument(%22--glovefile%22, default='data/glove.6B.300d.tar.gz', type=str)%0A args @@ -1161,8 +1161,65 @@ erbose)%0A + gloveb300d = load_glove(args.glovefile, args.verbose)
d15432dda3a06c08ad36901a72c6301f958b72e0
Update OneFichierCom.py
module/plugins/hoster/OneFichierCom.py
module/plugins/hoster/OneFichierCom.py
# -*- coding: utf-8 -*- import re from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo class OneFichierCom(SimpleHoster): __name__ = "OneFichierCom" __type__ = "hoster" __version__ = "0.86" __status__ = "testing" __pattern__ = r'https?://(?:www\.)?(?:(?P<ID1>\w+)\.)?(?P<HOST>1fichier\.com|alterupload\.com|cjoint\.net|d(es)?fichiers\.com|dl4free\.com|megadl\.fr|mesfichiers\.org|piecejointe\.net|pjointe\.com|tenvoi\.com)(?:/\?(?P<ID2>\w+))?' __config__ = [("use_premium", "bool", "Use premium account if available", True)] __description__ = """1fichier.com hoster plugin""" __license__ = "GPLv3" __authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es"), ("the-razer", "daniel_ AT gmx DOT net"), ("zoidberg", "zoidberg@mujmail.cz"), ("imclem", None), ("stickell", "l.stickell@yahoo.it"), ("Elrick69", "elrick69[AT]rocketmail[DOT]com"), ("Walter Purcaro", "vuolter@gmail.com"), ("Ludovic Lehmann", "ludo.lehmann@gmail.com")] COOKIES = [("1fichier.com", "LG", "en")] NAME_PATTERN = r'>File\s*Name :</td>\s*<td.*>(?P<N>.+?)<' SIZE_PATTERN = r'>Size :</td>\s*<td.*>(?P<S>[\d.,]+) (?P<U>[\w^_]+)' OFFLINE_PATTERN = r'File not found !\s*<' WAIT_PATTERN = r'>You must wait \d+ minutes' def setup(self): self.multiDL = self.premium self.resume_download = True def handle_free(self, pyfile): id = self.info['pattern']['ID1'] or self.info['pattern']['ID2'] url, inputs = self.parse_html_form('action="https://1fichier.com/\?%s' % id) if not url: self.fail(_("Download link not found")) if "pass" in inputs: inputs['pass'] = self.get_password() inputs['submit'] = "Download" self.download(url, post=inputs) def handle_premium(self, pyfile): self.download(pyfile.url, post={'dl': "Download", 'did': 0}) getInfo = create_getInfo(OneFichierCom)
Python
0
@@ -227,17 +227,17 @@ _ = %220.8 -6 +7 %22%0A __
d258bbe78be9cdf8ca2251add74a903f054b032a
add login/logout views. closes #7
app/urls.py
app/urls.py
"""testP URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from register import views urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^', include('register.urls')) ]
Python
0.000219
@@ -716,29 +716,55 @@ rom -register import views +django.contrib.auth import views as auth_views%0A %0A%0Aur @@ -815,16 +815,496 @@ .urls),%0A + # Auth views. Look at this in order to see who to use%0A # https://docs.djangoproject.com/en/1.10/topics/auth/default/%0A%0A url(r'%5Eaccounts/login/$', auth_views.login, %7B'template_name': 'admin/login.html'%7D, name='login'),%0A url(r'%5Eaccounts/logout/$', auth_views.logout, name='logout'),%0A url(r'%5Eaccounts/password/$', auth_views.password_change, name='password_change'),%0A url(r'%5Eaccounts/password/done/$', auth_views.password_change_done, name='password_change_done'),%0A url( @@ -1339,6 +1339,7 @@ '))%0A - %5D%0A +%0A
c136d416c2cb53449e1c175412eeaa46a2f78db1
Fix syntax error in email service
zou/app/utils/emails.py
zou/app/utils/emails.py
from flask_mail import Message

from zou.app import mail, app


def send_email(subject, body, recipient_email, html=None):
    """
    Send an email with given subject and body to given recipient.
    """
    if html is None:
        html = body

    with app.app_context():
        message = Message(
            sender="Kitsu Bot <no-reply@cg-wire.com>"
            body=body,
            html=html,
            subject=subject,
            recipients=[recipient_email]
        )
        mail.send(message)
Python
0.001263
@@ -347,16 +347,17 @@ re.com%3E%22 +, %0A
d3adfcbcf281f00aa454d4d8e45f6d5502495bde
Add get_absolute_url to UserSerializer
api/users/serializers.py
api/users/serializers.py
from rest_framework import serializers as ser

from api.base.serializers import JSONAPISerializer, LinksField, Link


class UserSerializer(JSONAPISerializer):
    id = ser.CharField(read_only=True, source='_id')
    fullname = ser.CharField()
    date_registered = ser.DateTimeField(read_only=True)
    links = LinksField({
        'html': 'absolute_url',
        'nodes': {
            'relation': Link('users:user-nodes', kwargs={'pk': '<pk>'})
        }
    })
    # TODO: finish me

    class Meta:
        type_ = 'users'

    def update(self, instance, validated_data):
        # TODO
        pass
Python
0
@@ -522,16 +522,82 @@ users'%0A%0A + def absolute_url(self, obj):%0A return obj.absolute_url%0A%0A def
4b3f79ae5e30de867941d363d1f186d3c2494b4b
Remove obsolete token code.
api_sample/http_utils.py
api_sample/http_utils.py
# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utils for http response and content handling. Used for common cracking of the content document returned from a request. """ import json import logging import sys # Http response codes to retry - includes quota issues. # 402: Payment required # 408: Request timeout # 503: Service unavailable # 504: Gateway timeout RETRY_RESPONSE_CODES = [402, 408, 503, 504] def _FromJsonString(json_string): """Helper to safely attempt a conversion from a json string to an object. Args: json_string: Presumably properly formatted json string. Returns: Object reflecting the conversion of the json. """ try: return json.loads(json_string) except ValueError as e: print 'ERROR: response is not valid json: %s\n%s.' % (e, json_string) sys.exit(1) def ParseHttpResult(url, response, content): """Helper to more clearly find and return error messages. Args: url: full url including https:// for the RESTful API command. response: response with headers from http. content: content from the url (unzipped if necessary). Returns: If error text is discovered, returns a string with the error text otherwise returns an object containing the content. """ logging.getLogger().debug('----------------------------------------') logging.getLogger().debug('status=%d' % response.status) logging.getLogger().debug('----------------------------------------') logging.getLogger().debug('content=\n%s' % content) logging.getLogger().debug('----------------------------------------') if response.status in RETRY_RESPONSE_CODES: print 'Possible quota problem (%d). %s. You should retry.' % ( response.status, url) sys.exit(1) content = _FromJsonString(content) if 'error' in content: error_text = ['ERROR: status=%d.' % response.status] error_text += ['url=%s.' % url] # The content:error.message seems to be more useful to users. Retrieve it. message = content.get('error', {}).get('message') if message: error_text += ['message=%s' % message] else: error_text += ['content=%s' % content] # The provisioning API is not available if the box is not checked. if (response.status == 403 and message == 'Domain cannot use apis.'): error_text = [message, 'You should check "Enable provisioning API" ' 'in your Domain Settings->User Settings.'] # When requesting tokens for a specific client_id, if no tokens # are found, the API server responds with an unexpected 500 error. # Notice that specific case and fail a little more gracefully. elif (response.status == 500 and message == 'No tokens exist for the specified client id'): error_text = [message] return '\n'.join(error_text) return content
Python
0.000011
@@ -2985,349 +2985,8 @@ .'%5D%0A - # When requesting tokens for a specific client_id, if no tokens%0A # are found, the API server responds with an unexpected 500 error.%0A # Notice that specific case and fail a little more gracefully.%0A elif (response.status == 500 and%0A message == 'No tokens exist for the specified client id'):%0A error_text = %5Bmessage%5D%0A
45689b8b2d91310e4002a63253009fddae947bb3
Bump copyright year in sphinx docs
doc/conf.py
doc/conf.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from pycommand import __version__ as pycommand_version

extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'pycommand'
copyright = '2013, Benjamin Althues'
version = pycommand_version
release = pycommand_version
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'nature'

man_pages = [
    ('index', 'pycommand', 'pycommand Documentation',
     ['Benjamin Althues'], 3)
]

texinfo_documents = [
    ('index', 'pycommand', 'pycommand Documentation',
     'Benjamin Althues', 'pycommand', 'One line description of project.',
     'Miscellaneous'),
]
Python
0
@@ -51,76 +51,17 @@ ort -os%0Aimport sys%0Afrom pycommand import __version__ as pycommand_version +pycommand %0A%0Aex @@ -232,16 +232,21 @@ = '2013 +-2015 , Benjam @@ -276,24 +276,26 @@ ycommand +._ _version %0Arelease @@ -286,16 +286,18 @@ _version +__ %0Arelease @@ -308,24 +308,26 @@ ycommand +._ _version %0Aexclude @@ -318,16 +318,18 @@ _version +__ %0Aexclude @@ -619,42 +619,25 @@ d', -'One line description of project.' +pycommand.__doc__ ,%0A
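Besides bumping the year, the change reads the version straight off the installed package and reuses its docstring for the texinfo description. A minimal sketch of the resulting conf.py fields, assuming the pycommand package is importable and defines __version__ and a module docstring:

```python
# Sphinx metadata derived from the package itself rather than hard-coded.
import pycommand

project = 'pycommand'
copyright = '2013-2015, Benjamin Althues'

version = pycommand.__version__   # short version
release = pycommand.__version__   # full version, including tags

texinfo_documents = [
    ('index', 'pycommand', 'pycommand Documentation',
     'Benjamin Althues', 'pycommand',
     pycommand.__doc__,            # one-line description from the docstring
     'Miscellaneous'),
]
```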
60b9dcdd06e67653f5a177fd7f4cfb82e9fc4be3
Add 🔥 and ❄️ emojis for "hot" and "cold" statuses fix #16
database.py
database.py
import logging import shortener import timeago import datetime from google.appengine.ext import ndb from google.appengine.api import memcache from helper import development from apis.telegram import send_message class StoryPost(ndb.Model): title = ndb.StringProperty() text = ndb.TextProperty() message = ndb.TextProperty() url = ndb.TextProperty() short_url = ndb.TextProperty(indexed=False) short_hn_url = ndb.TextProperty(indexed=False) score = ndb.IntegerProperty(indexed=False) telegram_message_id = ndb.IntegerProperty() created = ndb.DateTimeProperty(auto_now_add=True) def add_memcache(self): memcache.set(self.key.id(), self.url) @classmethod def add(cls, story): story_id_int = story.get('id') story_id = str(story_id_int) short_id = shortener.encode(story_id_int) hn_url = "https://news.ycombinator.com/item?id={}".format(story_id) story_url = story.get('url') # Check memcache and databse, maybe this story was already sent if memcache.get(story_id): logging.info('STOP: {} in memcache'.format(story_id)) return post = ndb.Key(cls, story_id).get() if post: logging.info('STOP: {} in DB'.format(story_id)) post.add_memcache() return logging.info('SEND: {}'.format(story_id)) story['title'] = story.get('title').encode('utf-8') comments_count = story.get('descendants', 0) buttons = [] if development(): short_hn_url = 'http://localhost:8080/c/{}'.format(short_id) else: short_hn_url = 'https://readhacker.news/c/{}'.format(short_id) if story_url: if development(): short_url = 'http://localhost:8080/s/{}'.format(short_id) else: short_url = 'https://readhacker.news/s/{}'.format(short_id) buttons.append({ 'text': 'Read', 'url': story_url }) else: short_url = short_hn_url story['url'] = hn_url buttons.append({ 'text': '{}+ Comments'.format(comments_count), 'url': hn_url }) # Get the difference between published date and when 100+ score was reched now = datetime.datetime.now() published = datetime.datetime.fromtimestamp(story.get('time')) ago = timeago.format(now, published) # Add title message = '<b>{title}</b> (Score: {score}+ {ago})\n\n'.format(ago=ago, **story) # Add link message += '<b>Link:</b> {}\n'.format(short_url) # Add comments Link(don't add it for `Ask HN`, etc) if story_url: message += '<b>Comments:</b> {}\n'.format(short_hn_url) # Add text text = story.get('text') if text: text = text.replace('<p>', '\n').replace('&#x27;', "'") \ .replace('&#x2F;', '/').encode('utf-8') message += "\n{}\n".format(text) # Send to the telegram channel if development(): result = send_message('@hacker_news_feed_st', message, {'inline_keyboard': [buttons]}) else: result = send_message('@hacker_news_feed', message, {'inline_keyboard': [buttons]}) logging.info('Telegram response: {}'.format(result)) telegram_message_id = None if result and result.get('ok'): telegram_message_id = result.get('result').get('message_id') post = cls(id=story_id, title=story.get('title'), url=story.get('url'), score=story.get('score'), text=story.get('text'), short_url=short_url, short_hn_url=short_hn_url, message=message, telegram_message_id=telegram_message_id) post.put() post.add_memcache()
Python
0.001821
@@ -1,12 +1,29 @@ +# coding: utf-8%0A%0A import loggi @@ -225,16 +225,94 @@ essage%0A%0A +TWO_HOURS = datetime.timedelta(hours=2)%0ATWO_DAYS = datetime.timedelta(days=2)%0A %0Aclass S @@ -2340,24 +2340,312 @@ published)%0A%0A + # Add %F0%9F%94%A5 emoji if story is hot and gained required score in less than 2 hours,%0A # or add %E2%9D%84%EF%B8%8F if it took it more than 2 days%0A status_emoji = ''%0A delta = now - published%0A if delta %3C= TWO_HOURS:%0A status_emoji = '%F0%9F%94%A5 '%0A elif delta %3E= TWO_DAYS:%0A status_emoji = '%E2%9D%84%EF%B8%8F '%0A%0A # Add ti @@ -2679,16 +2679,30 @@ e%7D%3C/b%3E ( +%7Bstatus_emoji%7D Score: %7B @@ -2732,15 +2732,51 @@ mat( -ago=ago +%0A ago=ago, status_emoji=status_emoji , ** @@ -3790,16 +3790,23 @@ + score=st @@ -3847,32 +3847,39 @@ text'),%0A + + short_url=short_ @@ -3910,16 +3910,23 @@ hn_url,%0A +
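The diff keys the new emoji off the gap between a story's publication time and the moment it crossed the score threshold. A small standalone sketch of that rule (the wrapper function is illustrative; the bot inlines this logic in StoryPost.add):

```python
# -*- coding: utf-8 -*-
import datetime

TWO_HOURS = datetime.timedelta(hours=2)
TWO_DAYS = datetime.timedelta(days=2)

def status_emoji(published, now=None):
    """🔥 if the score was reached within two hours of publication,
    ❄️ if it took more than two days, otherwise no prefix."""
    now = now or datetime.datetime.now()
    delta = now - published
    if delta <= TWO_HOURS:
        return '🔥 '
    elif delta >= TWO_DAYS:
        return '❄️ '
    return ''

# A story published half an hour ago counts as hot.
recent = datetime.datetime.now() - datetime.timedelta(minutes=30)
assert status_emoji(recent) == '🔥 '
```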
cae93790520765d85ed990e44b8733b44ca5eace
Modify the OpsCenter startup script to generate an SSH keypair and push the public key to the GCP project metadata using gCloud
datastax.py
datastax.py
import yaml def GenerateFirewall(context): name = 'opscenterfirewall-' + context.env['name'] firewalls = [ { 'name': name, 'type': 'compute.v1.firewall', 'properties': { 'sourceRanges': [ '0.0.0.0/0' ], 'allowed': [{ 'IPProtocol': 'tcp', 'ports': ['8888', '8443'] }] } } ] return firewalls def GenerateConfig(context): config = {'resources': []} zonal_clusters = { 'name': 'clusters-' + context.env['name'], 'type': 'regional_multi_vm.py', 'properties': { 'sourceImage': 'https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1410-utopic-v20150625', 'zones': context.properties['zones'], 'machineType': context.properties['machineType'], 'network': 'default', 'numberOfVMReplicas': context.properties['nodesPerZone'], 'disks': [ { 'deviceName': 'vm-test-data-disk', 'type': 'PERSISTENT', 'boot': 'false', 'autoDelete': 'true', 'initializeParams': { 'diskType': 'pd-ssd', 'diskSizeGb': context.properties['diskSize'] } } ], 'bootDiskType': 'pd-standard', 'metadata': { 'items': [ { 'key': 'startup-script', 'value': '''| #!/bin/bash mkdir -p /mnt/data chmod 777 /mnt/data /usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" /dev/disk/by-id/google-${HOSTNAME}-test-data-disk /mnt/data apt-get update apt-get install openjdk-7-jdk -yqq ''' } ] } } } ops_center_node = { 'name': 'opscenter-' + context.env['name'], 'type': 'vm_instance.py', 'properties': { 'sourceImage': 'https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1410-utopic-v20150625', 'zone': context.properties['opsCenterZone'], 'machineType': context.properties['machineType'], 'network': 'default', 'bootDiskType': 'pd-standard', 'metadata': { 'items': [ { 'key': 'startup-script', 'value': '''| #! /bin/bash apt-get update apt-get install openjdk-7-jdk -yqq echo "Installing OpsCenter" echo "deb http://debian.datastax.com/community stable main" | tee -a /etc/apt/sources.list.d/datastax.community.list curl -L http://debian.datastax.com/debian/repo_key | apt-key add - apt-get -y install opscenter=5.2.1 echo "Starting OpsCenter" sudo service opscenterd start echo "Waiting for OpsCenter to start..." sleep 15 ''' } ] } } } config['resources'].append(zonal_clusters) config['resources'].append(ops_center_node) config['resources'].extend(GenerateFirewall(context)) return yaml.dump(config)
Python
0
@@ -1679,73 +1679,160 @@ ''' -%7C %0A -#!/bin/bash%0A mkdir -p /mnt/data%0A chmod 777 /mnt/data%0A + #!/bin/bash%0A mkdir -p /mnt/data%0A chmod 777 /mnt/data%0A @@ -1949,39 +1949,83 @@ nt/data%0A -apt-get update%0A + apt-get update%0A apt-get @@ -2051,16 +2051,18 @@ dk -yqq%0A + @@ -2610,32 +2610,186 @@ 'pd-standard',%0A + 'serviceAccounts': %5B%7B%0A 'email': 'default',%0A 'scopes': %5B 'https://www.googleapis.com/auth/compute' %5D%0A %7D%5D,%0A 'met @@ -2936,46 +2936,399 @@ ''' -%7C %0A -#! /bin/bash%0A apt-get update%0A + #! /bin/bash%0A ssh-keygen -b 2048 -t rsa -f /tmp/sshkey -q -N %22%22%0A echo -n 'root:' %7C cat - /tmp/sshkey.pub %3E temp && mv temp /tmp/sshkey.pub%0A gcloud compute project-info add-metadata --metadata-from-file sshKeys=/tmp/sshkey.pub%0A apt-get update%0A @@ -3363,16 +3363,38 @@ k -yqq%0A%0A + echo @@ -3413,24 +3413,46 @@ OpsCenter%22%0A + echo %22de @@ -3560,16 +3560,38 @@ ty.list%0A + curl @@ -3653,16 +3653,38 @@ y add -%0A + apt- @@ -3715,16 +3715,38 @@ =5.2.1%0A%0A + echo @@ -3763,24 +3763,46 @@ OpsCenter%22%0A + sudo ser @@ -3824,16 +3824,38 @@ start%0A%0A + echo @@ -3891,16 +3891,38 @@ art...%22%0A + slee @@ -3926,16 +3926,18 @@ leep 15%0A +
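Decoded, the change gives the OpsCenter VM the compute scope and a startup script that creates an SSH keypair and publishes the public key to the project metadata with gcloud. A trimmed sketch of the affected part of the template, with the other properties abbreviated and a placeholder resource name:

```python
# Startup script from the diff: generate a key, prefix the user name,
# publish it as project-wide sshKeys metadata, then continue installing.
STARTUP_SCRIPT = """\
#! /bin/bash
ssh-keygen -b 2048 -t rsa -f /tmp/sshkey -q -N ""
echo -n 'root:' | cat - /tmp/sshkey.pub > temp && mv temp /tmp/sshkey.pub
gcloud compute project-info add-metadata --metadata-from-file sshKeys=/tmp/sshkey.pub
apt-get update
apt-get install openjdk-7-jdk -yqq
"""

ops_center_node = {
    'name': 'opscenter-example',          # placeholder name
    'type': 'vm_instance.py',
    'properties': {
        # The default service account needs the compute scope so that
        # gcloud can write project metadata from inside the instance.
        'serviceAccounts': [{
            'email': 'default',
            'scopes': ['https://www.googleapis.com/auth/compute'],
        }],
        'metadata': {
            'items': [{'key': 'startup-script', 'value': STARTUP_SCRIPT}],
        },
    },
}
```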
931614b081b5e6461e6706e3fde884fcfc889f55
Update docs copyright notice
doc/conf.py
doc/conf.py
# -*- coding: utf-8 -*- # # Motor documentation build configuration file # # This file is execfile()d with the current directory set to its containing dir. import sys, os sys.path[0:0] = [os.path.abspath('..')] from pymongo import version as pymongo_version import motor # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.todo', 'doc.mongo_extensions', 'doc.motor_extensions', 'sphinx.ext.intersphinx', 'doc.coroutine_annotation'] primary_domain = 'py' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Motor' copyright = u'2016 MongoDB, Inc.' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = motor.version # The full version, including alpha/beta/rc tags. release = motor.version # List of documents that shouldn't be included in the build. unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for extensions ---------------------------------------------------- autoclass_content = 'init' doctest_path = [os.path.abspath('..')] # Don't test examples pulled from PyMongo's docstrings just because they start # with '>>>' doctest_test_doctest_blocks = '' doctest_global_setup = """ import pprint import sys from datetime import timedelta from tornado import gen from tornado.ioloop import IOLoop import pymongo from pymongo.mongo_client import MongoClient sync_client = MongoClient() ismaster = sync_client.admin.command('isMaster') server_info = sync_client.server_info() if 'setName' in ismaster: raise Exception( "Run doctests with standalone MongoDB 3.6 server, not a replica set") if ismaster.get('msg') == 'isdbgrid': raise Exception( "Run doctests with standalone MongoDB 3.6 server, not mongos") if server_info['versionArray'][:2] != [3, 6]: raise Exception( "Run doctests with standalone MongoDB 3.6 server, not %s" % ( server_info['version'], )) sync_client.drop_database("doctest_test") db = sync_client.doctest_test import motor from motor import MotorClient """ # -- Options for HTML output --------------------------------------------------- html_copy_source = False # Theme gratefully vendored from CPython source. 
html_theme = "pydoctheme" html_theme_path = ["."] html_theme_options = {'collapsiblesidebar': True} html_static_path = ['static'] html_sidebars = { 'index': ['globaltoc.html', 'searchbox.html'], } # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'Motor' + release.replace('.', '_') # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Motor.tex', u'Motor Documentation', u'A. Jesse Jiryu Davis', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True autodoc_default_flags = ['inherited-members'] autodoc_member_order = 'groupwise' pymongo_inventory = ('http://api.mongodb.com/python/%s/' % pymongo_version, None) intersphinx_mapping = { 'bson': pymongo_inventory, 'gridfs': pymongo_inventory, 'pymongo': pymongo_inventory, 'aiohttp': ('http://aiohttp.readthedocs.io/en/stable/', None), 'tornado': ('http://www.tornadoweb.org/en/stable/', None), 'python': ('https://docs.python.org/3/', None), }
Python
0
@@ -1044,16 +1044,24 @@ = u'2016 +-present MongoDB
173fcbb828f69530bace267bcd084f083a81d7e3
Update copyright year
doc/conf.py
doc/conf.py
# -*- coding: utf-8 -*- # # Moulder documentation build configuration file, created by # sphinx-quickstart on Sun Dec 14 11:15:19 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't pickleable (module imports are okay, they're removed automatically). # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If your extensions are in another directory, add it here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. sys.path.append(os.path.abspath('../python')) # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] # Link to standard Python documentation intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} # Add any paths that contain templates here, relative to this directory. templates_path = ['.templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Sali lab web framework' copyright = '2009-2020, Sali Lab' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = 'SVN' # The full version, including alpha/beta/rc tags. release = 'SVN' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['.build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # Options for HTML output # ----------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'classic' # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". html_title = 'Web framework documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['.static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, the reST sources are included in the HTML build as _sources/<name>. #html_copy_source = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'saliwebdoc' # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). latex_documents = [ ('index', 'Saliweb.tex', 'Web service Documentation', 'Sali Lab', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True autodoc_mock_imports = ["flask", "MySQLdb"]
Python
0.000001
@@ -1464,17 +1464,17 @@ 2009-202 -0 +1 , Sali L
858132382c57d181d5865162c8ff87db656d4de9
split up functions better
artsy-dl.py
artsy-dl.py
#!/usr/bin/env python -tt ########################################################### ## ## ## artsy-dl.py ## ## ## ## Author: Tony Fischetti ## ## tony.fischetti@gmail.com ## ## ## ########################################################### """ this script is brittle """ __author__ = 'Tony Fischetti' __version__ = '0.1' import sys import lxml.html from lxml.cssselect import CSSSelector import requests import html2text as h2t import re import wget import os THE_URL = sys.argv[1] FN_TEMPLATE = sys.argv[2] ARTIST_CSS = CSSSelector(".entity-link") TITLE_CSS = CSSSelector(".artwork-metadata__title em") LINK_CSS = CSSSelector(".js-artwork-images__images__image__display__img") ARTIST_REGEX = re.compile('\[(.+?)\]', re.UNICODE) TITLE_REGEX = re.compile('_(.+?)_, (\d+)', re.UNICODE) LINK_REGEX = re.compile('img data-src="(.+?)"') def cop_out(f): def inner(*args, **kargs): try: return f(*args, **kargs) except Exception as e: print("The function <{}> failed".format(f.__name__)) sys.exit(1) return inner @cop_out def get_text_from_element(element): return h2t.html2text(lxml.html.tostring(element).decode("utf-8")).rstrip() @cop_out def get_tree(url): r = requests.get(url) return lxml.html.fromstring(r.text) @cop_out def get_artist(tree): artist_text = get_text_from_element(ARTIST_CSS(tree)[0]) final_artist = ARTIST_REGEX.search(artist_text).group(1) return final_artist @cop_out def get_title_and_date(tree): title_text = get_text_from_element(TITLE_CSS(tree)[0]) tmp = TITLE_REGEX.search(title_text) return tmp.group(1), tmp.group(2) @cop_out def get_link(tree): link_text = lxml.html.tostring(LINK_CSS(tree)[0]).decode("utf-8") final_link = LINK_REGEX.search(link_text).group(1) return final_link @cop_out def get_good_filename(artist, title, date): template = FN_TEMPLATE if "%a" in FN_TEMPLATE: template = template.replace("%a", artist) if "%t" in FN_TEMPLATE: template = template.replace("%t", title) if "%d" in FN_TEMPLATE: template = template.replace("%d", date) return template def main(): tree = get_tree(THE_URL) artist = get_artist(tree) title, date = get_title_and_date(tree) link = get_link(tree) old_filename = wget.download(link) tmp, extension = os.path.splitext(old_filename) good_filename = get_good_filename(artist, title, date) os.rename(old_filename, "{}{}".format(good_filename, extension)) if __name__ == '__main__': STATUS = main() sys.exit(STATUS)
Python
0.000078
@@ -2115,28 +2115,27 @@ out%0Adef get_ -good +new _filename(ar @@ -2438,189 +2438,180 @@ e%0A%0A%0A -%0Adef main( +@cop_out%0Adef download_image(link ):%0A -tre +old_filenam e = +w get -_tree(THE_URL +.download(link )%0A -%0A -artist = get_artist(tree)%0A title, date = get_title_and_date(tree)%0A link = get_link(tree)%0A%0A old_filename = wget.download(link) +return old_filename%0A%0A%0A@cop_out%0Adef rename_downloaded_image(old_filename, artist, title, date): %0A @@ -2663,20 +2663,19 @@ me)%0A -good +new _filenam @@ -2682,20 +2682,19 @@ e = get_ -good +new _filenam @@ -2762,12 +2762,11 @@ mat( -good +new _fil @@ -2784,16 +2784,281 @@ nsion))%0A + return True%0A%0A%0A%0Adef main():%0A tree = get_tree(THE_URL)%0A%0A artist = get_artist(tree)%0A title, date = get_title_and_date(tree)%0A link = get_link(tree)%0A%0A old_filename = download_image(link)%0A rename_downloaded_image(old_filename, artist, title, date)%0A%0A %0A%0A%0A%0Aif _
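The refactor pulls the download and the rename out of main() into their own helpers. A simplified sketch of the two new functions (the real script derives the name from its FN_TEMPLATE placeholders; a fixed format is used here for brevity):

```python
import os
import wget  # same third-party downloader the script already uses

def download_image(link):
    """Fetch the image and return the local filename wget chose."""
    return wget.download(link)

def rename_downloaded_image(old_filename, artist, title, date):
    """Rename the download, keeping the original file extension."""
    _, extension = os.path.splitext(old_filename)
    new_filename = "{} - {} ({})".format(artist, title, date)
    os.rename(old_filename, "{}{}".format(new_filename, extension))
    return True
```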
37f1c32cc1e6a81d103c018c91ae79359ee0d892
Modify get_works_detail method
etl.py
etl.py
#!/usr/bin/python # -*- coding: utf-8 -*- import requests from bs4 import BeautifulSoup as bs import re from dao import DAO import datetime class ETL: def __init__(self): self.dao = DAO() def get_monthly_ranking(self): count = 0 ranking = [] intervals = ["1_20", "21_40", "41_60", "61_80", "81_100"] for interval in intervals: url = "http://www.dmm.co.jp/mono/dvd/-/ranking/=/term=monthly/mode=actress/rank=" + interval r = requests.get(url) soup = bs(r.text) actresses = soup.find_all("td", {"class": "bd-b"}) for actress in actresses: actress_a = actress.find("a") pattern = re.compile("/mono/dvd/-/list/=/article=actress/id=(.*)/") match = pattern.search(actress_a.get("href")) actress_id = match.group(1) actress_img = actress_a.find("img").get("src") actress_data = actress.find("div", {"class": "data"}).find("p").find("a") actress_name = actress_data.text # actress_url = "http://www.dmm.co.jp" + actress_data.get("href") # + "sort=date/" ranking.append({"id": actress_id, "name": actress_name, "img": actress_img}) count += 1 self.dao.hmset_actresses(ranking) return ranking def get_new_works(self): day = str(datetime.datetime.now().day) url = "http://www.dmm.co.jp/mono/dvd/-/calendar/=/day=" + day + "-" + day + "/" r = requests.get(url) soup = bs(r.text) cal = soup.find("table", {"id": "monocal"}) works_list = cal.find_all("tr") if len(works_list) == 0: return new_works = list() for works in works_list: actress_tag = works.find("td", {"class": "info-01"}) if actress_tag is None or actress_tag.text == "----": continue pattern = re.compile("/mono/dvd/-/list/=/article=actress/id=(.*)/") match = pattern.search(actress_tag.find("a").get("href")) actress_id = match.group(1) if not self.dao.is_actress_exists_by_id(actress_id): continue title_tag = works.find("td", {"class": "title-monocal"}) title = title_tag.find("a") title_name = title.text pattern = re.compile(ur"(^(【数量限定】|【DMM限定】|【DMM限定販売】|【アウトレット】)|(ブルーレイディスク)$)", re.UNICODE) match = re.search(pattern, title_name) if match: continue title_url = "http://www.dmm.co.jp" + title.get("href") cover_url = self.get_works_cover(title_url) if cover_url is None: continue else: new_works.append({"id": actress_id, "img": cover_url}) return new_works def get_works_cover(self, url): r = requests.get(url) soup = bs(r.text) sample = soup.find("div", {"class": "tx10 pd-3 lh4"}) if sample is None: return # No Image a_tag = sample.find("a") if a_tag is None: return print a_tag.get('href') performer = soup.find("span", {"id": "performer"}) performer_a_tag = performer.find_all("a") if len(performer_a_tag) == 1: pattern = re.compile("/mono/dvd/-/detail/=/cid=(.*)/") match = pattern.search(url) cid = match.group(1) print cid works = self.dao.find_one_works_by_id(actress_id) print works if works is not None and cid in works: return else: self.dao.update_one_works_by_id(actress_id, cid) return a_tag.get('href') else: return
Python
0.000001
@@ -2667,24 +2667,21 @@ -cover_ur +detai l = self @@ -2691,21 +2691,22 @@ t_works_ -cover +detail (title_u @@ -2707,16 +2707,28 @@ itle_url +, actress_id )%0A @@ -2736,24 +2736,21 @@ if -cover_ur +detai l is Non @@ -2832,44 +2832,14 @@ end( -%7B%22id%22: actress_id, %22img%22: cover_url%7D +detail )%0A%0A @@ -2885,13 +2885,14 @@ rks_ -cover +detail (sel @@ -2897,16 +2897,28 @@ elf, url +, actress_id ):%0A @@ -3801,16 +3801,42 @@ return + %7B%22id%22: actress_id, %22img%22: a_tag.g @@ -3845,16 +3845,17 @@ ('href') +%7D %0A
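The renamed helper now takes the actress id along with the URL and hands back the ready-to-append dict instead of a bare cover URL. A stripped-down, runnable sketch of that shape, with the scraping itself stubbed out:

```python
def get_works_detail(url, actress_id, find_cover_url):
    """Return the dict appended to new_works, or None when no cover exists.

    `find_cover_url` stands in for the BeautifulSoup lookups and duplicate
    checks the real method performs.
    """
    cover_url = find_cover_url(url)
    if cover_url is None:
        return None
    return {"id": actress_id, "img": cover_url}

# Example with a stubbed scraper:
detail = get_works_detail('http://example.com/work', 'actress_1',
                          lambda url: 'http://example.com/cover.jpg')
assert detail == {"id": "actress_1", "img": "http://example.com/cover.jpg"}
```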
7502dca0df5c7c3ea247b3bf63b2d60e7cad74ce
Remove redundant text search dictionary
anthology/database.py
anthology/database.py
"""MongoDB backend""" from pymongo import MongoClient, ASCENDING from bson import ObjectId class DatabaseError(Exception): """Raised for unrecoverable database errors""" pass def connection(): """Return MongoClient connection object. pymongo.MongoClient has it's own instance caching/connection pool. MongoMock however does not, so we will mockup this in tests and provide our own instance cache. """ return MongoClient() def db_songs(): """Return songs collection""" return connection().anthology.songs def db_averages(): """Return averages collection""" return connection().anthology.averages def get_songs_list(previous_id, limit, search_term=None, search_word=None): """Return songs from database. Parameters `previous_id` and `limit` are used to iterate over result set. Search is performed using parameter `search_term`. This performs search on text index for documents. Index is always required and without it search will fail. :offset: Number of items to skip :limit: Number of returned items :search_term: Partial word search term :search_word: Full word search term :returns: Iterable cursor object """ query = [{}] # Search for partial words if search_term: search = {'$text': { '$search': search_term, '$language': 'none', '$caseSensitive': False, '$diacriticSensitive': False }} regex = {'$regex': search_term, '$options': 'i'} query.append({'$or': [{'title': regex}, {'artist': regex}]}) # Search for full words if search_word: search = {'$text': { '$search': search_word, '$language': 'none', '$caseSensitive': False, '$diacriticSensitive': False }} query.append(search) if previous_id: query.append({'_id': {'$gt': ObjectId(previous_id)}}) return db_songs().find({'$and': query}).sort('_id', ASCENDING).limit(limit) def get_average_difficulty(level): """Return average difficulty for all songs on given level. If difficulty is not given, return difficulty for all songs in database. :level: Song level to search :collection: Collection to search from :returns: Dictionary with level and average difficulty """ collection = db_songs() pipeline = [ {"$group": { "_id": None, "average_difficulty": {"$avg": "$difficulty"} }}] if level: pipeline.insert(0, {"$match": {"level": level}}) results = collection.aggregate(pipeline) try: result = results.next() result["algorithm"] = 'trivial' return result except StopIteration: return {} def filter_songs_by_level(songs, level): """Filter out all bad values""" for song in songs: if level is None: yield song if level == song["level"]: yield song def get_average_difficulty_fun(level): """Just for fun implementation for averages. Most data was already batch processed beforehand, so we can calculate rest efficiently in Python. If difficulty is not given, return difficulty for all songs in database. 
:level: Song level to search :collection: Collection to search from :returns: Dictionary with level and average difficulty """ totals = db_averages().find() total_difficulty = 0 number_of_songs = 0 for total in filter_songs_by_level(totals, level): # BUGBUG: This will overflow with big dataset total_difficulty += total["total_difficulty"] number_of_songs += total["number_of_songs"] if number_of_songs == 0: return {} average_difficulty = total_difficulty / float(number_of_songs) return { 'level': level, 'average_difficulty': average_difficulty, 'algorithm': 'fun'} def get_song(song_id): """Return song with given id""" return db_songs().find_one({'_id': ObjectId(song_id)}) def update_song(song_id, fields): """Return song with given id""" db_songs().update_one( {'_id': ObjectId(song_id)}, {'$set': fields})
Python
0.999238
@@ -1289,196 +1289,8 @@ rm:%0A - search = %7B'$text': %7B%0A '$search': search_term,%0A '$language': 'none',%0A '$caseSensitive': False,%0A '$diacriticSensitive': False%0A %7D%7D%0A%0A
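With the redundant $text dictionary gone, the partial-word branch builds only the regex clause. A sketch of the resulting query assembly, assuming pymongo's bson is available as in the module above (the helper function is illustrative; the original inlines this in get_songs_list):

```python
from bson import ObjectId

def build_query(previous_id=None, search_term=None, search_word=None):
    query = [{}]
    if search_term:  # partial words: case-insensitive regex on title/artist
        regex = {'$regex': search_term, '$options': 'i'}
        query.append({'$or': [{'title': regex}, {'artist': regex}]})
    if search_word:  # full words: text index lookup
        query.append({'$text': {
            '$search': search_word,
            '$language': 'none',
            '$caseSensitive': False,
            '$diacriticSensitive': False,
        }})
    if previous_id:  # keyset pagination on _id
        query.append({'_id': {'$gt': ObjectId(previous_id)}})
    return {'$and': query}
```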
69912ea520a35f8c35a62d7a3c6efe1a9367f03f
Fix typo
ckanext/mapactiontheme/tests/test_admin_controller.py
ckanext/mapactiontheme/tests/test_admin_controller.py
from ckan.plugins.toolkit import config import ckan.tests.helpers as helpers import ckan.tests.factories as factories from ckan.plugins import load class TestCustomAdminController(helpers.FunctionalTestBase): def setup(self): super(TestCustomAdminController, self).setup() self.admin = factories.User(name='adminuser', sysadmin=True) self.editor = factories.User(name='editor') self.user = factories.User(name='user') self.organization = factories.Organization( name='mapaction', user=self.admin) self.site_url = config.get("ckan.site_url") self.app = self._get_test_app() def test_editor_user(self): context = {'ignore_auth': True} helpers.call_action( 'organization_member_create', context, id=self.organization['id'], username=self.editor['name'], role='editor' ) env = {'REMOTE_USER': self.editor['name'].encode('utf-8')} response = self.app.get( url='%s/ckan-admin/trash' % self.site_url, extra_environ=env, ) assert "Purge" in response.body assert response.status_int == 200 def test_admin_user(self): env = {'REMOTE_USER': self.admin['name'].encode('utf-8')} response = self.app.get( url='%s/ckan-admin/trash' % self.site_url, extra_environ=env, ) assert "Purge" in response.body assert response.status_int == 200 def test_random_user(self): env = {'REMOTE_USER': self.user['name'].encode('utf-8')} response = self.app.get( url='%s/ckan-admin/trash' % self.site_url, extra_environ=env, expect_errors=True ) assert response.status_int == 403 def test_no_user(self): """Test without a loged in user""" env = {'REMOTE_USER': ''} response = self.app.get( url='%s/ckan-admin/trash' % self.site_url, extra_environ=env, expect_errors=True ) assert response.status_int == 403
Python
0.999999
@@ -1869,16 +1869,17 @@ ut a log +g ed in us

3ae496284e86815304736196bd66052fbfc9b81d
Support 'I Want you'
YoClient.py
YoClient.py
#!/usr/bin/env python import httplib import urllib class YoClient: Host = 'api.justyo.co' Port = 80 NoticeAPI = '/yo/' BroadcastAPI = '/yoall/' Headers = {'Cache-Control': 'no-cache', 'Content-Type': 'application/x-www-form-urlencoded'} #Proxy = 'PROXY-HOSTNAME:PORT' Proxy = None Token = '' #Parse your token here Error = None link = None def notice(self, username, link=None): username = username.upper() self.setLink(link) param = { 'username' : username, 'api_token' : self.Token, } if self.link is not None: param['link'] = self.link return self._action(self.NoticeAPI, param) def broadcast(self, link=None): self.setLink(link) param = { 'api_token' : self.Token } if self.link is not None: param['link'] = self.link return self._action(self.BroadcastAPI, param) def setLink(self, link): if link is not None: self.link = link def _action(self, API, param): param = urllib.urlencode(param) if self.Proxy is not None: conn = httplib.HTTPConnection(self.Proxy) API = 'http://' + self.Host + API else: conn = httplib.HTTPConnection(host=self.Host, port=self.Port) conn.request("POST", API, param, self.Headers) result = conn.getresponse() status = result.status / 100 == 2 if not status: self.Error = result.read() conn.close() return status if __name__ == '__main__': import sys conn = YoClient() link = 'https://github.com/litrin/YoClient' if len(sys.argv) > 1: username = sys.argv[1] status = conn.notice(username, link) else: status = conn.broadcast(link) if (status): exit(0) exit(1)
Python
0
@@ -1662,16 +1662,338 @@ status%0A%0A +%0Aclass IWantYo(YoClient):%0A%0A _imageGenratorUrl = 'http://www.hetemeel.com/unclesamshow.php'%0A%0A def setLink(self, text):%0A YoClient.setLink(self, self._genrateUrl(text))%0A%0A def _genrateUrl(self, text):%0A return %22%25s?%25s%22 %25 (self._imageGenratorUrl, urllib.urlencode(%7B'text' : text%7D))%0A%0A setText = setLink%0A%0A if __nam @@ -2037,24 +2037,23 @@ conn = -YoClient +IWantYo ()%0A l
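Decoded from the diff, the new subclass reuses YoClient but turns the given text into an image-generator link before sending. It is Python 2 code like the rest of the module and assumes the YoClient class defined above:

```python
class IWantYo(YoClient):

    _imageGenratorUrl = 'http://www.hetemeel.com/unclesamshow.php'

    def setLink(self, text):
        # Reuse the parent's link handling with a generated image URL.
        YoClient.setLink(self, self._genrateUrl(text))

    def _genrateUrl(self, text):
        return "%s?%s" % (self._imageGenratorUrl,
                          urllib.urlencode({'text': text}))

    setText = setLink
```

The __main__ block then instantiates IWantYo() in place of YoClient().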
1c837e69d08fd30727e7c7756bb263d56ea5593c
store year, month and date always as integers
__init__.py
__init__.py
# http://sourceforge.net/mailarchive/message.php?msg_id=25355232 # http://dret.net/bibconvert/tex2unicode #from citeproc import import re from lxml import objectify from warnings import warn from . import csl from ..util import set_xml_catalog from ..warnings import PyteWarning class CustomDict(dict): def __init__(self, args, required=set(), optional=set(), required_or=[]): passed_keywords = set(args.keys()) missing = required - passed_keywords if missing: raise TypeError('The following required arguments are missing: ' + ', '.join(missing)) required_or_merged = set() for required_options in required_or: if not passed_keywords & required_options: raise TypeError('Require at least one of: ' + ', '.join(required_options)) required_or_merged |= required_options unsupported = passed_keywords - required - optional - required_or_merged if unsupported: cls_name = self.__class__.__name__ warn('The following arguments for {} are '.format(cls_name) + 'unsupported: ' + ', '.join(unsupported)) self.update(args) def __getattr__(self, name): return self[name] class Reference(CustomDict): def __init__(self, key, type, **args): self.key = key self.type = type #required_or = [set(csl.VARIABLES)] optional = ({'uri', 'container_uri', 'contributor', 'date'} | set(csl.VARIABLES)) super().__init__(args, optional=optional) class Name(CustomDict): def __init__(self, **args): if 'name' in args: required = {'name'} optional = {} else: required = {'given', 'family'} optional = {'dropping-particle', 'non-dropping-particle', 'suffix'} super().__init__(args, required, optional) def parts(self): return (self.get('given'), self.get('family'), self.get('dropping-particle'), self.get('non-dropping-particle'), self.get('suffix')) def given_initials(self): names = re.split(r'[- ]', self.given) return ' '.join('{}.'.format(name[0]) for name in names) class DateBase(CustomDict): def __init__(self, args, required=set(), optional=set()): optional = {'circa'} | optional super().__init__(args, required, optional) # defaults if 'circa' not in self: self['circa'] = False class Date(DateBase): def __init__(self, **args): required = {'year'} optional = {'month', 'day', 'season'} if 'day' in args and 'month' not in args: raise TypeError('When specifying the day, you should also specify ' 'the month') super().__init__(args, required, optional) def __eq__(self, other): # TODO: for sorting raise NotImplementedError class DateRange(DateBase): def __init__(self, **args): required = {'begin'} optional = {'end'} super().__init__(args, required, optional) def __eq__(self, other): # TODO: for sorting raise NotImplementedError class Bibliography(list): def __init__(self, source, formatter): self.source = source self.formatter = formatter formatter.bibliography = self def cite(self, id): try: reference = self.source[id] except KeyError: warning = "Unknown reference ID '{}'".format(id) warn(warning, PyteWarning) return '[{}]'.format(warning) self.append(reference) return self.formatter.format_citation(reference) def bibliography(self, target): return self.formatter.format_bibliography(target) class BibliographyFormatter(object): def __init__(self): pass def format_citation(self, reference): raise NotImplementedError def format_bibliography(self, target): raise NotImplementedError class BibliographySource(dict): def add(self, entry): self[entry.key] = entry class PseudoCSLDataXML(BibliographySource): def __init__(self, filename): set_xml_catalog() self.parser = objectify.makeparser(remove_comments=True, no_network=True) self.xml = 
objectify.parse(filename, self.parser) self.root = self.xml.getroot() for ref in self.root.ref: self.add(self.parse_reference(ref)) def parse_reference(self, ref): key = str(ref.attrib['id']) authors = self.parse_authors(ref.author) issued = self.parse_date(ref.issued) if ref.type.text == 'article-journal': return Reference(key, type=csl.type.ARTICLE, author=authors, title=ref.title.text, container_title=ref.find('container-title').text, issued=issued) elif ref.type.text == 'paper-conference': return Reference(key, type=csl.type.PAPER_CONFERENCE, author=authors, title=ref.title.text, container_title=ref.find('container-title').text, issued=issued) else: raise NotImplementedError def parse_authors(self, author): authors = [] for name in author.name: authors.append(self.parse_name(name)) return authors def parse_name(self, name): return Name(given=name.given.text, family=name.family.text) def parse_date(self, date): return Date(year=date.year.text, month=date.month.text) class MODS(BibliographySource): pass
Python
0.000248
@@ -2859,16 +2859,80 @@ month')%0A + args = %7Bkey: int(value) for key, value in args.items()%7D%0A
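The whole fix is one normalisation line added to Date.__init__, run before the values reach the base class. A tiny illustration with string input such as an XML parser would produce:

```python
args = {'year': '1998', 'month': '7'}
# Coerce every component to int so comparisons and sorting behave.
args = {key: int(value) for key, value in args.items()}
assert args == {'year': 1998, 'month': 7}
```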
9ccbc97652db1b7e6c7888b783722eee9f438104
make cbpro visible to tests
__init__.py
__init__.py
Python
0
@@ -0,0 +1,116 @@ +# for tests%0Afrom cbpro.authenticated_client import AuthenticatedClient%0Afrom cbpro.public_client import PublicClient%0A
22d466a0f7010976f8cc340f77cc33e325ea6946
Add route to add a photo to an user
sigma_core/views/user.py
sigma_core/views/user.py
# -*- coding: utf-8 -*- import random import string from django.core.mail import send_mail from django.http import Http404 from rest_framework import viewsets, decorators, status from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated, AllowAny from dry_rest_permissions.generics import DRYPermissions from sigma_core.models.user import User from sigma_core.serializers.user import BasicUserWithPermsSerializer, DetailedUserWithPermsSerializer reset_mail = { 'from_email': 'support@sigma.fr', 'subject': 'Mot de passe Sigma', 'message': u""" Bonjour, Ton mot de passe Sigma a été réinitialisé. C'est maintenant "{password}". Cordialement, L'équipe Sigma. """ } class UserViewSet(viewsets.ModelViewSet): permission_classes = [IsAuthenticated, DRYPermissions, ] queryset = User.objects.all() serializer_class = BasicUserWithPermsSerializer # by default, basic data and permissions def retrieve(self, request, pk=None): """ Retrieve an User according to its id (pk). --- response_serializer: DetailedUserWithPermsSerializer """ try: user = User.objects.get(pk=pk) except User.DoesNotExist: raise Http404() # Use DetailedUserWithPermsSerializer to have the groups whom user belongs to serializer = DetailedUserWithPermsSerializer(user, context={'request': request}) return Response(serializer.data, status=status.HTTP_200_OK) def update(self, request, pk=None): try: user = User.objects.get(pk=pk) except User.DoesNotExist: raise Http404() # Names edition is allowed to Sigma admins only if ((request.data['lastname'] != user.lastname or request.data['firstname'] != user.firstname)) and not (request.user.is_sigma_admin()): return Response('You cannot change your lastname or firstname', status=status.HTTP_400_BAD_REQUEST) return super(UserViewSet, self).update(request, pk) @decorators.list_route(methods=['get']) def me(self, request): """ Give the data of the current user. --- response_serializer: DetailedUserWithPermsSerializer """ if request.user.__class__.__name__ == 'AnonymousUser': return Response(status=status.HTTP_401_UNAUTHORIZED) else: # Use DetailedUserWithPermsSerializer to have the groups whom user belongs to serializer = DetailedUserWithPermsSerializer(request.user, context={'request': request}) return Response(serializer.data) @decorators.list_route(methods=['put']) def change_password(self, request): """ Allow current user to change his password. --- omit_serializer: true parameters_strategy: form: replace parameters: - name: old_password type: string - name: password type: string """ PASSWORD_MIN_LENGTH = 8 if request.user.__class__.__name__ == 'AnonymousUser': return Response(status=status.HTTP_401_UNAUTHORIZED) user = request.user data = request.data if not user.check_password(data['old_password']): return Response("Wrong password", status=status.HTTP_403_FORBIDDEN) if len(data['password']) < PASSWORD_MIN_LENGTH: return Response("'password' must be at least %d characters long" % PASSWORD_MIN_LENGTH, status=status.HTTP_400_BAD_REQUEST) user.set_password(data['password']) user.save() return Response('Password successfully changed', status=status.HTTP_200_OK) @decorators.list_route(methods=['post'], permission_classes=[AllowAny]) def reset_password(self, request): """ Reset current user's password and send him an email with the new one. 
--- omit_serializer: true parameters_strategy: form: replace parameters: - name: email type: string """ email = request.data.get('email') if email == '': return Response("'email' field cannot be empty", status=status.HTTP_400_BAD_REQUEST) try: user = User.objects.get(email=email) except User.DoesNotExist: return Response('No user found with this email', status=status.HTTP_404_NOT_FOUND) password = ''.join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for _ in range(10)) mail = reset_mail.copy() mail['recipient_list'] = [user.email] mail['message'] = mail['message'].format(email=user.email, password=password, name=user.get_full_name()) send_mail(**mail) user.set_password(password) user.save() return Response('Password reset', status=status.HTTP_200_OK)
Python
0
@@ -116,16 +116,69 @@ Http404 +%0Afrom django.views.decorators.csrf import csrf_exempt %0A%0Afrom r @@ -226,16 +226,25 @@ , status +, parsers %0Afrom re @@ -778,16 +778,49 @@ %0A%22%22%22%0A%7D%0A%0A +# TODO: use DetailSerializerMixin %0Aclass U @@ -5005,28 +5005,984 @@ status=status.HTTP_200_OK)%0A +%0A @decorators.detail_route(methods=%5B'post'%5D)%0A @decorators.parser_classes(%5Bparsers.MultiPartParser, %5D)%0A def addphoto(self, request, pk=None):%0A %22%22%22%0A Add a profile photo to user %22pk%22.%0A ---%0A omit_serializer: true%0A parameters_strategy:%0A form: replace%0A parameters:%0A - name: file%0A type: file%0A required: true%0A %22%22%22%0A from sigma_files.models import Image%0A from sigma_files.serializers import ImageSerializer_WithoutPerms as ImageSerializer%0A%0A try:%0A user = User.objects.get(pk=pk)%0A except User.DoesNotExist:%0A raise Http404()%0A%0A s = ImageSerializer(data=request.data, context=%7B'request': request%7D)%0A s.is_valid(raise_exception=True)%0A img = s.save()%0A img.owner = user%0A img.save()%0A user.photo = img%0A user.save()%0A%0A return Response(status=status.HTTP_201_CREATED)%0A
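Decoded from the diff, the new endpoint is a POST detail route that parses multipart form data, stores the upload as an Image owned by the target user, and sets it as that user's photo (ImageSerializer and Image come from the project's sigma_files app):

```python
@decorators.detail_route(methods=['post'])
@decorators.parser_classes([parsers.MultiPartParser, ])
def addphoto(self, request, pk=None):
    """Add a profile photo to user "pk"."""
    from sigma_files.models import Image
    from sigma_files.serializers import ImageSerializer_WithoutPerms as ImageSerializer

    try:
        user = User.objects.get(pk=pk)
    except User.DoesNotExist:
        raise Http404()

    # Validate and store the uploaded file, then attach it to the user.
    s = ImageSerializer(data=request.data, context={'request': request})
    s.is_valid(raise_exception=True)
    img = s.save()
    img.owner = user
    img.save()
    user.photo = img
    user.save()

    return Response(status=status.HTTP_201_CREATED)
```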
020015cccceb3c2391c4764ee2ec29dfc5c461c6
Update plugin's register functions to return the object instance instead of performing the registration themselves
__init__.py
__init__.py
from . import LayerView def getMetaData(): return { "name": "LayerView", "type": "View" } def register(app): app.getController().addView("LayerView", LayerView.LayerView())
Python
0
@@ -117,48 +117,14 @@ -app.getController().addView(%22LayerView%22, +return Lay @@ -141,10 +141,9 @@ erView() -) %0A
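After this change register() only builds the view; the application performs the registration itself. A minimal host-side sketch (the loop and plugin mapping are assumptions, but the controller call mirrors what the old register() did):

```python
def load_views(app, plugins):
    """Register every plugin's view with the application controller."""
    for name, plugin in plugins.items():
        view = plugin.register(app)              # plugin returns its instance
        app.getController().addView(name, view)  # host does the registration
```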
90656a3b4eedac9ae87dbdb5485994c58c2f78d9
add pydq
__init__.py
__init__.py
# -*- coding: utf-8 -*- import six __title__ = 'requests' __version__ = '0.0.1' __author__ = 'Pyiner' __license__ = 'Apache 2.0' __copyright__ = 'Copyright 2015 Pyiner' __all__ = ['DataQuery'] class DataQuery(object): def __init__(self, data): self.data = data @staticmethod def item_exist(item, **kwargs): exist = True for k, v in kwargs.items(): if k not in item or item[k] != v: exist = False break return exist def query(self, negate, **kwargs): d = [] for item in self.data: exist = self.item_exist(item, **kwargs) if exist is negate: d.append(item) return self.__class__(data=d) def filter(self, **kwargs): return self.query(True, **kwargs) def exclude(self, **kwargs): return self.query(False, **kwargs) def order_by(self, field): desc = field.startswith('-') field = field.strip('-') d = sorted(self.data, key=lambda x: x[field], reverse=desc) return self.__class__(data=d) def __iter__(self): return self.data def __getitem__(self, k): if not isinstance(k, (slice,) + six.integer_types): raise TypeError data = self.data if isinstance(k, slice): return data[k.start:k.stop:k.step] return data[k] if __name__ == '__main__': xdata = [{ 'a': 1, 'b': 2, 'c': 3 }, { 'a': 2, 'b': 1, 'c': 3 }, { 'a': 3, 'b': 2, 'c': 1 }] dq = DataQuery(xdata) for i in dq.filter(c=1): print i
Python
0.000268
@@ -46,16 +46,12 @@ = ' -requests +pydq '%0A__
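A short usage sketch for the DataQuery class in this record (renamed to the pydq package by the commit), assuming the class defined above is importable:

```python
songs = [
    {'title': 'a', 'plays': 3},
    {'title': 'b', 'plays': 1},
    {'title': 'c', 'plays': 2},
]

dq = DataQuery(songs)
# Chain exclude() and order_by(); a '-' prefix means descending order.
top = dq.exclude(title='b').order_by('-plays')[0]
assert top == {'title': 'a', 'plays': 3}
```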
b186ed26e3250d8b02c94f5bb3b394c35986bcf6
Remove an import which snuck in but does not belong.
__init__.py
__init__.py
""" Spyral, an awesome library for making games. """ __version__ = '0.1.1' __license__ = 'MIT' __author__ = 'Robert Deaton' import compat import memoize import point import camera import sprite import scene import _lib import event import animator import animation import pygame import image import color import rect import signal import font import vector Scene = scene.Scene Image = image.Image Sprite = sprite.Sprite Group = sprite.Group AnimationSprite = animation.AnimationSprite AnimationGroup = animation.AnimationGroup Rect = rect.Rect Signal = signal.Signal Vec2D = vector.Vec2D keys = event.keys director = scene.Director() def init(): event.init() pygame.init() pygame.font.init() def quit(): pygame.quit() director._stack = []
Python
0
@@ -331,20 +331,8 @@ nal%0A -import font%0A impo
c4a4c9333c874b38c121ce1181c12e7ed5aacc88
Add __init__.py
__init__.py
__init__.py
from ShellGraphics import *
Python
0.006636
@@ -2,14 +2,14 @@ rom -S +s hell -G +g raph
7bee2061fc0609205bb81dc781efbcd833ca74bb
Add save() interface on Workbook
Workbook.py
Workbook.py
import Worksheet class Workbook(object): def __init__(self, encoding='utf-8'): self._worksheets = [] self._encoding = encoding def add_sheet(self, worksheet): self._worksheets.append(worksheet) def new_sheet(self, sheet_name): worksheet = Worksheet.Worksheet(sheet_name, self) self._worksheets.append(worksheet) return worksheet def get_xml_data(self): for index, ws in enumerate(self._worksheets, 1): yield (i, ws)
Python
0.000001
@@ -9,16 +9,42 @@ orksheet +%0Afrom Writer import Writer %0A%0Aclass @@ -151,16 +151,52 @@ encoding +%0A self._writer = Writer(self) %0A%0A%09def a @@ -498,8 +498,89 @@ (i, ws)%0A +%0A def save(self, output_filename):%0A self._writer.save(output_filename)%0A
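The diff makes the workbook own a Writer and exposes save() as a thin delegation, so callers never construct the writer themselves. A sketch of the class after the change (Writer is the project's own module; only its save(output_filename) method is assumed here):

```python
from Writer import Writer

class Workbook(object):
    def __init__(self, encoding='utf-8'):
        self._worksheets = []
        self._encoding = encoding
        self._writer = Writer(self)   # workbook owns its writer

    def save(self, output_filename):
        # Public entry point: delegate serialisation to the writer.
        self._writer.save(output_filename)
```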
1ba0a6f93683a0e8f020abe5f34624db33c3a470
Update the package version
__init__.py
__init__.py
'''Utility classes and functions for creating Alfred workflows''' __version__ = '20130909.0' from .alfred import Workflow, WorkflowInfo, Item, JsonFile, LINE from .keychain import Keychain
Python
0
@@ -81,18 +81,18 @@ '201309 -09 +13 .0'%0A%0Afro
9963de82cac9374f6a634b935d08a5499df97964
Add RouteCollection cache
__init__.py
__init__.py
# -*- coding: utf-8 -*- # This file is part of the pymfony package. # # (c) Alexandre Quercia <alquerci@email.com> # # For the full copyright and license information, please view the LICENSE # file that was distributed with this source code. from __future__ import absolute_import; import os.path; import sys; if sys.version_info[0] >= 3: from urllib.parse import urlparse; else: from urlparse import urlparse; from pymfony.component.system import Object; from pymfony.component.system.oop import interface; from pymfony.component.system.types import String; from pymfony.component.system.types import Array; from pymfony.component.system.exception import InvalidArgumentException; from pymfony.component.system.exception import RuntimeException; from pymfony.component.system.serializer import unserialize; from pymfony.component.system.serializer import serialize; """ """ @interface class FileLocatorInterface(Object): """ @author Fabien Potencier <fabien@symfony.com> """ def locate(self, name, currentPath = None, first = True): """Returns a full path for a given file name. @param name: mixed The file name to locate @param currentPath: string The current path @param first: boolean Whether to return the first occurrence or an array of filenames @return: string|list The full path to the file|A list of file paths @raise InvalidArgumentException: When file is not found """ pass; class FileLocator(FileLocatorInterface): """FileLocator uses an array of pre-defined paths to find files. @author Fabien Potencier <fabien@symfony.com> """ def __init__(self, paths = None): """Constructor. @param paths: string|list A path or an array of paths where to look for resources """ if paths is None: self._paths = list(); elif isinstance(paths, String): self._paths = [paths]; else: self._paths = list(paths); def locate(self, name, currentPath = None, first = True): """Returns a full path for a given file name. @param name: mixed The file name to locate @param currentPath: string The current path @param first: boolean Whether to return the first occurrence or an array of filenames @return: string|list The full path to the file|A list of file paths @raise InvalidArgumentException: When file is not found """ if self.__isAbsolutePath(name): if not os.path.exists(name): raise InvalidArgumentException( 'The file "{0}" does not exist.'.format(name) ); return name; filepaths = list(); paths = []; if currentPath: paths.append(currentPath); paths.extend(self._paths); for path in paths: filename = os.path.join(path, name); if os.path.exists(filename): if first: return filename; filepaths.append(filename); if not filepaths: raise InvalidArgumentException( 'The file "{0}" does not exist (in: {1}).' ''.format(name, ", ".join(paths)) ); return Array.uniq(filepaths); def __isAbsolutePath(self, path): """Returns whether the file path is an absolute path. @param path: string A file path @return Boolean """ if (path.startswith('/') or path.startswith('\\') or ( len(path) > 3 and path[0].isalpha() and path[1] == ':' and (path[2] == '\\' or path[2] == '/') ) or urlparse(path)[0] ): return True; return False; class ConfigCache(Object): """ConfigCache manages PHP cache files. When debug is enabled, it knows when to flush the cache thanks to an array of ResourceInterface instances. @author Fabien Potencier <fabien@symfony.com> """ def __init__(self, path, debug): """Constructor. 
@param string path The absolute cache path @param Boolean debug Whether debugging is enabled or not """ self.__file = path; self.__debug = bool(debug); def __str__(self): """Gets the cache file path. @return string The cache file path """ return self.__file; def isFresh(self): """Checks if the cache is still fresh.: This method always returns True when debug is off and the cache file exists. @return Boolean True if the cache is fresh, False otherwise: """ if not os.path.isfile(self.__file) : return False; if not self.__debug : return True; metadata = self.__file+'.meta'; if not os.path.isfile(metadata) : return False; time = os.path.getmtime(self.__file); f = open(metadata); content = f.read(); f.close(); meta = unserialize(content); for resource in meta : if not resource.isFresh(time) : return False; return True; def write(self, content, metadata = None): """Writes cache. @param string content The content to write in the cache @param ResourceInterface[] metadata An array of ResourceInterface instances @raise RuntimeException When cache file can't be wrote """ assert isinstance(metadata, list); dirname = os.path.dirname(self.__file); if not os.path.isdir(dirname) : try: os.makedirs(dirname, 0o777); except os.error: raise RuntimeException('Unable to create the {0} directory'.format(dirname)); elif not os.access(dirname, os.W_OK) : raise RuntimeException('Unable to write in the {0} directory'.format(dirname)); try: suffix = 0; while os.path.exists(self.__file+str(suffix)): suffix += 1; tmpFile = self.__file+str(suffix); f = open(tmpFile, 'w'); f.write(content); f.close(); if os.path.exists(self.__file): os.remove(self.__file); os.rename(tmpFile, self.__file); if hasattr(os, 'chmod'): umask = os.umask(0o220); os.umask(umask); os.chmod(self.__file, 0o666 & ~umask); except Exception: raise RuntimeException('Failed to write cache file "{0}".'.format(self.__file)); else: try: if hasattr(os, 'chmod'): umask = os.umask(0o220); os.umask(umask); os.chmod(self.__file, 0o666 & ~umask); except Exception: pass; finally: try: f.close(); except Exception: pass; if os.path.exists(tmpFile): os.remove(tmpFile); if None is not metadata and True is self.__debug : filename = self.__file+'.meta'; content = serialize(metadata); try: suffix = 0; while os.path.exists(filename+str(suffix)): suffix += 1; tmpFile = filename+str(suffix); f = open(tmpFile, 'w'); f.write(content); f.close(); if os.path.exists(filename): os.remove(filename); os.rename(tmpFile, filename); except Exception: pass; else: try: if hasattr(os, 'chmod'): umask = os.umask(0o220); os.umask(umask); os.chmod(filename, 0o666 & ~umask); except Exception: pass; finally: try: f.close(); except Exception: pass; if os.path.exists(tmpFile): os.remove(tmpFile);
Python
0
@@ -5655,16 +5655,36 @@ a, list) + or metadata is None ;%0A%0A
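The only functional change in this file is the loosened guard in ConfigCache.write(): metadata may now be omitted entirely instead of having to be a list. A tiny illustration of the inputs the new assertion accepts:

```python
def check_metadata(metadata=None):
    # Mirrors the assertion after the change.
    assert isinstance(metadata, list) or metadata is None
    return metadata

check_metadata()                  # ok: no metadata at all
check_metadata([])                # ok: empty resource list
check_metadata(['resource.xml'])  # ok: list of resources
```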
8df7fecb367d27d59402797ecfb9c13ba4f438b7
Fix reference to DEFAULT_USER_INTERFACE
__main__.py
__main__.py
#!/usr/bin/env python #This is free and unencumbered software released into the public domain. # #Anyone is free to copy, modify, publish, use, compile, sell, or distribute this #software, either in source code form or as a compiled binary, for any purpose, #commercial or non-commercial, and by any means. # #In jurisdictions that recognise copyright laws, the author or authors of this #software dedicate any and all copyright interest in the software to the public #domain. We make this dedication for the benefit of the public at large and to #the detriment of our heirs and successors. We intend this dedication to be an #overt act of relinquishment in perpetuity of all present and future rights to #this software under copyright law. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN #ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION #WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # #For more information, please refer to <https://unlicense.org/>. """ Provides a base class for the application, and then starts the application. """ import os #For finding the root directory of Luna. import sys #For reading command line arguments. import luna.plugins #To initiate the plug-in loading and use the APIs. class Luna: """ Base instance of the application. """ DEFAULT_USER_INTERFACE = "automatic" """ The default user interface to start with, unless instructed otherwise. If this user interface does not exist, an error is thrown and the application closes. """ def run(self): """ .. function:: run() Starts the application. This process will start the plug-in registering, and then selects a user interface based on the command line arguments. :returns: ``True`` if the application was finished successfully, or ``False`` if something went wrong. """ base_dir = os.path.dirname(os.path.abspath(__file__)) #Add the plugin directories. luna.plugins.add_plugin_location(os.path.join(base_dir, "plugins")) luna.plugins.discover() logger = luna.plugins.api("logger") logger.set_log_levels([logger.Level.ERROR, logger.Level.CRITICAL, logger.Level.WARNING, logger.Level.INFO, logger.Level.DEBUG]) user_interface_name = self.DEFAULT_INTERFACE if len(sys.argv) >= 2: user_interface_name = sys.argv[1] try: if not luna.plugins.api("userinterface").exists(user_interface_name): logger.error("Could not load the user interface {userinterface}. Aborting.", userinterface=user_interface_name) return False except ImportError: logger.error("Could not load the user interface plug-in type. Aborting.") return False luna.plugins.api("userinterface").start(user_interface_name) luna.plugins.api("userinterface").join(user_interface_name) return True #Success. #Launches Luna if called from the command line. if __name__ == "__main__": _application = Luna() _application.run()
Python
0
@@ -2470,16 +2470,21 @@ DEFAULT_ +USER_ INTERFAC
16280da485c5563a8ec187928a61c8ca5512a530
fix set_final() method
fst.py
fst.py
import copy import re class State: """ State Class """ def __init__(self, id=''): self.id = id self.final = False self.trans_map = {} self.final_output = set() def is_final(self): return self.final def set_final(self, final): self.final = final self.trans_map = {} def transition(self, char): if char in self.trans_map: return self.trans_map[char]['state'] else: return None def set_transition(self, char, state): self.trans_map[char] = {'state': state, 'output': '' if char not in self.trans_map else self.trans_map[char]['output']} def state_output(self): return self.final_output def set_state_output(self, output): self.final_output = output def add_state_output(self, output): self.final_output.add(output) def clear_state_output(self): self.final_output = set() def output(self, char): if char in self.trans_map: return self.trans_map[char]['output'] else: return '' def set_output(self, char, str): if char in self.trans_map: self.trans_map[char]['output'] = str def deepcopy(self, id): state = State(id) state.final = self.final state.trans_map = copy.deepcopy(self.trans_map) state.final_output = copy.deepcopy(self.final_output) return state def clear(self): self.final = False self.trans_map = {} self.final_output = set() def __eq__(self, other): if other is None or not isinstance(other, State): return False else: return \ self.final == other.final and \ self.trans_map == other.trans_map and \ self.final_output == other.final_output class FST: """ FST (final dictionary) class """ def __init__(self): self.dictionary = [] def size(self): return len(self.dictionary) def member(self, state): for s in self.dictionary: if s == state: return s return None def insert(self, state): self.dictionary.append(state) def print_dictionary(self): for s in reversed(self.dictionary): for (c, v) in s.trans_map.items(): print(s.id, c, v['state'].id, v['output'], sep='\t') if s.is_final(): print(s.id, 'final', s.final_output, sep='\t') # all characters CHARS = set() # naive implementation for building fst # http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.24.3698 def create_minimum_transducer(inputs): fstDict = FST() buffer = [] buffer.append(State()) # insert 'initial' state # previous word prev_word = '' def find_minimized(state): # if an equal state exists in the dictionary, use that s = fstDict.member(state) if s is None: # if no equal state exists, insert new one ant return it s = state.deepcopy("S" + str(fstDict.size())) fstDict.insert(s) return s def prefix_len(s1, s2): # calculate max common prefix length for s1 and s2 i = 0 while i < len(s1) and i < len(s2) and s1[i] == s2[i]: i += 1 return i # main loop for (current_word, current_output) in inputs: for c in current_word: CHARS.add(c) pref_len = prefix_len(prev_word, current_word) # expand buffer to current word length while len(buffer) <= len(current_word): buffer.append(State()) # set state transitions for i in range(len(prev_word), pref_len, -1): buffer[i - 1].set_transition(prev_word[i - 1], find_minimized(buffer[i])) for i in range(pref_len + 1, len(current_word) + 1): buffer[i].clear() buffer[i - 1].set_transition(current_word[i - 1], buffer[i]) if current_word != prev_word: buffer[len(current_word)].set_final(True) buffer[len(current_word)].set_state_output(set([''])) # set state outputs for j in range(1, pref_len + 1): # divide (j-1)th state's output to (common) prefix and suffix common_prefix = '' output = buffer[j - 1].output(current_word[j - 1]) k = 0 while k < len(output) and k < len(current_output) and output[k] == current_output[k]: 
common_prefix += output[k] k += 1 word_suffix = re.sub("^" + common_prefix, "", output) # re-set (j-1)'th state's output to prefix buffer[j - 1].set_output(current_word[j - 1], common_prefix) # re-set jth state's output to suffix or set final state output for c in CHARS: # FIXME: terrible loop... if buffer[j].transition(c) is not None: new_output = buffer[j].output(c) + word_suffix buffer[j].set_output(c, new_output) # or, set final state output if it's a final state if buffer[j].is_final(): tmp_set = set() for tmp_str in buffer[j].state_output(): tmp_set.add(tmp_str + word_suffix) buffer[j].set_state_output(tmp_set) # update current output (subtract prefix) current_output = re.sub("^" + common_prefix, "", current_output) if current_word == prev_word: buffer[len(current_word)].set_state_output(buffer[len(current_word)].state_output() | set(current_output)) else: buffer[pref_len].set_output(current_word[pref_len], current_output) # preserve current word for next loop prev_word = current_word # minimize the last word for i in range(len(current_word), 0, -1): buffer[i - 1].set_transition(prev_word[i - 1], find_minimized(buffer[i])) find_minimized(buffer[0]) return fstDict if __name__ == '__main__': inputs1 = [ ('apr', '30'), ('aug', '31'), ('dec', '31'), ('feb', '28'), ('feb', '29'), ('jan', '31'), ('jul', '31'), ('jun', '30') ] dict = create_minimum_transducer(inputs1) dict.print_dictionary() print("\n\n") inputs2 = [ ('さくら', '10'), ('さくらんぼ', '11'), ('すもも', '20'), ('なし', '10'), ('もも', '20'), ] dict = create_minimum_transducer(inputs2) dict.print_dictionary()
Python
0.000008
@@ -317,36 +317,8 @@ inal -%0A self.trans_map = %7B%7D %0A%0A
0f7a2e05beab5f2d759bdaacdc34b6569dfd368d
Fix plugin name
__init__.py
__init__.py
from maya import cmds import logging import json import imp import os # level = logging.DEBUG level = logging.ERROR logger = logging.getLogger(__name__) handler = logging.StreamHandler() logger.addHandler(handler) logger.setLevel(level) handler.setLevel(level) def loadConfig(): """ Load config file Return: config(list): List of path module paths """ userDir = os.path.expanduser("~") configPath = os.path.join(userDir, ".rushConfig") defaultModulePath = os.path.join( cmds.internalVar(userScriptDir=True), 'rush') # Create new config file if not os.path.exists(configPath): initConfig(configPath, defaultModulePath) config = [defaultModulePath] return config try: f = open(configPath, 'r') config = f.read().split() f.close() except IOError: config = [defaultModulePath] logger.debug("Failed to load config file") return config def initConfig(configPath, defaultModulePath): """ Init and save new config file Args: configPath (str): path to config file defaultModulePath (str): default module path Return: None """ logger.debug("Config file doesn't exist. Creating a new config file") # Init config file try: with open(configPath, 'w') as outFile: outFile.writelines([defaultModulePath]) logger.debug("Created new config file") except IOError: logger.debug("Failed to save config file") def getModulePath(path): """ Create and return a list of module paths Args: path (str): directory path to search modules Return: mods (list): List of module paths None: if the path doesn't exist """ if not os.path.exists(path): return None # Get all files in the directory allFiles = [os.path.join(root, f) for root, firs, files in os.walk(path) for f in files] # Get only python files pythonFiles = [i for i in allFiles if i.endswith(".py")] # Remove __init__ and main plugin file mods = [f for f in pythonFiles if not f.endswith("__init__.py") and not f.endswith("rush.py")] return mods def loadModule(path): """ Load module Args: path (str): module path Return: mod (module object): command module None: if path doesn't exist """ # Create module names for import, for exapmle ... 
# # "rush/template" # "animation/animate" # "common/create" # "common/display" name = os.path.splitext(path)[0].split("/") name = "/".join(name[-2:]) try: mod = imp.load_source(name, path) return mod except: logger.debug("Failed to load module : %s" % path) return None def getClassList(config): """ Create and return a list of command classes Args: config (list): List of paths Return: commandClassList: list of classes """ # Create a single list of module paths moduleList = [] for path in config: logger.debug("Module path: %s " % path) pathList = getModulePath(path) if pathList is not None: moduleList.extend(pathList) # Create a list of module objects moduleObjectList = [] for path in moduleList: m = loadModule(path) if m is not None: moduleObjectList.append(m) # Class only for the reload command class Reload(object): commandDict = {} def _reloadRush(self): try: cmds.unloadPlugin("rush.py") cmds.loadPlugin("rush.py") except: print "Failed to reload plugin" commandDict['reloadRush'] = "sphere.png" # Crate a list of classes commandClassList = [i.Commands for i in moduleObjectList] commandClassList.append(Reload) logger.debug("All command classes: %s" % str(commandClassList)) # Create and write a list of all commands for the completer in main plugin cmdsDict = {} for c in commandClassList: cmdsDict.update(c.commandDict) outPath = os.path.normpath( os.path.join( cmds.internalVar(userScriptDir=True), "rushCmds.json")) saveCommands(outPath, cmdsDict) return commandClassList def saveCommands(path, cmdsDict): """ Save all commands as a json file in the maya user directory Args: path (str): output path cmdsDict (dict): All commands Return: None """ logger.debug("Saving command file to %s" % path) try: with open(path, 'w') as outFile: json.dump( cmdsDict, outFile, indent=4, separators=(',', ':'), sort_keys=True) except IOError: logger.debug("Failed to save command file") class RushCommands(object): pass # Re-difine RushCommands class to inherit all comamnd classes for the list config = loadConfig() cl = tuple(getClassList(config)) RushCommands = type('RushCommands', cl, dict(RushCommands.__dict__))
Python
0.000001
@@ -2281,17 +2281,17 @@ dswith(%22 -r +R ush.py%22) @@ -3753,33 +3753,33 @@ s.unloadPlugin(%22 -r +R ush.py%22)%0D%0A @@ -3805,17 +3805,17 @@ Plugin(%22 -r +R ush.py%22)
c764c7ade8e9407136b2b855a74538d76392b955
Allow all hosts
nonhumanuser/settings.py
nonhumanuser/settings.py
""" Django settings for nonhumanuser project. Generated by 'django-admin startproject' using Django 1.9.1. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os import dj_database_url # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '403xt9_9l+*)tw08ur@t_k8fem7_g*du-*^*^e0v+m1d6j6i#p' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = False #bool(os.environ.get('NHU_DEBUG')) ALLOWED_HOSTS = ['*'] #[os.environ.get('NHU_HOST')] SITE_ID = 1 # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.sites', 'django.contrib.flatpages', 'app', 'blog', 'library', 'actual_play', 'django_extensions', 'bootstrap3', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'nonhumanuser.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(os.path.join(BASE_DIR, 'templates'), 'app')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'nonhumanuser.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'nonhumanuser', 'HOST': 'localhost', 'PORT': '5432', 'USER': 'webboss', 'PASSWORD': 'NHU#lanfear138', } } DB_ENV = { 'default': 'NHU_DATABASE', } """ for db_name, env_var in DB_ENV.items(): url = os.environ.get(env_var) if url: DATABASES[db_name] = dj_database_url.parse(url) """ # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Los_Angeles' USE_I18N = True USE_L10N = True #TODO Eventually change this to True and use django.utils.timezone.now() instead of datetime.datetime.now() everywhere USE_TZ = False # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_ROOT = 
BASE_DIR STATIC_URL = '/static/' STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static'), ] # Media files (images, videos, PDFs) MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/' # Markdown MARKDOWN_EDITOR_SKIN = 'simple' # Registration AUTH_PROFILE_MODULE = "app.Profile" ACCOUNT_ACTIVATION_DAYS = 30 EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' EMAIL_HOST = 'localhost' EMAIL_PORT = 25 EMAIL_HOST_USER = '' EMAIL_HOST_PASSWORD = '' EMAIL_USE_TLS = False DEFAULT_FROM_EMAIL = 'support@nonhumanuser.com' ALLOWED_HOSTS = ['www.nonhumanuser.com']
Python
0
@@ -4492,46 +4492,4 @@ om'%0A -%0AALLOWED_HOSTS = %5B'www.nonhumanuser.com'%5D%0A
0b1b1ef6d3af248021125135b7534b3870517890
Add tasks urls
task/urls.py
task/urls.py
Python
0.000018
@@ -0,0 +1,171 @@ +from django.conf.urls import include, url%0Afrom views import *%0Afrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token%0A%0Aurlpatterns = %5B%0A%5D
42bd66cd7e272746f0bb2add6817cb935ea17898
add blanks line - pep8
nosdeputes/update_mps.py
nosdeputes/update_mps.py
import sys import time from json import load from urllib2 import urlopen, HTTPError from django.db import transaction from memopol2.utils import get_or_create from reps.models import Email, WebSite from mps.models import MP def update_personal_informations(_mp, mp): _mp.full_name = mp["nom"] _mp.last_name = mp["nom_de_famille"] _mp.an_webpage = mp["url_an"] _mp.profession = mp["profession"] def get_new_websites(mp, _mp): if mp["sites_web"]: for website in mp["sites_web"]: get_or_create(WebSite, url=website["site"], representative=_mp.representative_ptr) def get_new_emails(mp, _mp): for email in mp["emails"]: get_or_create(Email, email=email["email"], representative=_mp.representative_ptr) def set_mps_unactives(): print "Setting all mps to unactive" a = 0 total = MP.objects.count() for mp in MP.objects.all(): a += 1 mp.active = False mp.save() sys.stdout.write("%i/%i\r" % (a, total)) sys.stdout.flush() sys.stdout.write("\n") if __name__ == "__main__": mps = load(urlopen("http://www.nosdeputes.fr/deputes/json")) with transaction.commit_on_success(): set_mps_unactives() a = 0 for depute in mps["deputes"]: a += 1 try: mp = load(urlopen(depute["depute"]["url_nosdeputes_api"]))["depute"] except HTTPError: try: print "Warning, failed to get a deputy, retrying in one seconde (url: %s)" % depute["depute"]["api_url"] time.sleep(1) mp = load(urlopen(depute["depute"]["url_nosdeputes_api"]))["depute"] except HTTPError: print "Didn't managed to get this deputy, abort" print "Go repport the bug on irc.freenode.net#regardscitoyens" sys.exit(1) print a, "-", mp["nom"] #mp = load(open("test"))["depute"] _mp = MP.objects.filter(an_id=mp["url_an"].split("/")[-1].split(".")[0]) if _mp: _mp = _mp[0] if not depute["depute"].get("ancien_depute"): _mp.active = True update_personal_informations(_mp, mp) get_new_emails(mp, _mp) get_new_websites(mp, _mp) print mp.get("groupe") #if mp["groupe_sigle"] and mp["groupe_sigle"] != "NI": #Group.objects.get(abbreviation=mp["groupe_sigle"]) _mp.save()
Python
0.000006
@@ -407,16 +407,17 @@ sion%22%5D%0A%0A +%0A def get_ @@ -595,24 +595,25 @@ ative_ptr)%0A%0A +%0A def get_new_ @@ -751,16 +751,17 @@ e_ptr)%0A%0A +%0A def set_
450fd3f082000eb2bb1774caf32a5c7cd5793ecc
manage http error for nosdeputes.fr
nosdeputes/update_mps.py
nosdeputes/update_mps.py
import sys from json import load from urllib2 import urlopen from memopol2.utils import get_or_create from reps.models import Email, WebSite from mps.models import MP def update_personal_informations(_mp, mp): _mp.full_name = mp["nom"] _mp.last_name = mp["nom_de_famille"] _mp.an_webpage = mp["url_an"] _mp.profession = mp["profession"] def get_new_websites(mp, _mp): if mp["site_web"]: get_or_create(WebSite, url=mp["site_web"], representative=_mp.representative_ptr) def get_new_emails(mp, _mp): for email in mp["emails"]: get_or_create(Email, email=email["email"], representative=_mp.representative_ptr) def set_mps_unactives(): print "Setting all mps to unactive" a = 0 total = MP.objects.count() for mp in MP.objects.all(): a += 1 mp.active = False mp.save() sys.stdout.write("%i/%i\r" % (a, total)) sys.stdout.flush() sys.stdout.write("\n") if __name__ == "__main__": mps = load(urlopen("http://www.nosdeputes.fr/deputes/json")) set_mps_unactives() a = 0 for depute in mps["deputes"]: a += 1 mp = load(urlopen(depute["depute"]["api_url"]))["depute"] print a, "-", mp["nom"] _mp = MP.objects.filter(an_id=mp["url_an"].split("/")[-1].split(".")[0]) if _mp: _mp = _mp[0] if not depute["depute"].get("ancien_depute"): _mp.active = True update_personal_informations(_mp, mp) get_new_emails(mp, _mp) get_new_websites(mp, _mp) _mp.full_name = mp["nom"] _mp.last_name = mp["nom_de_famille"] _mp.an_webpage = mp["url_an"] _mp.profession = mp["profession"] for email in mp["emails"]: get_or_create(Email, email=email["email"], representative=_mp.representative_ptr) if mp["site_web"]: get_or_create(WebSite, url=mp["site_web"], representative=_mp.representative_ptr) _mp.save()
Python
0
@@ -4,16 +4,28 @@ ort sys%0A +import time%0A from jso @@ -65,16 +65,27 @@ urlopen +, HTTPError %0A%0Afrom m @@ -1138,32 +1138,49 @@ %0A a += 1%0A + try:%0A mp = loa @@ -1225,24 +1225,494 @@ )%5B%22depute%22%5D%0A + except HTTPError:%0A try:%0A print %22Warning, failed to get a deputy, retrying in one seconde (url: %25s)%22 %25 depute%5B%22depute%22%5D%5B%22api_url%22%5D%0A time.sleep(1)%0A mp = load(urlopen(depute%5B%22depute%22%5D%5B%22api_url%22%5D))%5B%22depute%22%5D%0A except HTTPError:%0A print %22Didn't managed to get this deputy, abort%22%0A print %22Go repport the bug on irc.freenode.net#regardscitoyens%22%0A sys.exit(1)%0A prin
85610d7e69abe8f75c9f64ede243dfaeb97b5601
Update spider.py
script/spider.py
script/spider.py
#!/usr/bin/env python #-*- coding: utf-8 -*- import thread, time, cv2 import numpy as np from gpiozero import Motor, PWMLED # Eyelib can be managed by following variables: # blinking : If True start a blink animation (1 cycle) # blinkspd : Speed of blinking # eye : Eye image # lid : Lid base image import eyelib # Last dewarped frame can be accessed by using variable "panorama" # A copy should be used to access data in order to avoid in-between garbage import dewarp # Constants M_FWD = 0 # GPIO pin for forward movement M_BWD = 0 # GPIO pin for backward movement M_CKW = 0 # GPIO pin for clockwise rotation M_CCW = 0 # GPIO pin for counterclockwise rotation BLIGHT = 0 # GPIO pin for TFT backlight control ANG_SPD = 0 # Angular speed for head rotation Facing = 0 # Current direction (approximate) # Actuators creation motor = Motor(M_FWD, M_BWD, pwm=True) head = Motor(M_CKW, M_CCW, pwm=True) backlight = PWMLED(BLIGHT) # Move the spider forward or backward. Speed -1..0 = backward, 0..1 = forward def Move(spd): if (spd>0): motor.forward(spd) else: motor.backward(abs(spd)) if (spd==0): motor.stop() # Rotate the head. Angle is the approximate rotation (0..360). Speed -1..0 = counterclockwise, 0..1 = clockwise def Rotate(angle,spd): global Facing if (spd>0): head.forward(spd) else: head.backward(abs(spd)) if (spd<>0): time.sleep(ANG_SPD(angle/abs(spd))) head.stop() Facing=(Facing-(angle*(np.sign(spd))))%360 # Returns the angles of the brightest spot in the panoramic frame (x = horizontal, y = vertical) def FindBrightestSpot(img): gray = cv2.cvtColor(dewarp.panorama, cv2.COLOR_BGR2GRAY) gray = cv2.GaussianBlur(gray, (11, 11), 0) minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(gray) X = ((dewarp.Wd-maxLoc[0])*360)/dewarp.Wd # Offset to be added to fix start position of the panorama Y = ((dewarp.Hd-maxLoc[0])*30)/dewarp.Hd # Should be fixed after computing the right vertical FOV of the panorama return X,Y # Main program if __name__ == '__main__': backlight.value=1 # Start with backlight at full brightness motor.stop() # Be sure the robot is not moving head.stop() # Be sure the robot is not moving thread.start_new_thread(Eye, ()) # Eye thread thread.start_new_thread(UnWarp, ()) # Unwarping thread while True: # Loop forever pass
Python
0.000001
@@ -259,16 +259,203 @@ linking%0A +# eyeangle :Ddirection of view%0A# eyedistance : Distance of pupil from center%0A# eyelid : Lid aperture (0=full open, 100=full close)%0A# autoblink : If True blinking is automatic%0A # eye
95a486b1b6658201ff6a3d1a1cd9dae7eb0f30a9
Test merge
genome_browser_utils/build_new_database.py
genome_browser_utils/build_new_database.py
#!/usr/bin/python import MySQLdb import re import os import csv import sys def generate_sql_dict_from_csv(naming_csv): transactions = [] with open(naming_csv, 'rb') as f: reader = csv.reader(f) print "reading headers..." header = reader.next() for rownum, row in enumerate(reader): print "reading data..." proc_row = row[0].split(";") print len(proc_row) sql_dict = { "name": "%s" % proc_row[0], "desc": "%s" % proc_row[1], "nib": "/gbdb/%s" % proc_row[0], "organism": "%s" % proc_row[2], "defaultPos": "%s" % proc_row[3], "active": 1, "orderKey": rownum, "genome": "%s" % proc_row[4], "scientificName": "%s" % proc_row[5], "htmlPath": "/gbdb/%s/html/description.html" % proc_row[0], "hgNearOk": 0, "hgPbOk": 0, "sourceName": "%s" % proc_row[6], "taxId": "%s" % proc_row[7] } transactions.append(sql_dict) return transactions def execute_sql_queries(transactions): # Step 11 # setting up the MySQL connection print "opening database" db = MySQLdb.connect(host="localhost", user="root", passwd="browser", db="hgcentral") dbcursor = db.cursor() return True for trans in transactions: for x,y in trans.iteritems(): print "%s: %s" % (x,y) dbcursor.execute("""INSERT INTO dbDb (name, description, nibPath, organism, defaultPos, active, orderKey, genome, scientificName, htmlPath, hgNearOk, hgPbOk, sourceName, taxId) VALUES (%(name)s, %(desc)s, %(nib)s, %(organism)s, %(defaultPos)s, %(active)s, %(orderKey)s, %(genome)s, %(scientificName)s, %(htmlPath)s, %(hgNearOk)s, %(hgPbOk)s, %(sourceName)s, %(taxId)s)""", trans) print "entered successfully into DBDB" dbcursor.execute("""INSERT INTO defaultDb (genome, name) VALUES (%(genome)s, %(name)s)""", trans) dbcursor.execute("""INSERT INTO genomeClade (genome, clade, priority) VALUES (%(genome)s, 'insect', 10)""", trans) ''' DELETE FROM `dbDb` where `orderKey` = 1 or `orderKey` =2 ''' if __name__ == '__main__': if len(sys.argv) != 2: print "usage: build_new_database.py <filename>" sys.exit() if not os.path.isfile(sys.argv[1]): print "%s is not a file" % sys.argv[1] print "usage: build_new_database.py <filename>" sys.exit() transactions = generate_sql_dict_from_csv(sys.argv[1]) execute_sql_queries(transactions)
Python
0.000001
@@ -395,16 +395,29 @@ it(%22;%22)%0A +%3C%3C%3C%3C%3C%3C%3C HEAD%0A %09 pri @@ -866,16 +866,631 @@ ow%5B7%5D %7D%0A +=======%0A splitName = proc_row%5B1%5D.split()%0A name = splitName%5B0%5D%5B:3%5D.lower() + splitName%5B1%5D%5B0%5D.upper() + splitName%5B1%5D%5B1:3%5D.lower() + str(435)%0A sql_dict = %7B %22name%22: %22%25s%22 %25 name, %22desc%22: %22%25s%22 %25 name, %22nib%22: %22/gbdb/%25s%22 %25 name, %22organism%22: %22%25s%22 %25 proc_row%5B1%5D, %22defaultPos%22: %22%25s%22 %25 proc_row%5B2%5D, %22active%22: 1, %22orderKey%22: rownum, %22genome%22: %22%25s%22 %25 proc_row%5B3%5D, %22scientificName%22: %22%25s%22 %25 proc_row%5B4%5D, %22htmlPath%22: %22/gbdb/%25s/html/description.html%22 %25 name, %22hgNearOk%22: 0, %22hgPbOk%22: 0, %22sourceName%22: %22%25s%22 %25 proc_row%5B5%5D, %22taxId%22: %22%25s%22 %25 proc_row%5B6%5D %7D%0A%3E%3E%3E%3E%3E%3E%3E 35aadafdb1813d3b4cb9beb4565be924d5ecbd4c%0A
f37f556ed497cf9c69f780290465610216ed6f5c
refactor ProfileViewTestCase: add setUp inherited by super class
english_diary/profiles/tests/test_views.py
english_diary/profiles/tests/test_views.py
from django.test import Client from django.core.urlresolvers import reverse from django.contrib.auth import get_user_model import datetime from core.tests.base import BaseTestCase from profiles.utils import set_expiration_date class ProfileViewTestCase(BaseTestCase): def test_verify_user_verification_key(self): client = Client() response = client.get( reverse( "profiles:email_verification", kwargs={ "verification_key": self.user.profile.verification_key, } ), follow=True, ) self.assertEqual( response.status_code, 200, ) self.assertRedirects( response, "/signin/?next=/", ) self.assertTrue( get_user_model().objects.last().is_verified, ) def test_user_verification_key_expires(self): # Make verification_key expire self.user.profile.key_expires = set_expiration_date(-1) self.user.profile.save() client = Client() response = client.get( reverse( "profiles:email_verification", kwargs={ "verification_key": self.user.profile.verification_key, } ), follow=True, ) self.assertTrue( get_user_model().objects.last().profile.is_expired_key, ) self.assertEqual( response.status_code, 200, ) self.assertRedirects( response, reverse( "profiles:key_expires", kwargs={ "verification_key": self.user.profile.verification_key, } ), )
Python
0.000001
@@ -277,48 +277,69 @@ def -test_verify_user_verification_key(self): +setUp(self):%0A super(ProfileViewTestCase, self).setUp() %0A @@ -335,32 +335,37 @@ setUp()%0A +self. client = Client( @@ -357,32 +357,82 @@ lient = Client() +%0A%0A def test_verify_user_verification_key(self): %0A respons @@ -427,32 +427,37 @@ response = +self. client.get(%0A @@ -1155,34 +1155,8 @@ ()%0A%0A - client = Client()%0A @@ -1170,16 +1170,21 @@ ponse = +self. client.g
013b33c8e8e788a2567449846a050b45ebdd4011
Improve sbl bot. Fix parser to properly parse netblocks as well as all ip addresses.
abusehelper/contrib/spamhaus/sbl.py
abusehelper/contrib/spamhaus/sbl.py
import idiokit from abusehelper.core import cymruwhois, bot, events class SpamhausSblBot(bot.PollingBot): use_cymru_whois = bot.BoolParam(default=True) sbl_filepath = bot.Param("Filename of Spamhaus SBL file") @idiokit.stream def poll(self): skip_chars = ["#", ":", "$"] self.log.info("Opening %s" % self.sbl_filepath) ips = [] try: with open(self.sbl_filepath, "r") as f: for line in f: skip = False for c in skip_chars: if line.startswith(c): skip = True if skip: continue parts = line.split() if len(parts) == 1: ip = parts[0] elif len(parts) == 2: ip_parts = parts[0].split("/") if len(ip_parts) != 2: continue # for now we handle only /32 addresses if ip_parts[1] != "/32": continue ip = ip[0] else: continue ips.append(ip) self.log.info("Read %d ip addresses" % len(ips)) except IOError, ioe: self.log.error("Could not open %s: %s" % (self.sbl_filepath, ioe)) for ip in ips: event = events.Event() event.add("ip", ip) event.add("feed", "spamhaus block list") event.add("type", "spambot") if self.use_cymru_whois: values = yield cymruwhois.lookup(ip) for key, value in values: event.add(key, value) yield idiokit.send(event) if __name__ == "__main__": SpamhausSblBot.from_command_line().execute()
Python
0
@@ -210,24 +210,969 @@ SBL file%22)%0A%0A + def _event(self):%0A event = events.Event()%0A event.add(%22feed%22, %22spamhaus block list%22)%0A event.add(%22type%22, %22spambot%22)%0A return event%0A%0A @idiokit.stream%0A def _cymru_augment(self, event, key):%0A if self.use_cymru_whois:%0A values = yield cymruwhois.lookup(key)%0A for key, value in values:%0A event.add(key, value)%0A yield idiokit.stop(event)%0A%0A @idiokit.stream%0A def _ip_events(self, ips):%0A for ip in ips:%0A event = self._event()%0A event.add(%22ip%22, ip)%0A event = yield self._cymru_augment(event, ip)%0A yield idiokit.send(event)%0A%0A @idiokit.stream%0A def _netblock_events(self, netblocks):%0A for netblock in netblocks:%0A event = self._event()%0A event.add(%22netblock%22, netblock)%0A event = yield self._cymru_augment(event, netblock.split(%22/%22)%5B0%5D)%0A yield idiokit.send(event)%0A%0A @idiokit @@ -1308,16 +1308,39 @@ ips = %5B%5D +%0A netblocks = %5B%5D %0A%0A @@ -1453,177 +1453,41 @@ -skip = False%0A for c in skip_chars:%0A if line.startswith(c):%0A skip = True%0A%0A if skip +if line and line%5B0%5D in skip_chars :%0A @@ -1626,27 +1626,33 @@ ip - = +s.append( parts%5B0%5D %0A @@ -1643,16 +1643,17 @@ parts%5B0%5D +) %0A @@ -1716,17 +1716,9 @@ i -p_parts = +f par @@ -1727,383 +1727,209 @@ %5B0%5D. -spl +endsw it +h (%22/ -%22)%0A if len(ip_parts) != 2:%0A continue%0A%0A # for now we handle only /32 addresses%0A if ip_parts%5B1%5D != %22/32%22:%0A continue%0A%0A ip = ip%5B0%5D%0A else:%0A continue%0A%0A ips.append(ip) +32%22):%0A ip, _ = parts%5B0%5D.split(%22/%22)%0A ips.append(ip)%0A else:%0A netblocks.append(parts%5B0%5D)%0A %0A @@ -1976,20 +1976,35 @@ sses +, %25d netblocks %22 %25 +( len(ips) )%0A @@ -1999,16 +1999,33 @@ len(ips) +, len(netblocks)) )%0A @@ -2139,396 +2139,80 @@ -for ip in ips:%0A event = events.Event()%0A event.add(%22ip%22, ip)%0A event.add(%22feed%22, %22spamhaus block list%22)%0A event.add(%22type%22, %22spambot%22)%0A%0A if self.use_cymru_whois:%0A values = yield cymruwhois.lookup(ip)%0A for key, value in values:%0A event.add(key, value)%0A%0A yield idiokit.send(event +yield self._ip_events(ips)%0A yield self._netblock_events(netblocks )%0A%0Ai
ce45df98d7fbd9b2679d20ac21a188a18295b667
remove debug print
3rdParty/V8/v5.7.0.0/gypfiles/gyp_v8.py
3rdParty/V8/v5.7.0.0/gypfiles/gyp_v8.py
# Copyright 2013 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # This file is (possibly, depending on python version) imported by # gyp_v8 when GYP_PARALLEL=1 and it creates sub-processes through the # multiprocessing library. # Importing in Python 2.6 (fixed in 2.7) on Windows doesn't search for imports # that don't end in .py (and aren't directories with an __init__.py). This # wrapper makes "import gyp_v8" work with those old versions and makes it # possible to execute gyp_v8.py directly on Windows where the extension is # useful. import os import sys sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..')) if ("-Dbyteorder=big" not in sys.argv and "-Dbyteorder=little" not in sys.argv): sys.argv.append("-Dbyteorder=" + sys.byteorder) sys.argv.append("-DPYTHON_EXECUTABLE=" + sys.executable) print("x"*80) print(sys.argv) sys.byteorder path = os.path.abspath(os.path.split(__file__)[0]) execfile(os.path.join(path, 'gyp_v8'))
Python
0.000008
@@ -2336,38 +2336,8 @@ e)%0A%0A -print(%22x%22*80)%0Aprint(sys.argv)%0A sys.
0ec01e1c5770c87faa5300b80c3b9d6bcb0df41b
Make sure to return python values, not lxml objects
tcxparser.py
tcxparser.py
"Simple parser for Garmin TCX files." from lxml import objectify __version__ = '0.3.0' class TcxParser: def __init__(self, tcx_file): tree = objectify.parse(tcx_file) self.root = tree.getroot() self.activity = self.root.Activities.Activity @property def latitude(self): return self.activity.Lap.Track.Trackpoint.Position.LatitudeDegrees @property def longitude(self): return self.activity.Lap.Track.Trackpoint.Position.LongitudeDegrees @property def activity_type(self): return self.activity.attrib['Sport'].lower() @property def completed_at(self): return self.activity.Lap[-1].Track.Trackpoint[-1].Time @property def distance(self): return self.activity.Lap[-1].Track.Trackpoint[-2].DistanceMeters @property def distance_units(self): return 'meters' @property def duration(self): """Returns duration of workout in seconds.""" return sum(lap.TotalTimeSeconds for lap in self.activity.Lap) @property def calories(self): return sum(lap.Calories for lap in self.activity.Lap)
Python
0
@@ -81,9 +81,9 @@ '0. -3 +4 .0'%0A @@ -371,32 +371,38 @@ .LatitudeDegrees +.pyval %0A%0A @property%0A @@ -501,16 +501,22 @@ eDegrees +.pyval %0A%0A @p @@ -710,16 +710,22 @@ -1%5D.Time +.pyval %0A%0A @p @@ -828,16 +828,22 @@ ceMeters +.pyval %0A%0A @p
0e4b650f49cbfc44027d2b86c42c43abfc6dc494
allow extending acknowledgments, closes #47
django_datawatch/models.py
django_datawatch/models.py
# -*- coding: UTF-8 -*- from __future__ import unicode_literals from dateutil import relativedelta from django.utils import timezone from django.conf import settings from django.db import models from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ from django_extensions.db.fields.json import JSONField from model_utils.choices import Choices from model_utils.models import TimeStampedModel from django_datawatch.querysets import CheckExecutionQuerySet from .datawatch import datawatch from .querysets import ResultQuerySet class AlreadyAcknowledged(Exception): pass @python_2_unicode_compatible class Result(TimeStampedModel): STATUS = Choices((0, 'unknown', _('Unknown')), (1, 'ok', _('OK')), (2, 'warning', _('Warning')), (3, 'critical', _('Critical'))) slug = models.TextField(verbose_name=_('Module slug')) identifier = models.CharField(max_length=256, verbose_name=_('Identifier')) status = models.IntegerField(choices=STATUS, default=STATUS.unknown, verbose_name=_('Status')) data = JSONField(blank=True, default=dict, verbose_name=('Data')) config = JSONField(blank=True, default=dict, verbose_name=_('Configuration')) payload_description = models.TextField(verbose_name=_('Payload description')) acknowledged_by = models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, blank=True, verbose_name=_('Acknowledged by'), related_name='acknowledged_by', on_delete=models.CASCADE) acknowledged_at = models.DateTimeField(null=True, blank=True, verbose_name=_('Acknowledged at')) acknowledged_until = models.DateTimeField(null=True, blank=True, verbose_name=_('Acknowledged until')) acknowledged_reason = models.TextField(blank=True, verbose_name=_('Acknowledge reason')) assigned_to_user = models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, blank=True, related_name='assigned_to_user', on_delete=models.SET_NULL) assigned_to_group = models.ForeignKey(to='auth.Group', null=True, blank=True, on_delete=models.SET_NULL) objects = ResultQuerySet.as_manager() class Meta: unique_together = ('slug', 'identifier') permissions = (('view', 'Can view results dashboard and details'), ('acknowledge', 'Can acknowledge results'), ('config', 'Can change the configuration for results'), ('refresh', 'Can refresh results')) def acknowledge(self, user, days, reason=None, commit=True): if self.status in (self.STATUS.warning, self.STATUS.critical) and self.is_acknowledged(): raise AlreadyAcknowledged() self.acknowledged_at = timezone.now() self.acknowledged_by = user self.acknowledged_until = timezone.now() + relativedelta.relativedelta(days=days) self.acknowledged_reason = reason or '' if commit: self.save(update_fields=['acknowledged_at', 'acknowledged_by', 'acknowledged_until', 'acknowledged_reason']) def is_acknowledged(self): return self.acknowledged_until and self.acknowledged_until >= timezone.now() def __str__(self): return self.slug def get_check_instance(self): return datawatch.get_check_class(self.slug)() def get_payload(self): return self.get_check_instance().get_payload(self.identifier) def get_formatted_data(self): return datawatch.get_check_class(self.slug)().format_result_data(self) @python_2_unicode_compatible class CheckExecution(models.Model): slug = models.TextField(verbose_name=_('Check module slug'), unique=True) last_run = models.DateTimeField(verbose_name=_('Last run')) objects = CheckExecutionQuerySet.as_manager() def __str__(self): return '%s on %s' % (self.slug, self.last_run)
Python
0
@@ -2680,16 +2680,228 @@ =True):%0A + # calculate end of requested acknowledgement%0A acknowledged_until = timezone.now() + relativedelta.relativedelta(days=days)%0A%0A # check that we're not accidentally overriding the current setup%0A @@ -2961,20 +2961,32 @@ ritical) - and +:%0A if self.is @@ -3000,18 +3000,71 @@ ledged() -:%0A + and self.acknowledged_until %3E acknowledged_until:%0A @@ -3215,63 +3215,26 @@ l = -timezone.now() + relativedelta.relativedelta(days=days) +acknowledged_until %0A
c8d781d24f647264357c6919b1568fb5d37dbcf5
use ADMIN_DATABASE instead of FRONTEND_DATABASE since the latter should not need write privileges to the database
dbmanage.py
dbmanage.py
#!/usr/bin/env python """ Imposter - Another weblog app Copyright (c) 2010 by Jochem Kossen <jochem.kossen@gmail.com> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. This is the database management code for Imposter. It's used for installing a new Imposter database and upgrading it to newer versions. """ from migrate.versioning.api import version_control, upgrade from models import User, Tag, Status, Format, Post from database import DB from helpers import hashify, slugify from datetime import datetime import sys import getpass import config as cfg def vc_db(): """install SQLAlchemy-migrate versioning tables into database""" version_control(url=cfg.FRONTEND_DATABASE, repository='migrations/') def upgrade_db(): """upgrade database schema to latest version""" upgrade(url=cfg.FRONTEND_DATABASE, repository='migrations/') def add_initial_data(): # open database session db_session = DB(cfg.FRONTEND_DATABASE).get_session() # ask user for an admin username and password username = raw_input('Please enter the admin username: ') password = getpass.getpass(prompt='Please enter the admin password: ') # add user to database u = User(username, hashify(cfg.SECRET_KEY, password)) db_session.add(u) # create statuses s1 = Status('draft') s2 = Status('private') s3 = Status('public') db_session.add(s1) db_session.add(s2) db_session.add(s3) # create formats f = Format('rest') f2 = Format('markdown') db_session.add(f) db_session.add(f2) # Tags t1 = Tag('imposter') t2 = Tag('weblog') # build initial post and put it in the database initial_post_content = """ Imposter was installed correctly! This is just a sample post to show Imposter works. 
**Have a lot of fun blogging!** """ p1 = Post('Welcome to Imposter!', initial_post_content) p1.slug = slugify(p1.title) p1.createdate = datetime.now() p1.lastmoddate = datetime.now() p1.pubdate = datetime.now() p1.format = f p1.status = s3 p1.user = u p1.tags = [t1, t2] db_session.add(p1) db_session.commit() def install_db(): """shorthand which calls all functions necessary to install a new Imposter database""" vc_db() upgrade_db() add_initial_data() def usage(): """show dbmanage.py usage""" print 'usage: dbmanage.py install|upgrade' #--------------------------------------------------------------------------- # MAIN RUN LOOP if __name__ == '__main__': if len(sys.argv) < 2: usage() sys.exit(1) if sys.argv[1] == 'install': install_db() elif sys.argv[1] == 'upgrade': upgrade_db() else: usage() sys.exit(1)
Python
0.000001
@@ -1736,15 +1736,8 @@ nfig - as cfg %0A%0Ade @@ -1837,35 +1837,35 @@ ontrol(url=c -fg.FRONTEND +onfig.ADMIN _DATABASE, r @@ -1977,27 +1977,27 @@ de(url=c -fg.FRONTEND +onfig.ADMIN _DATABAS @@ -2103,19 +2103,19 @@ DB(c -fg.FRONTEND +onfig.ADMIN _DAT @@ -2383,17 +2383,20 @@ ashify(c -f +onfi g.SECRET
3ea8c93f34667effafb888f4ed350d80ba343c03
Convert seconds to an int before waiting for it.
lettuce_webdriver/css_selector_steps.py
lettuce_webdriver/css_selector_steps.py
import time from lettuce import step from lettuce import world from lettuce_webdriver.util import assert_true from lettuce_webdriver.util import assert_false from selenium.common.exceptions import WebDriverException import logging log = logging.getLogger(__name__) def wait_for_elem(browser, sel, timeout=15): start = time.time() elems = [] while time.time() - start < timeout: elems = find_elements_by_jquery(browser, sel) if elems: return elems time.sleep(0.2) return elems def load_script(url): """Ensure that JavaScript at a given URL is available to the browser.""" browser.execute_script(""" var script_tag = document.createElement("script"); script_tag.setAttribute("type", "text/javascript"); script_tag.setAttribute("src", arguments[0]); document.getElementsByTagName("head")[0].appendChild(script_tag); """, url) def find_elements_by_jquery(browser, selector): """Find HTML elements using jQuery-style selectors. Ensures that jQuery is available to the browser; if it gets a WebDriverException that looks like """ try: return browser.execute_script("""return $(arguments[0]).get();""", selector) except WebDriverException as e: if e.msg == u'$ is not defined': load_script("//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js") return browser.execute_script("""return $(arguments[0]).get();""", selector) else: raise @step(r'There should be an element matching \$\("(.*?)"\)$') def check_element_by_selector(step, selector): elems = find_elements_by_jquery(world.browser, selector) assert_true(step, elems) @step(r'There should be an element matching \$\("(.*?)"\) within (\d+) seconds?$') def wait_for_element_by_selector(step, selector, seconds): elems = wait_for_elem(world.browser, selector, seconds) assert_true(step, elems) @step(r'I fill in \$\("(.*?)"\) with "(.*?)"$') def fill_in_by_selector(step, selector, value): elem = find_elements_by_jquery(world.browser, selector)[0] elem.clear() elem.send_keys(value) @step(r'I submit \$\("(.*?)"\)') def submit_by_selector(step, selector): elem = find_elements_by_jquery(world.browser, selector)[0] elem.submit() @step(r'I check \$\("(.*?)"\)$') def check_by_selector(step, selector): elem = find_elements_by_jquery(world.browser, selector)[0] if not elem.is_selected(): elem.click() @step(r'I click \$\("(.*?)"\)$') def click_by_selector(step, selector): # No need for separate button press step with selector style. elem = find_elements_by_jquery(world.browser, selector)[0] elem.click() @step(r'I follow the link \$\("(.*?)"\)$') def click_by_selector(step, selector): elem = find_elements_by_jquery(world.browser, selector)[0] href = elem.get_attribute('href') world.browser.get(href) @step(r'\$\("(.*?)"\) should be selected$') def click_by_selector(step, selector): # No need for separate button press step with selector style. elem = find_elements_by_jquery(world.browser, selector)[0] assert_true(step, elem.is_selected()) __all__ = [ 'wait_for_element_by_selector', 'fill_in_by_selector', 'check_by_selector', 'click_by_selector', 'check_element_by_selector', ]
Python
0.003539
@@ -1897,24 +1897,28 @@ lector, +int( seconds) %0A ass @@ -1909,16 +1909,17 @@ seconds) +) %0A ass
3f18e4891b64c45fbda9ae88e9b508b5bc2cb03a
Add infinite loop; Add env vars
temp2dash.py
temp2dash.py
import json import requests import sys from temperusb import TemperHandler URL="http://dashing:3030/widgets/inside" SCALE=1.0 OFFSET=-3.0 th = TemperHandler() devs = th.get_devices() if len(devs) != 1: print "Expected exactly one TEMPer device, found %d" % len(devs) sys.exit(1) dev = devs[0] dev.set_calibration_data(scale=SCALE, offset=OFFSET) temperature = dev.get_temperature(sensor=1) payload = { 'auth_token': 'abcdefghijklmnopqrstuvwxyz', 'temperature': '%0.0f%s' % ( temperature, u'\N{DEGREE SIGN}', ), } post = requests.post(URL, data=json.dumps(payload)) if post.status_code != 204: sys.exit(255) sys.exit(0)
Python
0.000001
@@ -5,16 +5,26 @@ rt json%0A +import os%0A import r @@ -42,16 +42,45 @@ ort sys%0A +import time%0Aimport traceback%0A from tem @@ -112,72 +112,199 @@ er%0A%0A -%0A URL -=%22http://dashing:3030/widgets/inside%22%0ASCALE=1.0%0AOFFSET=-3.0%0A + = os.environ%5B'DASHING_URL'%5D%0ASCALE = float(os.environ%5B'TEMP_SCALE'%5D)%0AOFFSET = float(os.environ%5B'TEMP_OFFSET'%5D)%0ASENSOR = int(os.environ%5B'TEMP_SENSOR'%5D)%0ASLEEP = int(os.environ%5B'SLEEP_TIME'%5D) %0A%0Ath @@ -518,16 +518,39 @@ OFFSET)%0A +%0Awhile True:%0A try:%0A%09 temperat @@ -586,12 +586,138 @@ sor= -1)%0A%0A +SENSOR)%0A except Exception, err:%0A print %22%5CnException on getting temperature%5Cn%22%0A print traceback.format_exc()%0A%0A payl @@ -728,16 +728,20 @@ = %7B%0A + + 'auth_to @@ -776,16 +776,20 @@ vwxyz',%0A + 'tem @@ -821,16 +821,20 @@ + temperat @@ -846,16 +846,20 @@ + + u'%5CN%7BDEG @@ -878,122 +878,472 @@ -),%0A%7D%0A%0Apost = requests.post(URL, data=json.dumps(payload))%0A%0Aif post.status_code != 204:%0A sys.exit(255)%0A%0Asys.exit(0 + ),%0A %7D%0A%0A sys.stdout.write(u'%250.1f%25s, ' %25 (%0A temperature,%0A u'%5CN%7BDEGREE SIGN%7D',%0A ))%0A sys.stdout.flush()%0A%0A try:%0A post = requests.post(URL, data=json.dumps(payload))%0A except Exception, err:%0A print %22%5CnException on posting temperature to dashing%5Cn%22%0A print traceback.format_exc()%0A%0A if post.status_code != 204:%0A print %22%5CnHTTP status from POST was %25s (expected 204)%5Cn%22 %25 post.status_code%0A%0A time.sleep(SLEEP )%0A
b6bb16516e44580396a700d91fc2435fc575d422
remove debug code
dbt/main.py
dbt/main.py
from dbt.logger import getLogger import argparse import os.path import sys import re import dbt.version import dbt.project as project import dbt.task.run as run_task import dbt.task.compile as compile_task import dbt.task.debug as debug_task import dbt.task.clean as clean_task import dbt.task.deps as deps_task import dbt.task.init as init_task import dbt.task.seed as seed_task import dbt.task.test as test_task import dbt.task.archive as archive_task import dbt.tracking def is_opted_out(profiles_dir): profiles = project.read_profiles(profiles_dir) if profiles is None or profiles.get("config") is None: return False elif profiles['config'].get("send_anonymous_usage_stats") == False: return True else: return False def main(args=None): if args is None: args = sys.argv[1:] handle(args) def handle(args): try: parsed = parse_args(args) # this needs to happen after args are parsed so we can determine the correct profiles.yml file if is_opted_out(parsed.profiles_dir): dbt.tracking.do_not_track() else: dbt.tracking.set_profiles_dir(parsed.profiles_dir) res = run_from_args(parsed) dbt.tracking.flush() return res except RuntimeError as e: print("Encountered an error:") print(str(e)) sys.exit(1) def get_nearest_project_dir(): root_path = os.path.abspath(os.sep) cwd = os.getcwd() while cwd != root_path: project_file = os.path.join(cwd, "dbt_project.yml") if os.path.exists(project_file): return cwd cwd = os.path.dirname(cwd) return None def run_from_args(parsed): nearest_project_dir = get_nearest_project_dir() if nearest_project_dir is None: raise RuntimeError("dbt must be run from a project root directory with a dbt_project.yml file") os.chdir(nearest_project_dir) if parsed.which == 'init': # bypass looking for a project file if we're running `dbt init` task = parsed.cls(args=parsed) else: task, proj = invoke_dbt(parsed) dbt.tracking.track_invocation_start(project=proj, args=parsed) try: return task.run() dbt.tracking.track_invocation_end(project=proj, args=parsed, result_type="ok", result=None) except Exception as e: dbt.tracking.track_invocation_end(project=proj, args=parsed, result_type="error", result=str(e)) raise def invoke_dbt(parsed): task = None proj = None try: proj = project.read_project('dbt_project.yml', parsed.profiles_dir, validate=False, profile_to_load=parsed.profile) proj.validate() except project.DbtProjectError as e: print("Encountered an error while reading the project:") print(" ERROR {}".format(str(e))) print("Did you set the correct --profile? 
Using: {}".format(parsed.profile)) all_profiles = project.read_profiles(parsed.profiles_dir).keys() profiles_string = "\n".join([" - " + key for key in all_profiles]) print("Valid profiles:\n{}".format(profiles_string)) dbt.tracking.track_invalid_invocation(project=proj, args=parsed, result_type="invalid_profile", result=str(e)) return None if parsed.target is not None: targets = proj.cfg.get('outputs', {}).keys() if parsed.target in targets: proj.cfg['target'] = parsed.target else: print("Encountered an error while reading the project:") print(" ERROR Specified target {} is not a valid option for profile {}".format(parsed.target, proj.profile_to_load)) print("Valid targets are: {}".format(targets)) dbt.tracking.track_invalid_invocation(project=proj, args=parsed, result_type="invalid_target", result="target not found") return None log_dir = proj.get('log-path', 'logs') logger = getLogger(log_dir, __name__) logger.info("running dbt with arguments %s", parsed) task = parsed.cls(args=parsed, project=proj) return task, proj def parse_args(args): p = argparse.ArgumentParser(prog='dbt: data build tool', formatter_class=argparse.RawTextHelpFormatter) p.add_argument('--version', action='version', version=dbt.version.get_version_information(), help="Show version information") subs = p.add_subparsers() base_subparser = argparse.ArgumentParser(add_help=False) base_subparser.add_argument('--profiles-dir', default=project.default_profiles_dir, type=str, help='Which dir to look in for the profiles.yml file. Default = {}'.format(project.default_profiles_dir)) base_subparser.add_argument('--profile', required=False, type=str, help='Which profile to load (overrides profile setting in dbt_project.yml file)') base_subparser.add_argument('--target', default=None, type=str, help='Which target to load for the given profile') sub = subs.add_parser('init', parents=[base_subparser]) sub.add_argument('project_name', type=str, help='Name of the new project') sub.set_defaults(cls=init_task.InitTask, which='init') sub = subs.add_parser('clean', parents=[base_subparser]) sub.set_defaults(cls=clean_task.CleanTask, which='clean') sub = subs.add_parser('compile', parents=[base_subparser]) sub.add_argument('--dry', action='store_true', help="Compile 'dry run' models") sub.set_defaults(cls=compile_task.CompileTask, which='compile') sub = subs.add_parser('debug', parents=[base_subparser]) sub.set_defaults(cls=debug_task.DebugTask, which='debug') sub = subs.add_parser('deps', parents=[base_subparser]) sub.set_defaults(cls=deps_task.DepsTask, which='deps') sub = subs.add_parser('archive', parents=[base_subparser]) sub.add_argument('--threads', type=int, required=False, help="Specify number of threads to use while archiving tables. Overrides settings in profiles.yml") sub.set_defaults(cls=archive_task.ArchiveTask, which='archive') sub = subs.add_parser('run', parents=[base_subparser]) sub.add_argument('--dry', action='store_true', help="'dry run' models") sub.add_argument('--models', required=False, nargs='+', help="Specify the models to run. All models depending on these models will also be run") sub.add_argument('--threads', type=int, required=False, help="Specify number of threads to use while executing models. 
Overrides settings in profiles.yml") sub.set_defaults(cls=run_task.RunTask, which='run') sub = subs.add_parser('seed', parents=[base_subparser]) sub.add_argument('--drop-existing', action='store_true', help="Drop existing seed tables and recreate them") sub.set_defaults(cls=seed_task.SeedTask, which='seed') sub = subs.add_parser('test', parents=[base_subparser]) sub.add_argument('--data', action='store_true', help='Run data tests defined in "tests" directory') sub.add_argument('--schema', action='store_true', help='Run constraint validations from schema.yml files') sub.add_argument('--threads', type=int, required=False, help="Specify number of threads to use while executing tests. Overrides settings in profiles.yml") sub.set_defaults(cls=test_task.TestTask, which='test') if len(args) == 0: return p.print_help() parsed = p.parse_args(args) return parsed
Python
0.02323
@@ -1103,85 +1103,8 @@ ck() -%0A else:%0A dbt.tracking.set_profiles_dir(parsed.profiles_dir) %0A%0A
285ca0f2a469d0d11baad1120a5b0b1d0074aef3
Update dbworker.py (#2)
dbworker.py
dbworker.py
# -*- coding: utf-8 -*- from tinydb import TinyDB, Query from tinydb.operations import increment, decrement from texts import strings from config import db_file from utils import get_language DEFAULT_WORD_COUNT = 3 DEFAULT_PREFIX_SUFFIX = True DEFAULT_SEPARATOR = True db = TinyDB(db_file) def get_settings_text(user_id, lang_code): user = get_person(user_id) text = strings.get(get_language(lang_code)).get("settings").format(num_of_words=user["word_count"], prefixes=strings.get(get_language(lang_code)).get("yes") if user["prefixes"] else strings.get(get_language(lang_code)).get("no"), separators=strings.get(get_language(lang_code)).get("yes") if user["separators"] else strings.get(get_language(lang_code)).get("no")) return text def user_exists(user_id): return True if len(db.search(Query().user_id == user_id)) > 0 else False def get_person(user_id): # Check if user exists S = Query() person = db.search(S.user_id == user_id) if len(person) is 0: usr = {"user_id": user_id, "word_count": DEFAULT_WORD_COUNT, "prefixes": DEFAULT_PREFIX_SUFFIX, "separators": DEFAULT_SEPARATOR} db.insert(usr) return usr return person[0] def change_word_count(user_id, increase): S = Query() if increase: db.update(increment("word_count"), S.user_id == user_id) else: db.update(decrement("word_count"), S.user_id == user_id) return db.search(S.user_id == user_id)[0] def change_prefixes(user_id, enable_prefixes): S = Query() if enable_prefixes: db.update({"prefixes": True}, S.user_id == user_id) else: db.update({"prefixes": False}, S.user_id == user_id) return db.search(S.user_id == user_id)[0] def change_separators(user_id, enable_separators): S = Query() if enable_separators: db.update({"separators": True}, S.user_id == user_id) else: db.update({"separators": False}, S.user_id == user_id) return db.search(S.user_id == user_id)[0]
Python
0
@@ -830,19 +830,12 @@ urn -True if len +bool (db. @@ -873,23 +873,8 @@ id)) - %3E 0 else False %0A%0A%0Ad
991c6bc16388e4470193462c4ce63468b22ca79a
Remove __author__
__init__.py
__init__.py
from ioc import * __author__ = "Wes Alvaro" __copyright__ = "Copyright 2013 Google Inc." __license__ = "MIT, see LICENSE"
Python
0.00012
@@ -16,34 +16,8 @@ *%0A%0A -__author__ = %22Wes Alvaro%22%0A __co
19e12f1e492272bf4a69e0bc99106e78788b9c14
Add PEP8 line terminator before EOF
__init__.py
__init__.py
from extract import *
Python
0.000004
@@ -14,8 +14,9 @@ import * +%0A