import logging import posixpath import threading from desktop.conf import TIME_ZONE from desktop.conf import DEFAULT_USER from desktop.lib.rest.http_client import HttpClient from desktop.lib.rest.resource import Resource from liboozie.conf import SECURITY_ENABLED from liboozie.conf import OOZIE_URL from liboozie.types import WorkflowList, CoordinatorList, Coordinator, Workflow,\ CoordinatorAction, WorkflowAction, BundleList, Bundle, BundleAction from liboozie.utils import config_gen LOG = logging.getLogger(__name__) DEFAULT_USER = DEFAULT_USER.get() API_VERSION = 'v1' # Overridden to v2 for SLA _XML_CONTENT_TYPE = 'application/xml;charset=UTF-8' _api_cache = None _api_cache_lock = threading.Lock() def get_oozie(user, api_version=API_VERSION): global _api_cache if _api_cache is None or _api_cache.api_version != api_version: _api_cache_lock.acquire() try: if _api_cache is None or _api_cache.api_version != api_version: secure = SECURITY_ENABLED.get() _api_cache = OozieApi(OOZIE_URL.get(), secure, api_version) finally: _api_cache_lock.release() _api_cache.setuser(user) return _api_cache class OozieApi(object): def __init__(self, oozie_url, security_enabled=False, api_version=API_VERSION): self._url = posixpath.join(oozie_url, api_version) self._client = HttpClient(self._url, logger=LOG) if security_enabled: self._client.set_kerberos_auth() self._root = Resource(self._client) self._security_enabled = security_enabled # To store username info self._thread_local = threading.local() self.api_version = api_version def __str__(self): return "OozieApi at %s" % (self._url,) @property def url(self): return self._url @property def security_enabled(self): return self._security_enabled @property def user(self): return self._thread_local.user def setuser(self, user): if hasattr(user, 'username'): self._thread_local.user = user.username else: self._thread_local.user = user def _get_params(self): if self.security_enabled: return { 'doAs': self.user, 'timezone': TIME_ZONE.get() } return { 'user.name': DEFAULT_USER, 'doAs': self.user, 'timezone': TIME_ZONE.get() } def _get_oozie_properties(self, properties=None): defaults = { 'user.name': self.user, } if properties is not None: defaults.update(properties) return defaults VALID_JOB_FILTERS = ('name', 'user', 'group', 'status') def get_jobs(self, jobtype, offset=None, cnt=None, filters=None): """ Get a list of Oozie jobs. Note that offset is 1-based. 
kwargs is used for filtering and may be one of VALID_FILTERS: name, user, group, status """ params = self._get_params() if offset is not None: params['offset'] = str(offset) if cnt is not None: params['len'] = str(cnt) if filters is None: filters = [] params['jobtype'] = jobtype filter_list = [] for key, val in filters: if key not in OozieApi.VALID_JOB_FILTERS: raise ValueError('"%s" is not a valid filter for selecting jobs' % (key,)) filter_list.append('%s=%s' % (key, val)) params['filter'] = ';'.join(filter_list) # Send the request resp = self._root.get('jobs', params) if jobtype == 'wf': wf_list = WorkflowList(self, resp, filters=filters) elif jobtype == 'coord': wf_list = CoordinatorList(self, resp, filters=filters) else: wf_list = BundleList(self, resp, filters=filters) return wf_list def get_workflows(self, offset=None, cnt=None, filters=None): return self.get_jobs('wf', offset, cnt, filters) def get_coordinators(self, offset=None, cnt=None, filters=None): return self.get_jobs('coord', offset, cnt, filters) def get_bundles(self, offset=None, cnt=None, filters=None): return self.get_jobs('bundle', offset, cnt, filters) # TODO: make get_job accept any jobid def get_job(self, jobid): """ get_job(jobid) -> Workflow """ params = self._get_params() resp = self._root.get('job/%s' % (jobid,), params) wf = Workflow(self, resp) return wf def get_coordinator(self, jobid): params = self._get_params() params.update({'len': -1}) resp = self._root.get('job/%s' % (jobid,), params) return Coordinator(self, resp) def get_bundle(self, jobid): params = self._get_params() resp = self._root.get('job/%s' % (jobid,), params) return Bundle(self, resp) def get_job_definition(self, jobid): """ get_job_definition(jobid) -> Definition (xml string) """ params = self._get_params() params['show'] = 'definition' xml = self._root.get('job/%s' % (jobid,), params) return xml def get_job_log(self, jobid): """ get_job_log(jobid) -> Log (xml string) """ params = self._get_params() params['show'] = 'log' xml = self._root.get('job/%s' % (jobid,), params) return xml def get_action(self, action_id): if 'C@' in action_id: Klass = CoordinatorAction elif 'B@' in action_id: Klass = BundleAction else: Klass = WorkflowAction params = self._get_params() resp = self._root.get('job/%s' % (action_id,), params) return Klass(resp) def job_control(self, jobid, action, properties=None, parameters=None): """ job_control(jobid, action) -> None Raise RestException on error. """ if action not in ('start', 'suspend', 'resume', 'kill', 'rerun', 'coord-rerun', 'bundle-rerun'): msg = 'Invalid oozie job action: %s' % (action,) LOG.error(msg) raise ValueError(msg) properties = self._get_oozie_properties(properties) params = self._get_params() params['action'] = action if parameters is not None: params.update(parameters) return self._root.put('job/%s' % jobid, params, data=config_gen(properties), contenttype=_XML_CONTENT_TYPE) def submit_workflow(self, application_path, properties=None): """ submit_workflow(application_path, properties=None) -> jobid Raise RestException on error. """ defaults = { 'oozie.wf.application.path': application_path, 'user.name': self.user, } if properties is not None: defaults.update(properties) properties = defaults return self.submit_job(properties) # Is name actually submit_coord? def submit_job(self, properties=None): """ submit_job(properties=None, id=None) -> jobid Raise RestException on error. 
""" defaults = { 'user.name': self.user, } if properties is not None: defaults.update(properties) properties = defaults params = self._get_params() resp = self._root.post('jobs', params, data=config_gen(properties), contenttype=_XML_CONTENT_TYPE) return resp['id'] def rerun(self, jobid, properties=None, params=None): properties = self._get_oozie_properties(properties) if params is None: params = self._get_params() else: self._get_params().update(params) params['action'] = 'rerun' return self._root.put('job/%s' % jobid, params, data=config_gen(properties), contenttype=_XML_CONTENT_TYPE) def get_build_version(self): """ get_build_version() -> Build version (dictionary) """ params = self._get_params() resp = self._root.get('admin/build-version', params) return resp def get_instrumentation(self): params = self._get_params() resp = self._root.get('admin/instrumentation', params) return resp def get_metrics(self): params = self._get_params() resp = self._root.get('admin/metrics', params) return resp def get_configuration(self): """ get_configuration() -> Oozie config (dictionary) """ params = self._get_params() resp = self._root.get('admin/configuration', params) return resp def get_oozie_status(self): """ get_oozie_status() -> Oozie status (dictionary) """ params = self._get_params() resp = self._root.get('admin/status', params) return resp def get_oozie_slas(self, **kwargs): """ filter= app_name=my-sla-app id=0000002-131206135002457-oozie-oozi-W nominal_start=2013-06-18T00:01Z nominal_end=2013-06-23T00:01Z """ params = self._get_params() params['filter'] = ';'.join(['%s=%s' % (key, val) for key, val in kwargs.iteritems()]) resp = self._root.get('sla', params) return resp['slaSummaryList']
{ "content_hash": "c2711e8d938e4604a9b4ef50cbda4cc0", "timestamp": "", "source": "github", "line_count": 294, "max_line_length": 112, "avg_line_length": 28.928571428571427, "alnum_prop": 0.6435038212815991, "repo_name": "yongshengwang/builthue", "id": "ab01ce604d381cf900a2c9951b11f77ce856868f", "size": "9275", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "desktop/libs/liboozie/src/liboozie/oozie_api.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "207947" }, { "name": "C", "bytes": "10774013" }, { "name": "C++", "bytes": "184593" }, { "name": "CSS", "bytes": "655282" }, { "name": "Emacs Lisp", "bytes": "14875" }, { "name": "GAP", "bytes": "11337" }, { "name": "Java", "bytes": "3080564" }, { "name": "JavaScript", "bytes": "2418037" }, { "name": "Makefile", "bytes": "86977" }, { "name": "Perl", "bytes": "161801" }, { "name": "PigLatin", "bytes": "282" }, { "name": "Prolog", "bytes": "4590" }, { "name": "Python", "bytes": "29990389" }, { "name": "Shell", "bytes": "38643" }, { "name": "TeX", "bytes": "129526" }, { "name": "Thrift", "bytes": "99710" }, { "name": "XSLT", "bytes": "367778" } ], "symlink_target": "" }
from sqlalchemy import create_engine, engine
import pandas as pd
import sys

# Specify CSV file with flickr photos from 100M photo Database
csv_file = sys.argv[1]

DB_NAME = ''     # Name of your SQL Database
TABLE_NAME = ''  # Name of your SQL Table


def db_conn(df, DB_NAME, table_name):
    # MySql connection in sqlAlchemy
    engine = create_engine('mysql://root:password@localhost:3306/' + DB_NAME + '?charset=utf8')
    connection = engine.connect()
    # Do not insert the row number (index=False)
    df.to_sql(name=table_name, con=engine, if_exists='append', flavor='mysql', index=False, chunksize=2000)
    connection.close()


if __name__ == '__main__':
    pics = pd.read_csv(csv_file, index_col=None)

    # If table exists then set index ('ID') to be at the end of the current table.
    try:
        engine = create_engine('mysql://root:password@localhost:3306/' + DB_NAME + '?charset=utf8')
        connection = engine.connect()
        max_ID_q = connection.execute("select max(ID) from " + TABLE_NAME + ";")
        max_ID = -1
        for item in max_ID_q:
            max_ID = item[0]
        indexer = range(max_ID + 1, max_ID + 1 + pics.shape[0])
        pics['ID'] = indexer
    except:
        pics['ID'] = range(pics.shape[0])

    db_conn(pics, DB_NAME, TABLE_NAME)
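A minimal, self-contained sketch of the same DataFrame-to-SQL append pattern used above. It swaps the MySQL URL for an in-memory SQLite engine and drops the old pandas `flavor` argument so it runs on current pandas; the table name `photos` and the sample columns are illustrative only, not part of the original script:

import pandas as pd
from sqlalchemy import create_engine

# In-memory SQLite stands in for the MySQL connection string in the script above.
engine = create_engine('sqlite://')

df = pd.DataFrame({'ID': [0, 1], 'title': ['sunset', 'harbor']})

# Append rows without writing the DataFrame index as a column, as db_conn() does.
df.to_sql(name='photos', con=engine, if_exists='append', index=False, chunksize=2000)

print(pd.read_sql('select * from photos', engine))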
{ "content_hash": "8e74d7471ef90ad9ce2e91b713d832d4", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 107, "avg_line_length": 26.822222222222223, "alnum_prop": 0.6752278376139188, "repo_name": "tjtorres/Random-Walk", "id": "35c3bd4dc99dfdb94bae9c15fc4209a1697d8d79", "size": "1207", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Classification_Scripts/database_entry.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "151884" }, { "name": "HTML", "bytes": "18783" }, { "name": "JavaScript", "bytes": "236056" }, { "name": "Python", "bytes": "18650" } ], "symlink_target": "" }
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('adjudication', '0007_auto_20190921_1705'),
    ]

    operations = [
        migrations.AddField(
            model_name='appearance',
            name='area',
            field=models.CharField(blank=True, default='', help_text='Area representing', max_length=255),
        ),
    ]
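For context, this is roughly the model-level change the migration above encodes. The `Appearance` model lives elsewhere in the `adjudication` app, so the class body here is an assumed sketch; migrations like this are normally generated by `manage.py makemigrations` after editing the model:

from django.db import models


class Appearance(models.Model):
    # ...existing fields omitted (assumed, not shown in this file)...
    area = models.CharField(blank=True, default='', help_text='Area representing', max_length=255)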
{ "content_hash": "e7f9bf3d6013b0e28a8a476a34dee646", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 106, "avg_line_length": 24.875, "alnum_prop": 0.5979899497487438, "repo_name": "barberscore/barberscore-api", "id": "9ef789b6b4c39c9e70a98cd90035430d28a6dff5", "size": "447", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "project/apps/adjudication/migrations/0008_appearance_area.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "HTML", "bytes": "54125" }, { "name": "JavaScript", "bytes": "5861" }, { "name": "Procfile", "bytes": "114" }, { "name": "Python", "bytes": "766540" }, { "name": "Ruby", "bytes": "456" } ], "symlink_target": "" }
"""Implementations for various useful completers. These are all loaded by default by IPython. """ #----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team. # # Distributed under the terms of the BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from __future__ import print_function # Stdlib imports import glob import inspect import os import re import sys try: # Python >= 3.3 from importlib.machinery import all_suffixes _suffixes = all_suffixes() except ImportError: from imp import get_suffixes _suffixes = [ s[0] for s in get_suffixes() ] # Third-party imports from time import time from zipimport import zipimporter # Our own imports from IPython.core.completer import expand_user, compress_user from IPython.core.error import TryNext from IPython.utils._process_common import arg_split from IPython.utils.py3compat import string_types # FIXME: this should be pulled in with the right call via the component system from IPython import get_ipython #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- # Time in seconds after which the rootmodules will be stored permanently in the # ipython ip.db database (kept in the user's .ipython dir). TIMEOUT_STORAGE = 2 # Time in seconds after which we give up TIMEOUT_GIVEUP = 20 # Regular expression for the python import statement import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)' r'(?P<package>[/\\]__init__)?' r'(?P<suffix>%s)$' % r'|'.join(re.escape(s) for s in _suffixes)) # RE for the ipython %run command (python + ipython scripts) magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$') #----------------------------------------------------------------------------- # Local utilities #----------------------------------------------------------------------------- def module_list(path): """ Return the list containing the names of the modules available in the given folder. """ # sys.path has the cwd as an empty string, but isdir/listdir need it as '.' if path == '': path = '.' # A few local constants to be used in loops below pjoin = os.path.join if os.path.isdir(path): # Build a list of all files in the directory and all files # in its subdirectories. For performance reasons, do not # recurse more than one level into subdirectories. files = [] for root, dirs, nondirs in os.walk(path): subdir = root[len(path)+1:] if subdir: files.extend(pjoin(subdir, f) for f in nondirs) dirs[:] = [] # Do not recurse into additional subdirectories. else: files.extend(nondirs) else: try: files = list(zipimporter(path)._files.keys()) except: files = [] # Build a list of modules which match the import_re regex. modules = [] for f in files: m = import_re.match(f) if m: modules.append(m.group('name')) return list(set(modules)) def get_root_modules(): """ Returns a list containing the names of all the modules available in the folders of the pythonpath. ip.db['rootmodules_cache'] maps sys.path entries to list of modules. 
""" ip = get_ipython() rootmodules_cache = ip.db.get('rootmodules_cache', {}) rootmodules = list(sys.builtin_module_names) start_time = time() store = False for path in sys.path: try: modules = rootmodules_cache[path] except KeyError: modules = module_list(path) try: modules.remove('__init__') except ValueError: pass if path not in ('', '.'): # cwd modules should not be cached rootmodules_cache[path] = modules if time() - start_time > TIMEOUT_STORAGE and not store: store = True print("\nCaching the list of root modules, please wait!") print("(This will only be done once - type '%rehashx' to " "reset cache!)\n") sys.stdout.flush() if time() - start_time > TIMEOUT_GIVEUP: print("This is taking too long, we give up.\n") return [] rootmodules.extend(modules) if store: ip.db['rootmodules_cache'] = rootmodules_cache rootmodules = list(set(rootmodules)) return rootmodules def is_importable(module, attr, only_modules): if only_modules: return inspect.ismodule(getattr(module, attr)) else: return not(attr[:2] == '__' and attr[-2:] == '__') def try_import(mod, only_modules=False): try: m = __import__(mod) except: return [] mods = mod.split('.') for module in mods[1:]: m = getattr(m, module) m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__ completions = [] if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init: completions.extend( [attr for attr in dir(m) if is_importable(m, attr, only_modules)]) completions.extend(getattr(m, '__all__', [])) if m_is_init: completions.extend(module_list(os.path.dirname(m.__file__))) completions = set(completions) if '__init__' in completions: completions.remove('__init__') return list(completions) #----------------------------------------------------------------------------- # Completion-related functions. #----------------------------------------------------------------------------- def quick_completer(cmd, completions): """ Easily create a trivial completer for a command. Takes either a list of completions, or all completions in string (that will be split on whitespace). Example:: [d:\ipython]|1> import ipy_completers [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz']) [d:\ipython]|3> foo b<TAB> bar baz [d:\ipython]|3> foo ba """ if isinstance(completions, string_types): completions = completions.split() def do_complete(self, event): return completions get_ipython().set_hook('complete_command',do_complete, str_key = cmd) def module_completion(line): """ Returns a list containing the completion possibilities for an import line. The line looks like this : 'import xml.d' 'from xml.dom import' """ words = line.split(' ') nwords = len(words) # from whatever <tab> -> 'import ' if nwords == 3 and words[0] == 'from': return ['import '] # 'from xy<tab>' or 'import xy<tab>' if nwords < 3 and (words[0] in ['import','from']) : if nwords == 1: return get_root_modules() mod = words[1].split('.') if len(mod) < 2: return get_root_modules() completion_list = try_import('.'.join(mod[:-1]), True) return ['.'.join(mod[:-1] + [el]) for el in completion_list] # 'from xyz import abc<tab>' if nwords >= 3 and words[0] == 'from': mod = words[1] return try_import(mod) #----------------------------------------------------------------------------- # Completers #----------------------------------------------------------------------------- # These all have the func(self, event) signature to be used as custom # completers def module_completer(self,event): """Give completions after user has typed 'import ...' 
or 'from ...'""" # This works in all versions of python. While 2.5 has # pkgutil.walk_packages(), that particular routine is fairly dangerous, # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full # of possibly problematic side effects. # This search the folders in the sys.path for available modules. return module_completion(event.line) # FIXME: there's a lot of logic common to the run, cd and builtin file # completers, that is currently reimplemented in each. def magic_run_completer(self, event): """Complete files that end in .py or .ipy or .ipynb for the %run command. """ comps = arg_split(event.line, strict=False) # relpath should be the current token that we need to complete. if (len(comps) > 1) and (not event.line.endswith(' ')): relpath = comps[-1].strip("'\"") else: relpath = '' #print("\nev=", event) # dbg #print("rp=", relpath) # dbg #print('comps=', comps) # dbg lglob = glob.glob isdir = os.path.isdir relpath, tilde_expand, tilde_val = expand_user(relpath) # Find if the user has already typed the first filename, after which we # should complete on all files, since after the first one other files may # be arguments to the input script. if any(magic_run_re.match(c) for c in comps): matches = [f.replace('\\','/') + ('/' if isdir(f) else '') for f in lglob(relpath+'*')] else: dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)] pys = [f.replace('\\','/') for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') + lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')] matches = dirs + pys #print('run comp:', dirs+pys) # dbg return [compress_user(p, tilde_expand, tilde_val) for p in matches] def cd_completer(self, event): """Completer function for cd, which only returns directories.""" ip = get_ipython() relpath = event.symbol #print(event) # dbg if event.line.endswith('-b') or ' -b ' in event.line: # return only bookmark completions bkms = self.db.get('bookmarks', None) if bkms: return bkms.keys() else: return [] if event.symbol == '-': width_dh = str(len(str(len(ip.user_ns['_dh']) + 1))) # jump in directory history by number fmt = '-%0' + width_dh +'d [%s]' ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])] if len(ents) > 1: return ents return [] if event.symbol.startswith('--'): return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']] # Expand ~ in path and normalize directory separators. relpath, tilde_expand, tilde_val = expand_user(relpath) relpath = relpath.replace('\\','/') found = [] for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*') if os.path.isdir(f)]: if ' ' in d: # we don't want to deal with any of that, complex code # for this is elsewhere raise TryNext found.append(d) if not found: if os.path.isdir(relpath): return [compress_user(relpath, tilde_expand, tilde_val)] # if no completions so far, try bookmarks bks = self.db.get('bookmarks',{}) bkmatches = [s for s in bks if s.startswith(event.symbol)] if bkmatches: return bkmatches raise TryNext return [compress_user(p, tilde_expand, tilde_val) for p in found] def reset_completer(self, event): "A completer for %reset magic" return '-f -s in out array dhist'.split()
{ "content_hash": "b068bc474d8f1b76abad13fbc5f02976", "timestamp": "", "source": "github", "line_count": 352, "max_line_length": 82, "avg_line_length": 33.32386363636363, "alnum_prop": 0.5475703324808184, "repo_name": "Lightmatter/django-inlineformfield", "id": "08bff1c7cded352fc6c269d19a07848bdcc102ca", "size": "11730", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": ".tox/py27/lib/python2.7/site-packages/IPython/core/completerlib.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "43622" }, { "name": "Groff", "bytes": "3667" }, { "name": "HTML", "bytes": "108126" }, { "name": "JavaScript", "bytes": "853457" }, { "name": "Python", "bytes": "10506732" }, { "name": "Shell", "bytes": "3801" }, { "name": "Smarty", "bytes": "21023" } ], "symlink_target": "" }
import sys def get_file_text(path): """ Returns file text by path""" file_io = open(path, "r") text = file_io.read() file_io.close() return text def get_file_output(encoding="utf-8", path=sys.argv[-1], arg_string=""): """ Returns answer file output :param encoding: to decode output in python3 :param path: path of file to execute :return: list of strings """ import subprocess proc = subprocess.Popen([sys.executable, path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) if arg_string: for arg in arg_string.split("\n"): proc.stdin.write(bytearray(str(arg) + "\n", encoding)) proc.stdin.flush() return list(map(lambda x: str(x.decode(encoding)), proc.communicate()[0].splitlines())) def test_file_importable(): """ Tests there is no obvious syntax errors""" path = sys.argv[-1] if not path.endswith(".py"): import os parent = os.path.abspath(os.path.join(path, os.pardir)) python_files = [f for f in os.listdir(parent) if os.path.isfile(os.path.join(parent, f)) and f.endswith(".py")] for python_file in python_files: if python_file == "tests.py": continue check_importable_path(os.path.join(parent, python_file)) return check_importable_path(path) def check_importable_path(path): """ Checks that file is importable. Reports failure otherwise. """ saved_input = patch_input() try: import_file(path) except: failed("The file contains syntax errors", test_file_importable.__name__) return finally: revert_input(saved_input) passed(test_file_importable.__name__) def patch_input(): def mock_fun(_m=""): return "mock" if sys.version_info[0] == 3: import builtins save_input = builtins.input builtins.input = mock_fun return save_input elif sys.version_info[0] == 2: import __builtin__ save_input = __builtin__.raw_input __builtin__.raw_input = mock_fun __builtin__.input = mock_fun return save_input def revert_input(saved_input): if sys.version_info[0] == 3: import builtins builtins.input = saved_input elif sys.version_info[0] == 2: import __builtin__ __builtin__.raw_input = saved_input __builtin__.input = saved_input def import_file(path): """ Returns imported file """ if sys.version_info[0] == 2 or sys.version_info[1] < 3: import imp return imp.load_source("tmp", path) elif sys.version_info[0] == 3: import importlib.machinery return importlib.machinery.SourceFileLoader("tmp", path).load_module("tmp") def import_task_file(): """ Returns imported file. Imports file from which check action was run """ path = sys.argv[-1] return import_file(path) def test_is_not_empty(): """ Checks that file is not empty """ path = sys.argv[-1] file_text = get_file_text(path) if len(file_text) > 0: passed() else: failed("The file is empty. Please, reload the task and try again.") def test_text_equals(text, error_text): """ Checks that answer equals text. """ path = sys.argv[-1] file_text = get_file_text(path) if file_text.strip() == text: passed() else: failed(error_text) def test_answer_placeholders_text_deleted(error_text="Don't just delete task text"): """ Checks that all answer placeholders are not empty """ windows = get_answer_placeholders() for window in windows: if len(window) == 0: failed(error_text) return passed() def set_congratulation_message(message): """ Overrides default 'Congratulations!' 
message """ print("#educational_plugin CONGRATS_MESSAGE " + message) def failed(message="Please, reload the task and try again.", name=None): """ Reports failure """ if not name: name = sys._getframe().f_back.f_code.co_name print("#educational_plugin " + name + " FAILED + " + message) def passed(name=None): """ Reports success """ if not name: name = sys._getframe().f_back.f_code.co_name print("#educational_plugin " + name + " test OK") def get_answer_placeholders(): """ Returns all answer placeholders text """ prefix = "#educational_plugin_window = " path = sys.argv[-1] import os file_name_without_extension = os.path.splitext(path)[0] windows_path = file_name_without_extension + "_windows" windows = [] f = open(windows_path, "r") window_text = "" first = True for line in f.readlines(): if line.startswith(prefix): if not first: windows.append(window_text.strip()) else: first = False window_text = line[len(prefix):] else: window_text += line if window_text: windows.append(window_text.strip()) f.close() return windows def check_samples(samples=()): """ Check script output for all samples. Sample is a two element list, where the first is input and the second is output. """ for sample in samples: if len(sample) == 2: output = get_file_output(arg_string=str(sample[0])) if "\n".join(output) != sample[1]: failed( "Test from samples failed: \n \n" "Input:\n{}" "\n \n" "Expected:\n{}" "\n \n" "Your result:\n{}".format(str.strip(sample[0]), str.strip(sample[1]), "\n".join(output))) return set_congratulation_message("All test from samples passed. Now we are checking your solution on Stepik server.") passed() def run_common_tests(error_text="Please, reload file and try again"): test_is_not_empty() test_answer_placeholders_text_deleted() test_file_importable()
{ "content_hash": "268e7bbb47193aaeaf7c2138571ab10f", "timestamp": "", "source": "github", "line_count": 224, "max_line_length": 119, "avg_line_length": 27.526785714285715, "alnum_prop": 0.5840090820629257, "repo_name": "RyanSkraba/beam", "id": "4959711adc23d12f1f7a961bcdb161445c113c59", "size": "6977", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "learning/katas/python/test_helper.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "ANTLR", "bytes": "1597" }, { "name": "CSS", "bytes": "40963" }, { "name": "Dockerfile", "bytes": "16638" }, { "name": "FreeMarker", "bytes": "7428" }, { "name": "Go", "bytes": "2683402" }, { "name": "Groovy", "bytes": "517560" }, { "name": "HTML", "bytes": "183330" }, { "name": "Java", "bytes": "28609011" }, { "name": "JavaScript", "bytes": "16595" }, { "name": "Jupyter Notebook", "bytes": "56365" }, { "name": "Python", "bytes": "6191025" }, { "name": "Ruby", "bytes": "4159" }, { "name": "Shell", "bytes": "235061" }, { "name": "TSQL", "bytes": "841" } ], "symlink_target": "" }
import pickle

import redis

from pod_manager.settings import REDIS_HOST, REDIS_PORT, REDIS_DB

__all__ = [
    'get_client',
    'cache_object',
    'get_object'
]


def get_client():
    client = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
    return client


def cache_object(client, key, obj, ttl=60):
    pipe = client.pipeline()

    data = pickle.dumps(obj)
    pipe.set(key, data)

    if ttl:
        pipe.expire(key, ttl)

    pipe.execute()


def get_object(client, key):
    data = client.get(key)

    if not data:
        return None

    obj = pickle.loads(data)
    return obj
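A short usage sketch of the three helpers above, assuming the `pod_manager` package is importable and a Redis server is reachable with the configured host/port/db; the key name and cached dictionary are made up for illustration:

from pod_manager.db import get_client, cache_object, get_object

client = get_client()

# Pickle and store an arbitrary object under an illustrative key for two minutes.
cache_object(client, 'pod:42:status', {'state': 'running', 'replicas': 3}, ttl=120)

# Reads return the unpickled object, or None once the key is missing or expired.
status = get_object(client, 'pod:42:status')
print(status)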
{ "content_hash": "4e643402be9d60657f9f31259fdb577d", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 71, "avg_line_length": 18.272727272727273, "alnum_prop": 0.6285240464344942, "repo_name": "racker/pod-manager", "id": "f67acf1b57aafb3c80e2a2c082939bde8bf2fb93", "size": "603", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pod_manager/db.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "6797" } ], "symlink_target": "" }
from __future__ import absolute_import

# import apis into api package
from .input_api import InputApi
from .jobs_api import JobsApi
from .output_api import OutputApi
from .information_api import InformationApi
from .conversion_api import ConversionApi
{ "content_hash": "26075d3bfab1a5a936719371cfbb4cfc", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 43, "avg_line_length": 28.22222222222222, "alnum_prop": 0.8110236220472441, "repo_name": "onlineconvert/onlineconvert-api-sdk-python", "id": "96d0613c65ca93d40e0b2d6af368b4e234313149", "size": "254", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "SwaggerPetstore/apis/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "90387" } ], "symlink_target": "" }
"""The tests for the Tasmota binary sensor platform.""" import copy from datetime import timedelta import json from hatasmota.utils import ( get_topic_stat_status, get_topic_stat_switch, get_topic_tele_sensor, get_topic_tele_will, ) from homeassistant.components import binary_sensor from homeassistant.components.tasmota.const import DEFAULT_PREFIX from homeassistant.const import ( ATTR_ASSUMED_STATE, EVENT_STATE_CHANGED, STATE_OFF, STATE_ON, ) import homeassistant.core as ha import homeassistant.util.dt as dt_util from .test_common import ( DEFAULT_CONFIG, help_test_availability, help_test_availability_discovery_update, help_test_availability_poll_state, help_test_availability_when_connection_lost, help_test_discovery_device_remove, help_test_discovery_removal, help_test_discovery_update_unchanged, help_test_entity_id_update_discovery_update, help_test_entity_id_update_subscriptions, ) from tests.async_mock import patch from tests.common import async_fire_mqtt_message, async_fire_time_changed async def test_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["swc"][0] = 1 mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() state = hass.states.get("binary_sensor.test") assert state.state == "unavailable" assert not state.attributes.get(ATTR_ASSUMED_STATE) async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") state = hass.states.get("binary_sensor.test") assert state.state == STATE_OFF assert not state.attributes.get(ATTR_ASSUMED_STATE) # Test normal state update async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/SWITCH1", '{"STATE":"ON"}') state = hass.states.get("binary_sensor.test") assert state.state == STATE_ON async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/SWITCH1", '{"STATE":"OFF"}') state = hass.states.get("binary_sensor.test") assert state.state == STATE_OFF # Test periodic state update async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"ON"}') state = hass.states.get("binary_sensor.test") assert state.state == STATE_ON async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"OFF"}') state = hass.states.get("binary_sensor.test") assert state.state == STATE_OFF # Test polled state update async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/STATUS8", '{"StatusSNS":{"Switch1":"ON"}}' ) state = hass.states.get("binary_sensor.test") assert state.state == STATE_ON async_fire_mqtt_message( hass, "tasmota_49A3BC/stat/STATUS8", '{"StatusSNS":{"Switch1":"OFF"}}' ) state = hass.states.get("binary_sensor.test") assert state.state == STATE_OFF async def test_friendly_names(hass, mqtt_mock, setup_tasmota): """Test state update via MQTT.""" config = copy.deepcopy(DEFAULT_CONFIG) config["rl"][0] = 1 config["swc"][0] = 1 config["swc"][1] = 1 mac = config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() state = hass.states.get("binary_sensor.tasmota_binary_sensor_1") assert state.state == "unavailable" assert state.attributes.get("friendly_name") == "Tasmota binary_sensor 1" state = hass.states.get("binary_sensor.beer") assert state.state == "unavailable" assert state.attributes.get("friendly_name") == "Beer" async def test_off_delay(hass, mqtt_mock, setup_tasmota): """Test off_delay option.""" config = copy.deepcopy(DEFAULT_CONFIG) config["swc"][0] = 13 # PUSHON: 1s off_delay mac = 
config["mac"] async_fire_mqtt_message( hass, f"{DEFAULT_PREFIX}/{mac}/config", json.dumps(config), ) await hass.async_block_till_done() events = [] @ha.callback def callback(event): """Verify event got called.""" events.append(event.data["new_state"].state) hass.bus.async_listen(EVENT_STATE_CHANGED, callback) async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online") await hass.async_block_till_done() assert events == ["off"] async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/SWITCH1", '{"STATE":"ON"}') await hass.async_block_till_done() state = hass.states.get("binary_sensor.test") assert state.state == STATE_ON assert events == ["off", "on"] async_fire_mqtt_message(hass, "tasmota_49A3BC/stat/SWITCH1", '{"STATE":"ON"}') await hass.async_block_till_done() state = hass.states.get("binary_sensor.test") assert state.state == STATE_ON assert events == ["off", "on", "on"] async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=1)) await hass.async_block_till_done() state = hass.states.get("binary_sensor.test") assert state.state == STATE_OFF assert events == ["off", "on", "on", "off"] async def test_availability_when_connection_lost( hass, mqtt_client_mock, mqtt_mock, setup_tasmota ): """Test availability after MQTT disconnection.""" config = copy.deepcopy(DEFAULT_CONFIG) config["swc"][0] = 1 await help_test_availability_when_connection_lost( hass, mqtt_client_mock, mqtt_mock, binary_sensor.DOMAIN, config ) async def test_availability(hass, mqtt_mock, setup_tasmota): """Test availability.""" config = copy.deepcopy(DEFAULT_CONFIG) config["swc"][0] = 1 await help_test_availability(hass, mqtt_mock, binary_sensor.DOMAIN, config) async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota): """Test availability discovery update.""" config = copy.deepcopy(DEFAULT_CONFIG) config["swc"][0] = 1 await help_test_availability_discovery_update( hass, mqtt_mock, binary_sensor.DOMAIN, config ) async def test_availability_poll_state( hass, mqtt_client_mock, mqtt_mock, setup_tasmota ): """Test polling after MQTT connection (re)established.""" config = copy.deepcopy(DEFAULT_CONFIG) config["swc"][0] = 1 poll_topic = "tasmota_49A3BC/cmnd/STATUS" await help_test_availability_poll_state( hass, mqtt_client_mock, mqtt_mock, binary_sensor.DOMAIN, config, poll_topic, "8" ) async def test_discovery_removal_binary_sensor(hass, mqtt_mock, caplog, setup_tasmota): """Test removal of discovered binary_sensor.""" config1 = copy.deepcopy(DEFAULT_CONFIG) config2 = copy.deepcopy(DEFAULT_CONFIG) config1["swc"][0] = 1 config2["swc"][0] = 0 await help_test_discovery_removal( hass, mqtt_mock, caplog, binary_sensor.DOMAIN, config1, config2 ) async def test_discovery_update_unchanged_binary_sensor( hass, mqtt_mock, caplog, setup_tasmota ): """Test update of discovered binary_sensor.""" config = copy.deepcopy(DEFAULT_CONFIG) config["swc"][0] = 1 with patch( "homeassistant.components.tasmota.binary_sensor.TasmotaBinarySensor.discovery_update" ) as discovery_update: await help_test_discovery_update_unchanged( hass, mqtt_mock, caplog, binary_sensor.DOMAIN, config, discovery_update ) async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota): """Test device registry remove.""" config = copy.deepcopy(DEFAULT_CONFIG) config["swc"][0] = 1 unique_id = f"{DEFAULT_CONFIG['mac']}_binary_sensor_switch_0" await help_test_discovery_device_remove( hass, mqtt_mock, binary_sensor.DOMAIN, unique_id, config ) async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota): """Test MQTT 
subscriptions are managed when entity_id is updated.""" config = copy.deepcopy(DEFAULT_CONFIG) config["swc"][0] = 1 topics = [ get_topic_stat_switch(config, 0), get_topic_tele_sensor(config), get_topic_stat_status(config, 8), get_topic_tele_will(config), ] await help_test_entity_id_update_subscriptions( hass, mqtt_mock, binary_sensor.DOMAIN, config, topics ) async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota): """Test MQTT discovery update when entity_id is updated.""" config = copy.deepcopy(DEFAULT_CONFIG) config["swc"][0] = 1 await help_test_entity_id_update_discovery_update( hass, mqtt_mock, binary_sensor.DOMAIN, config )
{ "content_hash": "a68de7d38d177f2f88a94b505fb7a2d9", "timestamp": "", "source": "github", "line_count": 259, "max_line_length": 93, "avg_line_length": 33.335907335907336, "alnum_prop": 0.6724577252721797, "repo_name": "sdague/home-assistant", "id": "52ab88b0ecb5814584d7b24fa992dc46d727238e", "size": "8634", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "tests/components/tasmota/test_binary_sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "1488" }, { "name": "Python", "bytes": "27869189" }, { "name": "Shell", "bytes": "4528" } ], "symlink_target": "" }
from pandac.PandaModules import * import ShtikerPage from direct.gui.DirectGui import * from pandac.PandaModules import * from toontown.quest import Quests from toontown.toon import NPCToons from toontown.hood import ZoneUtil from toontown.toonbase import ToontownGlobals from toontown.toonbase import TTLocalizer from toontown.quest import QuestBookPoster from direct.directnotify import DirectNotifyGlobal class QuestPage(ShtikerPage.ShtikerPage): notify = DirectNotifyGlobal.directNotify.newCategory('QuestPage') def __init__(self): ShtikerPage.ShtikerPage.__init__(self) self.quests = {0: None, 1: None, 2: None, 3: None} self.textRolloverColor = Vec4(1, 1, 0, 1) self.textDownColor = Vec4(0.5, 0.9, 1, 1) self.textDisabledColor = Vec4(0.4, 0.8, 0.4, 1) self.onscreen = 0 self.lastQuestTime = globalClock.getRealTime() return def load(self): self.title = DirectLabel(parent=self, relief=None, text=TTLocalizer.QuestPageToonTasks, text_scale=0.12, textMayChange=0, pos=(0, 0, 0.6)) questFramePlaceList = ((-0.45, 0, 0.25, 0, 0, 0), (-0.45, 0, -0.35, 0, 0, 0), (0.45, 0, 0.25, 0, 0, 0), (0.45, 0, -0.35, 0, 0, 0)) self.questFrames = [] for i in xrange(ToontownGlobals.MaxQuestCarryLimit): frame = QuestBookPoster.QuestBookPoster(reverse=i > 1, mapIndex=i + 1) frame.reparentTo(self) frame.setPosHpr(*questFramePlaceList[i]) frame.setScale(1.06) self.questFrames.append(frame) self.accept('questsChanged', self.updatePage) return def acceptOnscreenHooks(self): self.accept(ToontownGlobals.QuestsHotkeyOn, self.showQuestsOnscreen) self.accept(ToontownGlobals.QuestsHotkeyOff, self.hideQuestsOnscreen) def ignoreOnscreenHooks(self): self.ignore(ToontownGlobals.QuestsHotkeyOn) self.ignore(ToontownGlobals.QuestsHotkeyOff) def unload(self): self.ignore('questsChanged') del self.title del self.quests del self.questFrames loader.unloadModel('phase_3.5/models/gui/stickerbook_gui') ShtikerPage.ShtikerPage.unload(self) def clearQuestFrame(self, index): self.questFrames[index].clear() self.quests[index] = None return def fillQuestFrame(self, questDesc, index): self.questFrames[index].update(questDesc) self.quests[index] = questDesc def getLowestUnusedIndex(self): for i in xrange(ToontownGlobals.MaxQuestCarryLimit): if self.quests[i] == None: return i return -1 def updatePage(self): self.notify.debug('updatePage()') newQuests = base.localAvatar.quests carryLimit = base.localAvatar.getQuestCarryLimit() for i in xrange(ToontownGlobals.MaxQuestCarryLimit): if i < carryLimit: self.questFrames[i].show() else: self.questFrames[i].hide() for index, questDesc in self.quests.items(): if questDesc is not None and list(questDesc) not in newQuests: self.clearQuestFrame(index) for questDesc in newQuests: newQuestDesc = tuple(questDesc) if newQuestDesc not in self.quests.values(): index = self.getLowestUnusedIndex() self.fillQuestFrame(newQuestDesc, index) for i, questDesc in self.quests.iteritems(): if questDesc: if self.canDeleteQuest(questDesc): self.questFrames[i].setDeleteCallback(self.__deleteQuest) else: self.questFrames[i].setDeleteCallback(None) self.questFrames[i].update(questDesc) else: self.questFrames[i].unbindMouseEnter() messenger.send('questPageUpdated') return def enter(self): self.updatePage() ShtikerPage.ShtikerPage.enter(self) def exit(self): ShtikerPage.ShtikerPage.exit(self) def showQuestsOnscreenTutorial(self): self.setPos(0, 0, -0.2) self.showQuestsOnscreen() def showQuestsOnscreen(self): messenger.send('wakeup') timedif = globalClock.getRealTime() - self.lastQuestTime if timedif < 0.7: return self.lastQuestTime = 
globalClock.getRealTime() if self.onscreen or base.localAvatar.invPage.onscreen: return self.onscreen = 1 for i in xrange(ToontownGlobals.MaxQuestCarryLimit): if hasattr(self.questFrames[i], 'mapIndex'): self.questFrames[i].mapIndex.show() self.updatePage() self.reparentTo(aspect2d) self.title.hide() self.show() def hideQuestsOnscreenTutorial(self): self.setPos(0, 0, 0) self.hideQuestsOnscreen() def hideQuestsOnscreen(self): if not self.onscreen: return self.onscreen = 0 for i in xrange(ToontownGlobals.MaxQuestCarryLimit): if hasattr(self.questFrames[i], 'mapIndex'): self.questFrames[i].mapIndex.hide() self.reparentTo(self.book) self.title.show() self.hide() def canDeleteQuest(self, questDesc): return Quests.isQuestJustForFun(questDesc[0], questDesc[3]) and self.onscreen == 0 def __deleteQuest(self, questDesc): base.localAvatar.d_requestDeleteQuest(questDesc)
{ "content_hash": "08efb600004f286c64d1e4186373abe6", "timestamp": "", "source": "github", "line_count": 175, "max_line_length": 146, "avg_line_length": 32.57714285714286, "alnum_prop": 0.6134011576916331, "repo_name": "ToonTownInfiniteRepo/ToontownInfinite", "id": "afc32a3ec858643ec4ef6893d7e87bd4e67e992f", "size": "5701", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "toontown/shtiker/QuestPage.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1703277" }, { "name": "C#", "bytes": "9892" }, { "name": "C++", "bytes": "5468044" }, { "name": "Emacs Lisp", "bytes": "210083" }, { "name": "F#", "bytes": "4611" }, { "name": "JavaScript", "bytes": "7003" }, { "name": "Objective-C", "bytes": "23212" }, { "name": "Puppet", "bytes": "5245" }, { "name": "Python", "bytes": "34010215" }, { "name": "Shell", "bytes": "11192" }, { "name": "Tcl", "bytes": "1981257" } ], "symlink_target": "" }
import argparse, os, csv, subprocess, shlex

# this is a script for locating the DAM / DRMC integration metadata

parser = argparse.ArgumentParser(description="this is a script for locating the DAM / DRMC integration metadata")
parser.add_argument('-i', '--input', type=str, required=True, help='CSV of files missing metadata')
parser.add_argument('-s', '--search', type=str, required=True, help='path to backed up CSVs')
parser.add_argument('-o', '--output', type=str, required=False, help='where to put output new CSV to')
args = parser.parse_args()

if not (args.input):
    parser.error('you did not specify an input file')
if not (args.search):
    parser.error('you did not specify a directory to search')

firstline = True

with open(args.input, 'rU') as csvfile:
    c = csv.writer(open(args.output, "wb"))
    c.writerow(["filename", "ObjectID", "Component Number", "dip_uuid"])
    i = csv.reader(csvfile, delimiter=',')
    for row in i:
        if firstline:
            firstline = False
            continue
        cmd = "grep -i '" + row[0] + "' " + args.search + "/*.csv"
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        result = p.stdout.read()
        if result != "":
            result = result[result.index(':') + 1:]
            result = result.split(',')
            print result
            c.writerow(result)

        # if row[0] != "":
        #     request = urllib2.Request("http://drmc.museum.moma.org/api/aips/"+uuid)
        #     base64string = base64.encodestring('%s:%s' % (args.username, password)).replace('\n', '')
        #     request.add_header("Authorization", "Basic %s" % base64string)
        #     try:
        #         result = urllib2.urlopen(request)
        #         start_date = row[4]
        #         end_date = row[5]
        #         start_date_trimmed = start_date[:-10]
        #         end_date_trimmed = end_date[:-10]
        #         data = json.load(result)
        #         size = data['size']
        #         print start_date_trimmed, end_date_trimmed, size, uuid
        #         c.writerow([start_date_trimmed,end_date_trimmed,size,uuid])
        #     except urllib2.HTTPError, e:
        #         print "Could not find AIP! Error code "
        #         print e.args
{ "content_hash": "6563dd352ca22cfa29f7bcb688ce03a1", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 113, "avg_line_length": 34.85964912280702, "alnum_prop": 0.6628082536487166, "repo_name": "finoradin/moma-utils", "id": "01bbb57f293e202c42081b5c7e2d0389c5da5fa0", "size": "2010", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dam-sync/find_dam_csv.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "59845" }, { "name": "Shell", "bytes": "3987" } ], "symlink_target": "" }
import re
from datetime import datetime

from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.http import Request
from scrapy.selector import HtmlXPathSelector
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc

from psp_cz.items import ParlMemb
from .psp_cz_spider import get_parl_memb_id


class PoslanciPspCzSpider(CrawlSpider):
    """
    Spider crawls the psp.cz and gets information about parliament members
    """
    name = "poslanci.psp.cz"
    allowed_domains = ["www.psp.cz"]

    # we will start from "Poslanecké kluby" page
    start_urls = [
        "http://www.psp.cz/sqw/organy2.sqw?k=1"
    ]

    rules = (
        # follow links to parliamentary political groups
        Rule(SgmlLinkExtractor(allow=('\/snem.sqw\?.*id\=',)),
             callback='parse_parl_polit_groups',
             follow=False),
    )

    def parse_parl_polit_groups(self, response):
        """
        Parses parliament political groups
        """
        hxs = HtmlXPathSelector(response)
        base_url = get_base_url(response)

        memb_links = hxs.select('/html/body/div[2]/div/div/table/tbody//tr')

        for member_link in memb_links:
            region = member_link.select('td[1]/a/text()').extract()[0]
            region_url = urljoin_rfc(base_url, member_link.select('td[1]/a/@href').extract()[0])
            group = member_link.select('td[2]/a/text()').extract()[0]
            group_long = member_link.select('td[2]/a/@title').extract()[0]
            group_url = urljoin_rfc(base_url, member_link.select('td[2]/a/@href').extract()[0])
            request_url = urljoin_rfc(base_url, member_link.select('th/a/@href').extract()[0])

            # There is one exception on Miroslava Němcová detail page which has
            # totally different structure. This URL parameter forces the same
            # structure for everyone, no exception for chairperson.
            request_url += '&zk=7'

            request = Request(request_url,
                              self.parse_parl_memb,
                              meta={'id': request_url,
                                    'url': request_url,
                                    'region': region,
                                    'region_url': region_url,
                                    'group': group,
                                    'group_long': group_long,
                                    'group_url': group_url})
            yield request

    # this callback just adds sitting info to requests and follows links
    def parse_parl_memb(self, response):
        """
        Parses parliament member info
        """
        hxs = HtmlXPathSelector(response)
        base_url = get_base_url(response)

        picture_relative_url = hxs.select('//*[@id="main-content"]/div/div/div/a/img/@src').extract()[0]
        born_n_gender = hxs.select('//*[@id="main-content"]/div/div/div/div/p/strong/text()').re(r'(Narozen.*)')[0]

        gender = None
        if born_n_gender.find('Narozen:') != -1:
            gender = 'M'
        elif born_n_gender.find('Narozena:') != -1:
            gender = 'F'

        born = datetime.strptime(born_n_gender.split(' ', 1)[1], '%d.%m.%Y')

        parl_memb = ParlMemb()
        parl_memb['id'] = response.meta['id']
        parl_memb['url'] = response.meta['url'][:-5]  # remove &zk=7
        parl_memb['region'] = response.meta['region']
        parl_memb['region_url'] = response.meta['region_url']
        parl_memb['group'] = response.meta['group']
        parl_memb['group_long'] = response.meta['group_long']
        parl_memb['group_url'] = response.meta['group_url']
        parl_memb['name'] = hxs.select('//*[@id="main-content"]/h1/text()').extract()[0]
        parl_memb['born'] = born
        parl_memb['gender'] = gender
        parl_memb['image_urls'] = [urljoin_rfc(base_url, picture_relative_url)]
        parl_memb['parl_memb_id'] = get_parl_memb_id(parl_memb['url'])

        yield parl_memb
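The spider fills a `ParlMemb` item imported from `psp_cz.items`. A minimal sketch of what that Scrapy Item declaration looks like, with field names taken from the assignments in `parse_parl_memb()` above (the real module may declare more fields):

from scrapy.item import Item, Field


class ParlMemb(Item):
    # Fields mirror the keys assigned by the spider; this is an assumed reconstruction.
    id = Field()
    url = Field()
    region = Field()
    region_url = Field()
    group = Field()
    group_long = Field()
    group_url = Field()
    name = Field()
    born = Field()
    gender = Field()
    image_urls = Field()
    parl_memb_id = Field()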
{ "content_hash": "bb95264460bec11cd2b9cf89b4b6d951", "timestamp": "", "source": "github", "line_count": 92, "max_line_length": 115, "avg_line_length": 43.891304347826086, "alnum_prop": 0.5747894997523526, "repo_name": "saxicek/psp-cz-spider", "id": "b2c449d36e7cd0008746271a363d69578df0db95", "size": "4056", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "psp_cz/spiders/poslanci_psp_cz_spider.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "27756" } ], "symlink_target": "" }
from __future__ import unicode_literals

from datetime import datetime

import sqlalchemy
from sqlalchemy.orm import relationship, backref
import uuid

from test.functional.mock_box.db_model import DbModel


class FileModel(DbModel):
    """DB Model for Box files."""
    __tablename__ = 'box_file'

    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)  # pylint:disable=invalid-name
    file_id = sqlalchemy.Column(sqlalchemy.String(32), default=lambda: uuid.uuid4().hex)
    name = sqlalchemy.Column(sqlalchemy.String(255))
    parent_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('box_folder.id'))
    content = sqlalchemy.Column(sqlalchemy.LargeBinary)
    created_at = sqlalchemy.Column(sqlalchemy.DateTime, default=datetime.now)
    modified_at = sqlalchemy.Column(sqlalchemy.DateTime, onupdate=datetime.now)
    created_by_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('box_user.id'))
    created_by = relationship('UserModel', foreign_keys=[created_by_id])
    owned_by_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey('box_user.id'))
    shared_link = relationship(
        'ShareFileModel',
        backref=backref('shared_file', remote_side=[id]),
        cascade='save-update, delete',
    )
    locks = relationship('LockModel', backref='item', cascade='save-update, delete')
    etag = sqlalchemy.Column(sqlalchemy.String(32), default=lambda: uuid.uuid4().hex)
    sequence_id = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
    sha1 = sqlalchemy.Column(sqlalchemy.String(40))
    size = sqlalchemy.Column(sqlalchemy.Integer)

    __mapper_args__ = {
        "version_id_col": sequence_id,
    }
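The `__mapper_args__ = {"version_id_col": sequence_id}` line is what makes SQLAlchemy bump `sequence_id` on every update. A standalone sketch of that behaviour, assuming SQLAlchemy 1.4 or newer; the `VersionedNote` model is a made-up stand-in, not part of the mock Box schema:

import sqlalchemy
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()


class VersionedNote(Base):
    """Illustrative stand-in for FileModel: only the versioning behaviour is shown."""
    __tablename__ = 'note'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    body = sqlalchemy.Column(sqlalchemy.String(255))
    sequence_id = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
    __mapper_args__ = {"version_id_col": sequence_id}


engine = sqlalchemy.create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    note = VersionedNote(body='first draft')
    session.add(note)
    session.commit()
    print(note.sequence_id)  # 1 after the initial insert

    note.body = 'second draft'
    session.commit()
    print(note.sequence_id)  # bumped to 2 by version_id_col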
{ "content_hash": "909980a3e8a3de1702da76fa4ef1f7f3", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 115, "avg_line_length": 47.25, "alnum_prop": 0.7266313932980599, "repo_name": "jwkozel/demobx", "id": "0455c03c207e75392cdbcc29db3884e879fd66af", "size": "1718", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "test/functional/mock_box/db_model/file_model.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "309050" }, { "name": "Smarty", "bytes": "527" } ], "symlink_target": "" }
import sys
import time
import calendar


def main():
    if sys.argv[1:]:
        year = int(sys.argv[1])
    else:
        year = int(raw_input('In which year were you born? '))
    if 0 <= year < 100:
        print "I'll assume that by", year,
        year = year + 1900
        print 'you mean', year, 'and not the early Christian era'
    elif not (1850 <= year <= time.localtime()[0]):
        print "It's hard to believe you were born in", year
        return

    if sys.argv[2:]:
        month = int(sys.argv[2])
    else:
        month = int(raw_input('And in which month? (1-12) '))
    if not (1 <= month <= 12):
        print 'There is no month numbered', month
        return

    if sys.argv[3:]:
        day = int(sys.argv[3])
    else:
        day = int(raw_input('And on what day of that month? (1-31) '))
    if month == 2 and calendar.isleap(year):
        maxday = 29
    else:
        maxday = calendar.mdays[month]
    if not (1 <= day <= maxday):
        print 'There are no', day, 'days in that month!'
        return

    bdaytuple = (year, month, day)
    bdaydate = mkdate(bdaytuple)
    print 'You were born on', format(bdaytuple)

    todaytuple = time.localtime()[:3]
    todaydate = mkdate(todaytuple)
    print 'Today is', format(todaytuple)

    if bdaytuple > todaytuple:
        print 'You are a time traveler. Go back to the future!'
        return

    if bdaytuple == todaytuple:
        print 'You were born today. Have a nice life!'
        return

    days = todaydate - bdaydate
    print 'You have lived', days, 'days'

    age = 0
    for y in range(year, todaytuple[0] + 1):
        if bdaytuple < (y, month, day) <= todaytuple:
            age = age + 1
    print 'You are', age, 'years old'

    if todaytuple[1:] == bdaytuple[1:]:
        print 'Congratulations! Today is your', nth(age), 'birthday'
        print 'Yesterday was your',
    else:
        print 'Today is your',
    print nth(days - age), 'unbirthday'


def format((year, month, day)):
    return '%d %s %d' % (day, calendar.month_name[month], year)


def nth(n):
    if n == 1: return '1st'
    if n == 2: return '2nd'
    if n == 3: return '3rd'
    return '%dth' % n


def mkdate((year, month, day)):
    # January 1st, in 0 A.D. is arbitrarily defined to be day 1,
    # even though that day never actually existed and the calendar
    # was different then...
    days = year*365                  # years, roughly
    days = days + (year+3)//4        # plus leap years, roughly
    days = days - (year+99)//100     # minus non-leap years every century
    days = days + (year+399)//400    # plus leap years every 4 centuries
    for i in range(1, month):
        if i == 2 and calendar.isleap(year):
            days = days + 29
        else:
            days = days + calendar.mdays[i]
    days = days + day
    return days


if __name__ == "__main__":
    main()
{ "content_hash": "76e0c272ea1363f36b9d7a96c898f39f", "timestamp": "", "source": "github", "line_count": 97, "max_line_length": 73, "avg_line_length": 29.701030927835053, "alnum_prop": 0.5657757723012843, "repo_name": "nzavagli/UnrealPy", "id": "056ad44c463bea15f42eb909f0abdacde2777b23", "size": "3140", "binary": false, "copies": "10", "ref": "refs/heads/master", "path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Demo/scripts/unbirthday.py", "mode": "33188", "license": "mit", "language": [ { "name": "APL", "bytes": "587" }, { "name": "ASP", "bytes": "2753" }, { "name": "ActionScript", "bytes": "5686" }, { "name": "Ada", "bytes": "94225" }, { "name": "Agda", "bytes": "3154" }, { "name": "Alloy", "bytes": "6579" }, { "name": "ApacheConf", "bytes": "12482" }, { "name": "AppleScript", "bytes": "421" }, { "name": "Assembly", "bytes": "1093261" }, { "name": "AutoHotkey", "bytes": "3733" }, { "name": "AutoIt", "bytes": "667" }, { "name": "Awk", "bytes": "63276" }, { "name": "Batchfile", "bytes": "147828" }, { "name": "BlitzBasic", "bytes": "185102" }, { "name": "BlitzMax", "bytes": "2387" }, { "name": "Boo", "bytes": "1111" }, { "name": "Bro", "bytes": "7337" }, { "name": "C", "bytes": "108397183" }, { "name": "C#", "bytes": "156749" }, { "name": "C++", "bytes": "13535833" }, { "name": "CLIPS", "bytes": "6933" }, { "name": "CMake", "bytes": "12441" }, { "name": "COBOL", "bytes": "114812" }, { "name": "CSS", "bytes": "430375" }, { "name": "Ceylon", "bytes": "1387" }, { "name": "Chapel", "bytes": "4366" }, { "name": "Cirru", "bytes": "2574" }, { "name": "Clean", "bytes": "9679" }, { "name": "Clojure", "bytes": "23871" }, { "name": "CoffeeScript", "bytes": "20149" }, { "name": "ColdFusion", "bytes": "9006" }, { "name": "Common Lisp", "bytes": "49017" }, { "name": "Coq", "bytes": "66" }, { "name": "Cucumber", "bytes": "390" }, { "name": "Cuda", "bytes": "776" }, { "name": "D", "bytes": "7556" }, { "name": "DIGITAL Command Language", "bytes": "425938" }, { "name": "DTrace", "bytes": "6706" }, { "name": "Dart", "bytes": "591" }, { "name": "Dylan", "bytes": "6343" }, { "name": "Ecl", "bytes": "2599" }, { "name": "Eiffel", "bytes": "2145" }, { "name": "Elixir", "bytes": "4340" }, { "name": "Emacs Lisp", "bytes": "18303" }, { "name": "Erlang", "bytes": "5746" }, { "name": "F#", "bytes": "19156" }, { "name": "FORTRAN", "bytes": "38458" }, { "name": "Factor", "bytes": "10194" }, { "name": "Fancy", "bytes": "2581" }, { "name": "Fantom", "bytes": "25331" }, { "name": "GAP", "bytes": "29880" }, { "name": "GLSL", "bytes": "450" }, { "name": "Gnuplot", "bytes": "11501" }, { "name": "Go", "bytes": "5444" }, { "name": "Golo", "bytes": "1649" }, { "name": "Gosu", "bytes": "2853" }, { "name": "Groff", "bytes": "3458639" }, { "name": "Groovy", "bytes": "2586" }, { "name": "HTML", "bytes": "92126540" }, { "name": "Haskell", "bytes": "49593" }, { "name": "Haxe", "bytes": "16812" }, { "name": "Hy", "bytes": "7237" }, { "name": "IDL", "bytes": "2098" }, { "name": "Idris", "bytes": "2771" }, { "name": "Inform 7", "bytes": "1944" }, { "name": "Inno Setup", "bytes": "18796" }, { "name": "Ioke", "bytes": "469" }, { "name": "Isabelle", "bytes": "21392" }, { "name": "Jasmin", "bytes": "9428" }, { "name": "Java", "bytes": "4040623" }, { "name": "JavaScript", "bytes": "223927" }, { "name": "Julia", "bytes": "27687" }, { "name": "KiCad", "bytes": "475" }, { "name": "Kotlin", "bytes": "971" }, { "name": "LSL", "bytes": "160" }, { "name": "Lasso", "bytes": "18650" }, { 
"name": "Lean", "bytes": "6921" }, { "name": "Limbo", "bytes": "9891" }, { "name": "Liquid", "bytes": "862" }, { "name": "LiveScript", "bytes": "972" }, { "name": "Logos", "bytes": "19509" }, { "name": "Logtalk", "bytes": "7260" }, { "name": "Lua", "bytes": "8677" }, { "name": "Makefile", "bytes": "2053844" }, { "name": "Mask", "bytes": "815" }, { "name": "Mathematica", "bytes": "191" }, { "name": "Max", "bytes": "296" }, { "name": "Modelica", "bytes": "6213" }, { "name": "Modula-2", "bytes": "23838" }, { "name": "Module Management System", "bytes": "14798" }, { "name": "Monkey", "bytes": "2587" }, { "name": "Moocode", "bytes": "3343" }, { "name": "MoonScript", "bytes": "14862" }, { "name": "Myghty", "bytes": "3939" }, { "name": "NSIS", "bytes": "7663" }, { "name": "Nemerle", "bytes": "1517" }, { "name": "NewLisp", "bytes": "42726" }, { "name": "Nimrod", "bytes": "37191" }, { "name": "Nit", "bytes": "55581" }, { "name": "Nix", "bytes": "2448" }, { "name": "OCaml", "bytes": "42416" }, { "name": "Objective-C", "bytes": "104883" }, { "name": "Objective-J", "bytes": "15340" }, { "name": "Opa", "bytes": "172" }, { "name": "OpenEdge ABL", "bytes": "49943" }, { "name": "PAWN", "bytes": "6555" }, { "name": "PHP", "bytes": "68611" }, { "name": "PLSQL", "bytes": "45772" }, { "name": "Pan", "bytes": "1241" }, { "name": "Pascal", "bytes": "349743" }, { "name": "Perl", "bytes": "5931502" }, { "name": "Perl6", "bytes": "113623" }, { "name": "PigLatin", "bytes": "6657" }, { "name": "Pike", "bytes": "8479" }, { "name": "PostScript", "bytes": "18216" }, { "name": "PowerShell", "bytes": "14236" }, { "name": "Prolog", "bytes": "43750" }, { "name": "Protocol Buffer", "bytes": "3401" }, { "name": "Puppet", "bytes": "130" }, { "name": "Python", "bytes": "122886156" }, { "name": "QML", "bytes": "3912" }, { "name": "R", "bytes": "49247" }, { "name": "Racket", "bytes": "11341" }, { "name": "Rebol", "bytes": "17708" }, { "name": "Red", "bytes": "10536" }, { "name": "Redcode", "bytes": "830" }, { "name": "Ruby", "bytes": "91403" }, { "name": "Rust", "bytes": "6788" }, { "name": "SAS", "bytes": "15603" }, { "name": "SaltStack", "bytes": "1040" }, { "name": "Scala", "bytes": "730" }, { "name": "Scheme", "bytes": "50346" }, { "name": "Scilab", "bytes": "943" }, { "name": "Shell", "bytes": "2925097" }, { "name": "ShellSession", "bytes": "320" }, { "name": "Smali", "bytes": "832" }, { "name": "Smalltalk", "bytes": "158636" }, { "name": "Smarty", "bytes": "523" }, { "name": "SourcePawn", "bytes": "130" }, { "name": "Standard ML", "bytes": "36869" }, { "name": "Swift", "bytes": "2035" }, { "name": "SystemVerilog", "bytes": "265" }, { "name": "Tcl", "bytes": "6077233" }, { "name": "TeX", "bytes": "487999" }, { "name": "Tea", "bytes": "391" }, { "name": "TypeScript", "bytes": "535" }, { "name": "VHDL", "bytes": "4446" }, { "name": "VimL", "bytes": "32053" }, { "name": "Visual Basic", "bytes": "19441" }, { "name": "XQuery", "bytes": "4289" }, { "name": "XS", "bytes": "178055" }, { "name": "XSLT", "bytes": "1995174" }, { "name": "Xtend", "bytes": "727" }, { "name": "Yacc", "bytes": "25665" }, { "name": "Zephir", "bytes": "485" }, { "name": "eC", "bytes": "31545" }, { "name": "mupad", "bytes": "2442" }, { "name": "nesC", "bytes": "23697" }, { "name": "xBase", "bytes": "3349" } ], "symlink_target": "" }
import base64 import os import shutil import string import tempfile import unittest from datetime import timedelta from django.conf import settings from django.contrib.sessions.backends.cache import SessionStore as CacheSession from django.contrib.sessions.backends.cached_db import \ SessionStore as CacheDBSession from django.contrib.sessions.backends.db import SessionStore as DatabaseSession from django.contrib.sessions.backends.file import SessionStore as FileSession from django.contrib.sessions.backends.signed_cookies import \ SessionStore as CookieSession from django.contrib.sessions.exceptions import InvalidSessionKey from django.contrib.sessions.middleware import SessionMiddleware from django.contrib.sessions.models import Session from django.core import management from django.core.cache import caches from django.core.cache.backends.base import InvalidCacheBackendError from django.core.exceptions import ImproperlyConfigured from django.http import HttpResponse from django.test import ( RequestFactory, TestCase, ignore_warnings, override_settings, ) from django.test.utils import patch_logger from django.utils import six, timezone from django.utils.encoding import force_text from django.utils.six.moves import http_cookies class SessionTestsMixin(object): # This does not inherit from TestCase to avoid any tests being run with this # class, which wouldn't work, and to allow different TestCase subclasses to # be used. backend = None # subclasses must specify def setUp(self): self.session = self.backend() def tearDown(self): # NB: be careful to delete any sessions created; stale sessions fill up # the /tmp (with some backends) and eventually overwhelm it after lots # of runs (think buildbots) self.session.delete() def test_new_session(self): self.assertFalse(self.session.modified) self.assertFalse(self.session.accessed) def test_get_empty(self): self.assertEqual(self.session.get('cat'), None) def test_store(self): self.session['cat'] = "dog" self.assertTrue(self.session.modified) self.assertEqual(self.session.pop('cat'), 'dog') def test_pop(self): self.session['some key'] = 'exists' # Need to reset these to pretend we haven't accessed it: self.accessed = False self.modified = False self.assertEqual(self.session.pop('some key'), 'exists') self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) self.assertEqual(self.session.get('some key'), None) def test_pop_default(self): self.assertEqual(self.session.pop('some key', 'does not exist'), 'does not exist') self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_setdefault(self): self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar') self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar') self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) def test_update(self): self.session.update({'update key': 1}) self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) self.assertEqual(self.session.get('update key', None), 1) def test_has_key(self): self.session['some key'] = 1 self.session.modified = False self.session.accessed = False self.assertIn('some key', self.session) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) def test_values(self): self.assertEqual(list(self.session.values()), []) self.assertTrue(self.session.accessed) self.session['some key'] = 1 self.assertEqual(list(self.session.values()), [1]) def test_iterkeys(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = 
False i = six.iterkeys(self.session) self.assertTrue(hasattr(i, '__iter__')) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) self.assertEqual(list(i), ['x']) def test_itervalues(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False i = six.itervalues(self.session) self.assertTrue(hasattr(i, '__iter__')) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) self.assertEqual(list(i), [1]) def test_iteritems(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False i = six.iteritems(self.session) self.assertTrue(hasattr(i, '__iter__')) self.assertTrue(self.session.accessed) self.assertFalse(self.session.modified) self.assertEqual(list(i), [('x', 1)]) def test_clear(self): self.session['x'] = 1 self.session.modified = False self.session.accessed = False self.assertEqual(list(self.session.items()), [('x', 1)]) self.session.clear() self.assertEqual(list(self.session.items()), []) self.assertTrue(self.session.accessed) self.assertTrue(self.session.modified) def test_save(self): if (hasattr(self.session, '_cache') and 'DummyCache' in settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']): raise unittest.SkipTest("Session saving tests require a real cache backend") self.session.save() self.assertTrue(self.session.exists(self.session.session_key)) def test_delete(self): self.session.save() self.session.delete(self.session.session_key) self.assertFalse(self.session.exists(self.session.session_key)) def test_flush(self): self.session['foo'] = 'bar' self.session.save() prev_key = self.session.session_key self.session.flush() self.assertFalse(self.session.exists(prev_key)) self.assertNotEqual(self.session.session_key, prev_key) self.assertTrue(self.session.modified) self.assertTrue(self.session.accessed) def test_cycle(self): self.session['a'], self.session['b'] = 'c', 'd' self.session.save() prev_key = self.session.session_key prev_data = list(self.session.items()) self.session.cycle_key() self.assertNotEqual(self.session.session_key, prev_key) self.assertEqual(list(self.session.items()), prev_data) def test_invalid_key(self): # Submitting an invalid session key (either by guessing, or if the db has # removed the key) results in a new key being generated. try: session = self.backend('1') try: session.save() except AttributeError: self.fail( "The session object did not save properly. " "Middleware may be saving cache items without namespaces." 
) self.assertNotEqual(session.session_key, '1') self.assertEqual(session.get('cat'), None) session.delete() finally: # Some backends leave a stale cache entry for the invalid # session key; make sure that entry is manually deleted session.delete('1') def test_session_key_is_read_only(self): def set_session_key(session): session.session_key = session._get_new_session_key() self.assertRaises(AttributeError, set_session_key, self.session) # Custom session expiry def test_default_expiry(self): # A normal session has a max age equal to settings self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) # So does a custom session with an idle expiration time of 0 (but it'll # expire at browser close) self.session.set_expiry(0) self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) def test_custom_expiry_seconds(self): modification = timezone.now() self.session.set_expiry(10) date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_timedelta(self): modification = timezone.now() # Mock timezone.now, because set_expiry calls it on this code path. original_now = timezone.now try: timezone.now = lambda: modification self.session.set_expiry(timedelta(seconds=10)) finally: timezone.now = original_now date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_datetime(self): modification = timezone.now() self.session.set_expiry(modification + timedelta(seconds=10)) date = self.session.get_expiry_date(modification=modification) self.assertEqual(date, modification + timedelta(seconds=10)) age = self.session.get_expiry_age(modification=modification) self.assertEqual(age, 10) def test_custom_expiry_reset(self): self.session.set_expiry(None) self.session.set_expiry(10) self.session.set_expiry(None) self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE) def test_get_expire_at_browser_close(self): # Tests get_expire_at_browser_close with different settings and different # set_expiry calls with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False): self.session.set_expiry(10) self.assertFalse(self.session.get_expire_at_browser_close()) self.session.set_expiry(0) self.assertTrue(self.session.get_expire_at_browser_close()) self.session.set_expiry(None) self.assertFalse(self.session.get_expire_at_browser_close()) with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True): self.session.set_expiry(10) self.assertFalse(self.session.get_expire_at_browser_close()) self.session.set_expiry(0) self.assertTrue(self.session.get_expire_at_browser_close()) self.session.set_expiry(None) self.assertTrue(self.session.get_expire_at_browser_close()) def test_decode(self): # Ensure we can decode what we encode data = {'a test key': 'a test value'} encoded = self.session.encode(data) self.assertEqual(self.session.decode(encoded), data) def test_decode_failure_logged_to_security(self): bad_encode = base64.b64encode(b'flaskdj:alkdjf') with patch_logger('django.security.SuspiciousSession', 'warning') as calls: self.assertEqual({}, self.session.decode(bad_encode)) # check that the failed decode is logged self.assertEqual(len(calls), 1) self.assertIn('corrupted', calls[0]) def test_actual_expiry(self): # this doesn't work with 
JSONSerializer (serializing timedelta) with override_settings(SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'): self.session = self.backend() # reinitialize after overriding settings # Regression test for #19200 old_session_key = None new_session_key = None try: self.session['foo'] = 'bar' self.session.set_expiry(-timedelta(seconds=10)) self.session.save() old_session_key = self.session.session_key # With an expiry date in the past, the session expires instantly. new_session = self.backend(self.session.session_key) new_session_key = new_session.session_key self.assertNotIn('foo', new_session) finally: self.session.delete(old_session_key) self.session.delete(new_session_key) class DatabaseSessionTests(SessionTestsMixin, TestCase): backend = DatabaseSession def test_session_str(self): "Session repr should be the session key." self.session['x'] = 1 self.session.save() session_key = self.session.session_key s = Session.objects.get(session_key=session_key) self.assertEqual(force_text(s), session_key) def test_session_get_decoded(self): """ Test we can use Session.get_decoded to retrieve data stored in normal way """ self.session['x'] = 1 self.session.save() s = Session.objects.get(session_key=self.session.session_key) self.assertEqual(s.get_decoded(), {'x': 1}) def test_sessionmanager_save(self): """ Test SessionManager.save method """ # Create a session self.session['y'] = 1 self.session.save() s = Session.objects.get(session_key=self.session.session_key) # Change it Session.objects.save(s.session_key, {'y': 2}, s.expire_date) # Clear cache, so that it will be retrieved from DB del self.session._session_cache self.assertEqual(self.session['y'], 2) @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.db") def test_clearsessions_command(self): """ Test clearsessions command for clearing expired sessions. """ self.assertEqual(0, Session.objects.count()) # One object in the future self.session['foo'] = 'bar' self.session.set_expiry(3600) self.session.save() # One object in the past other_session = self.backend() other_session['foo'] = 'bar' other_session.set_expiry(-3600) other_session.save() # Two sessions are in the database before clearsessions... self.assertEqual(2, Session.objects.count()) management.call_command('clearsessions') # ... and one is deleted. self.assertEqual(1, Session.objects.count()) @override_settings(USE_TZ=True) class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests): pass class CacheDBSessionTests(SessionTestsMixin, TestCase): backend = CacheDBSession @unittest.skipIf('DummyCache' in settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'], "Session saving tests require a real cache backend") def test_exists_searches_cache_first(self): self.session.save() with self.assertNumQueries(0): self.assertTrue(self.session.exists(self.session.session_key)) # Some backends might issue a warning @ignore_warnings(module="django.core.cache.backends.base") def test_load_overlong_key(self): self.session._session_key = (string.ascii_letters + string.digits) * 20 self.assertEqual(self.session.load(), {}) @override_settings(SESSION_CACHE_ALIAS='sessions') def test_non_default_cache(self): # 21000 - CacheDB backend should respect SESSION_CACHE_ALIAS. 
self.assertRaises(InvalidCacheBackendError, self.backend) @override_settings(USE_TZ=True) class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests): pass # Don't need DB flushing for these tests, so can use unittest.TestCase as base class class FileSessionTests(SessionTestsMixin, unittest.TestCase): backend = FileSession def setUp(self): # Do file session tests in an isolated directory, and kill it after we're done. self.original_session_file_path = settings.SESSION_FILE_PATH self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp() # Reset the file session backend's internal caches if hasattr(self.backend, '_storage_path'): del self.backend._storage_path super(FileSessionTests, self).setUp() def tearDown(self): super(FileSessionTests, self).tearDown() settings.SESSION_FILE_PATH = self.original_session_file_path shutil.rmtree(self.temp_session_store) @override_settings( SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer") def test_configuration_check(self): del self.backend._storage_path # Make sure the file backend checks for a good storage dir self.assertRaises(ImproperlyConfigured, self.backend) def test_invalid_key_backslash(self): # Ensure we don't allow directory-traversal. # This is tested directly on _key_to_file, as load() will swallow # a SuspiciousOperation in the same way as an IOError - by creating # a new session, making it unclear whether the slashes were detected. self.assertRaises(InvalidSessionKey, self.backend()._key_to_file, "a\\b\\c") def test_invalid_key_forwardslash(self): # Ensure we don't allow directory-traversal self.assertRaises(InvalidSessionKey, self.backend()._key_to_file, "a/b/c") @override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file") def test_clearsessions_command(self): """ Test clearsessions command for clearing expired sessions. """ storage_path = self.backend._get_storage_path() file_prefix = settings.SESSION_COOKIE_NAME def count_sessions(): return len([session_file for session_file in os.listdir(storage_path) if session_file.startswith(file_prefix)]) self.assertEqual(0, count_sessions()) # One object in the future self.session['foo'] = 'bar' self.session.set_expiry(3600) self.session.save() # One object in the past other_session = self.backend() other_session['foo'] = 'bar' other_session.set_expiry(-3600) other_session.save() # Two sessions are in the filesystem before clearsessions... self.assertEqual(2, count_sessions()) management.call_command('clearsessions') # ... and one is deleted. self.assertEqual(1, count_sessions()) class CacheSessionTests(SessionTestsMixin, unittest.TestCase): backend = CacheSession # Some backends might issue a warning @ignore_warnings(module="django.core.cache.backends.base") def test_load_overlong_key(self): self.session._session_key = (string.ascii_letters + string.digits) * 20 self.assertEqual(self.session.load(), {}) def test_default_cache(self): self.session.save() self.assertNotEqual(caches['default'].get(self.session.cache_key), None) @override_settings(CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', }, 'sessions': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'session', }, }, SESSION_CACHE_ALIAS='sessions') def test_non_default_cache(self): # Re-initialize the session backend to make use of overridden settings. 
self.session = self.backend() self.session.save() self.assertEqual(caches['default'].get(self.session.cache_key), None) self.assertNotEqual(caches['sessions'].get(self.session.cache_key), None) class SessionMiddlewareTests(TestCase): @override_settings(SESSION_COOKIE_SECURE=True) def test_secure_session_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) self.assertTrue( response.cookies[settings.SESSION_COOKIE_NAME]['secure']) @override_settings(SESSION_COOKIE_HTTPONLY=True) def test_httponly_session_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) self.assertTrue( response.cookies[settings.SESSION_COOKIE_NAME]['httponly']) self.assertIn(http_cookies.Morsel._reserved['httponly'], str(response.cookies[settings.SESSION_COOKIE_NAME])) @override_settings(SESSION_COOKIE_HTTPONLY=False) def test_no_httponly_session_cookie(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly']) self.assertNotIn(http_cookies.Morsel._reserved['httponly'], str(response.cookies[settings.SESSION_COOKIE_NAME])) def test_session_save_on_500(self): request = RequestFactory().get('/') response = HttpResponse('Horrible error') response.status_code = 500 middleware = SessionMiddleware() # Simulate a request the modifies the session middleware.process_request(request) request.session['hello'] = 'world' # Handle the response through the middleware response = middleware.process_response(request, response) # Check that the value wasn't saved above. self.assertNotIn('hello', request.session.load()) def test_session_delete_on_end(self): request = RequestFactory().get('/') response = HttpResponse('Session test') middleware = SessionMiddleware() # Before deleting, there has to be an existing cookie request.COOKIES[settings.SESSION_COOKIE_NAME] = 'abc' # Simulate a request that ends the session middleware.process_request(request) request.session.flush() # Handle the response through the middleware response = middleware.process_response(request, response) # Check that the cookie was deleted, not recreated. # A deleted cookie header looks like: # Set-Cookie: sessionid=; expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/ self.assertEqual( 'Set-Cookie: {}=; expires=Thu, 01-Jan-1970 00:00:00 GMT; ' 'Max-Age=0; Path=/'.format(settings.SESSION_COOKIE_NAME), str(response.cookies[settings.SESSION_COOKIE_NAME]) ) # Don't need DB flushing for these tests, so can use unittest.TestCase as base class class CookieSessionTests(SessionTestsMixin, unittest.TestCase): backend = CookieSession def test_save(self): """ This test tested exists() in the other session backends, but that doesn't make sense for us. 
""" pass def test_cycle(self): """ This test tested cycle_key() which would create a new session key for the same session data. But we can't invalidate previously signed cookies (other than letting them expire naturally) so testing for this behavior is meaningless. """ pass @unittest.expectedFailure def test_actual_expiry(self): # The cookie backend doesn't handle non-default expiry dates, see #19201 super(CookieSessionTests, self).test_actual_expiry()
{ "content_hash": "51e92ae7ac3c9f523202d9c6e5edc7c7", "timestamp": "", "source": "github", "line_count": 634, "max_line_length": 106, "avg_line_length": 38.132492113564666, "alnum_prop": 0.6539129715420251, "repo_name": "willharris/django", "id": "112ac30d35b891a425e2da6060be82abfbb74749", "size": "24176", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "tests/sessions_tests/tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "43000" }, { "name": "Gettext Catalog", "bytes": "9145447" }, { "name": "HTML", "bytes": "168768" }, { "name": "JavaScript", "bytes": "105614" }, { "name": "Makefile", "bytes": "125" }, { "name": "Python", "bytes": "10603386" }, { "name": "Shell", "bytes": "3056" }, { "name": "Smarty", "bytes": "130" } ], "symlink_target": "" }
import tornado.web


class Header2(tornado.web.UIModule):

    def render(self, header):
        """This UI module example tests inserting a second UI module package
        into the application's UI module stack, and rendering a template from
        another component while the module itself is being rendered.

        :param header: The header string to be rendered by the module.
        :return: A header string
        """
        return self.render_string('testapp:header2.html', header=header)
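Outside Firenado's component machinery, wiring a UI module such as Header2 into a plain Tornado application looks roughly like the sketch below. The import path, handler and template names are assumptions, and the 'testapp:' template prefix above is resolved by Firenado rather than stock Tornado.

import tornado.web

from testapp.components.internal.uimodules import Header2  # assumed import path


class MainHandler(tornado.web.RequestHandler):
    def get(self):
        # index.html would invoke the module with: {% module Header2("Welcome") %}
        self.render("index.html")


application = tornado.web.Application(
    [(r"/", MainHandler)],
    ui_modules={"Header2": Header2},  # exposes the module to templates by name
    template_path="templates",
)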
{ "content_hash": "51640cf267534de3d703e1b5fcbeb54b", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 79, "avg_line_length": 35.642857142857146, "alnum_prop": 0.687374749498998, "repo_name": "candango/firenado", "id": "f2b39e396b280fe3545a6e30c9e3cd0498c8ff54", "size": "1166", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "examples/testapp/components/internal/uimodules.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Gherkin", "bytes": "472" }, { "name": "HTML", "bytes": "5244" }, { "name": "Python", "bytes": "226801" }, { "name": "Shell", "bytes": "1289" } ], "symlink_target": "" }
from heat.common import exception from heat.common.i18n import _ from heat.engine import attributes from heat.engine import properties from heat.engine import resource from heat.engine import support class Host(resource.Resource): """A resource to manage Blazar hosts. Host resource manages the physical hosts for the lease/reservation within OpenStack. # TODO(asmita): Based on an agreement with Blazar team, this resource class does not support updating host resource as currently Blazar does not support to delete existing extra_capability keys while updating host. Also, in near future, when Blazar team will come up with a new alternative API to resolve this issue, we will need to modify this class. """ support_status = support.SupportStatus(version='12.0.0') PROPERTIES = ( NAME, EXTRA_CAPABILITY, ) = ( 'name', 'extra_capability', ) ATTRIBUTES = ( HYPERVISOR_HOSTNAME, HYPERVISOR_TYPE, HYPERVISOR_VERSION, VCPUS, CPU_INFO, MEMORY_MB, LOCAL_GB, SERVICE_NAME, RESERVABLE, STATUS, TRUST_ID, EXTRA_CAPABILITY_ATTR, CREATED_AT, UPDATED_AT, ) = ( 'hypervisor_hostname', 'hypervisor_type', 'hypervisor_version', 'vcpus', 'cpu_info', 'memory_mb', 'local_gb', 'service_name', 'reservable', 'status', 'trust_id', 'extra_capability', 'created_at', 'updated_at', ) properties_schema = { NAME: properties.Schema( properties.Schema.STRING, _('The name of the host.'), required=True, ), EXTRA_CAPABILITY: properties.Schema( properties.Schema.MAP, _('The extra capability of the host.'), ) } attributes_schema = { HYPERVISOR_HOSTNAME: attributes.Schema( _('The hypervisor name of the host.'), type=attributes.Schema.STRING, ), HYPERVISOR_TYPE: attributes.Schema( _('The hypervisor type the host.'), type=attributes.Schema.STRING, ), HYPERVISOR_VERSION: attributes.Schema( _('The hypervisor version of the host.'), type=attributes.Schema.INTEGER, ), VCPUS: attributes.Schema( _('The number of the VCPUs of the host.'), type=attributes.Schema.INTEGER, ), CPU_INFO: attributes.Schema( _('Information of the CPU of the host.'), type=attributes.Schema.MAP, ), MEMORY_MB: attributes.Schema( _('Megabytes of the memory of the host.'), type=attributes.Schema.INTEGER, ), LOCAL_GB: attributes.Schema( _('Gigabytes of the disk of the host.'), type=attributes.Schema.INTEGER, ), SERVICE_NAME: attributes.Schema( _('The compute service name of the host.'), type=attributes.Schema.STRING, ), RESERVABLE: attributes.Schema( _('The flag which represents whether the host is reservable ' 'or not.'), type=attributes.Schema.BOOLEAN, ), STATUS: attributes.Schema( _('The status of the host.'), type=attributes.Schema.STRING, ), TRUST_ID: attributes.Schema( _('The UUID of the trust of the host operator.'), type=attributes.Schema.STRING, ), EXTRA_CAPABILITY_ATTR: attributes.Schema( _('The extra capability of the host.'), type=attributes.Schema.MAP, ), CREATED_AT: attributes.Schema( _('The date and time when the host was created. ' 'The date and time format must be "CCYY-MM-DD hh:mm".'), type=attributes.Schema.STRING, ), UPDATED_AT: attributes.Schema( _('The date and time when the host was updated. ' 'The date and time format must be "CCYY-MM-DD hh:mm".'), type=attributes.Schema.STRING ), } default_client_name = 'blazar' entity = 'host' def _parse_extra_capability(self, args): if self.NAME in args[self.EXTRA_CAPABILITY]: # Remove "name" key if present in the extra_capability property. 
del args[self.EXTRA_CAPABILITY][self.NAME] args.update(args[self.EXTRA_CAPABILITY]) args.pop(self.EXTRA_CAPABILITY) return args def handle_create(self): args = dict((k, v) for k, v in self.properties.items() if v is not None) if self.EXTRA_CAPABILITY in args: args = self._parse_extra_capability(args) host = self.client_plugin().create_host(**args) self.resource_id_set(host['id']) return host['id'] def _resolve_attribute(self, name): if self.resource_id is None: return host = self.client_plugin().get_host(self.resource_id) try: return host[name] except KeyError: raise exception.InvalidTemplateAttribute(resource=self.name, key=name) def resource_mapping(): return { 'OS::Blazar::Host': Host }
{ "content_hash": "4c9fb3eb13ab0f1a5a8305a6b75a329a", "timestamp": "", "source": "github", "line_count": 154, "max_line_length": 78, "avg_line_length": 33.95454545454545, "alnum_prop": 0.5842417288200421, "repo_name": "openstack/heat", "id": "272fe90ce9dd1b94b2f3cbf047873f1fea44abee", "size": "5804", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "heat/engine/resources/openstack/blazar/host.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "9145593" }, { "name": "Shell", "bytes": "65832" } ], "symlink_target": "" }
from django.views.generic import TemplateView
from django.views.decorators.cache import cache_page


class HomeView(TemplateView):
    template_name = 'pages/home.html'


home = cache_page(60 * 10)(HomeView.as_view())
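A minimal sketch of how the cached home view might be wired into a URLconf; the import path and URL pattern are assumptions, and django.conf.urls.url is the pre-Django-2.0 style API.

from django.conf.urls import url

from core.views import home  # assumed import path, matching the file location above

urlpatterns = [
    url(r'^$', home, name='home'),
]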
{ "content_hash": "95662813a3b26f94dc93bd99612f0250", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 52, "avg_line_length": 30.857142857142858, "alnum_prop": 0.7685185185185185, "repo_name": "xiahuang119/easyus", "id": "bed0b72d889e18d15dca063a317cf4e8174cbe57", "size": "216", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "easyus/core/views.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "128071" }, { "name": "JavaScript", "bytes": "3790" }, { "name": "Python", "bytes": "17290" } ], "symlink_target": "" }
from google.cloud.video import stitcher_v1


async def sample_delete_cdn_key():
    # Create a client
    client = stitcher_v1.VideoStitcherServiceAsyncClient()

    # Initialize request argument(s)
    request = stitcher_v1.DeleteCdnKeyRequest(
        name="name_value",
    )

    # Make the request
    await client.delete_cdn_key(request=request)

# [END videostitcher_v1_generated_VideoStitcherService_DeleteCdnKey_async]
{ "content_hash": "d217966e1690a7d1d0799fa9f930a85f", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 74, "avg_line_length": 25.235294117647058, "alnum_prop": 0.7272727272727273, "repo_name": "googleapis/python-video-stitcher", "id": "b7095fed0f0c97a7d15ce644ee8b0d28bcfc429d", "size": "1833", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "samples/generated_samples/videostitcher_v1_generated_video_stitcher_service_delete_cdn_key_async.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2050" }, { "name": "Python", "bytes": "1027964" }, { "name": "Shell", "bytes": "30684" } ], "symlink_target": "" }
from myhvac_service import cfg
from myhvac_service import db
from myhvac_service.db import models

import logging
from datetime import datetime, timedelta

opts = [
    cfg.IntOpt('max_measurement_age_threshold_min', default=12,
               help='The maximum age (in minutes) of the most recent temperature measurement for a '
                    'sensor to be considered when calculating overall system temperature')
]

CONF = cfg.CONF
CONF.register_opts(opts, 'temp')

LOG = logging.getLogger(__name__)


def get_current_temp():
    def do(session):
        temp_agg = 0
        temp_cnt = 0
        room_models = db.get_rooms(session)
        LOG.debug(room_models)
        for room_model in room_models:
            LOG.debug(room_model)
            if not room_model.active:
                continue

            room_temp = None
            measurement_agg = 0
            measurement_cnt = 0
            if room_model.sensors:
                for sensor_model in room_model.sensors:
                    LOG.debug(sensor_model)
                    measurement = db.get_most_recent_sensor_temperature(
                        session, sensor_id=sensor_model.id, order_desc=True,
                        order_by=models.Measurement.recorded_date)

                    delta_min = CONF.temp.max_measurement_age_threshold_min
                    if measurement and measurement.recorded_date > datetime.utcnow() - timedelta(minutes=delta_min):
                        # Accumulate across sensors so the division below yields
                        # the room's average rather than a fraction of one reading.
                        measurement_agg = measurement_agg + measurement.data
                        measurement_cnt = measurement_cnt + 1

                if measurement_cnt > 0 and measurement_agg > 0:
                    room_temp = measurement_agg / measurement_cnt

            if room_temp:
                temp_agg = temp_agg + (room_temp * room_model.weight)
                temp_cnt = temp_cnt + room_model.weight

        if not temp_cnt and not temp_agg:
            LOG.warn('No temperature data available. Either no data exists, or the data that exists is too old.')
            return None

        return temp_agg / temp_cnt

    return db.sessionize(do)
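A standalone sketch of the weighting rule get_current_temp() implements, with made-up rooms and weights: each active room contributes room_temp * weight, and the result is normalised by the total weight rather than the room count.

rooms = [
    {"temp": 71.0, "weight": 2},  # e.g. a large living area that counts double
    {"temp": 68.0, "weight": 1},
]
weighted_sum = sum(r["temp"] * r["weight"] for r in rooms)
total_weight = sum(r["weight"] for r in rooms)
print(weighted_sum / total_weight)  # 70.0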
{ "content_hash": "c0f3c29da6b51a5fc750050a155a4624", "timestamp": "", "source": "github", "line_count": 62, "max_line_length": 116, "avg_line_length": 36.483870967741936, "alnum_prop": 0.5459770114942529, "repo_name": "alanquillin/myhvac_service", "id": "1f7ddcb3be07dd6593c9dc5d322303d8137390a9", "size": "2262", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "myhvac_service/temp.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "79166" } ], "symlink_target": "" }
import ssl as ssl_module from eventlet import patcher from oslo_serialization import jsonutils from subscription import Subscription kombu = patcher.import_patched('kombu') class MqClient(object): def __init__(self, login, password, host, port, virtual_host, ssl=False, ca_certs=None): ssl_params = None if ssl is True: ssl_params = { 'ca_certs': ca_certs, 'cert_reqs': ssl_module.CERT_REQUIRED } self._connection = kombu.Connection( 'amqp://{0}:{1}@{2}:{3}/{4}'.format( login, password, host, port, virtual_host ), ssl=ssl_params ) self._channel = None self._connected = False def __enter__(self): self.connect() return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() return False def connect(self): self._connection.connect() self._channel = self._connection.channel() self._connected = True def close(self): self._connection.close() self._connected = False def declare(self, queue, exchange='', enable_ha=False, ttl=0): if not self._connected: raise RuntimeError('Not connected to RabbitMQ') queue_arguments = {} if enable_ha is True: # To use mirrored queues feature in RabbitMQ 2.x # we need to declare this policy on the queue itself. # # Warning: this option has no effect on RabbitMQ 3.X, # to enable mirrored queues feature in RabbitMQ 3.X, please # configure RabbitMQ. queue_arguments['x-ha-policy'] = 'all' if ttl > 0: queue_arguments['x-expires'] = ttl exchange = kombu.Exchange(exchange, type='direct', durable=True) queue = kombu.Queue(queue, exchange, queue, durable=True, queue_arguments=queue_arguments) bound_queue = queue(self._connection) bound_queue.declare() def send(self, message, key, exchange=''): if not self._connected: raise RuntimeError('Not connected to RabbitMQ') producer = kombu.Producer(self._connection) producer.publish( exchange=str(exchange), routing_key=str(key), body=jsonutils.dumps(message.body), message_id=str(message.id) ) def open(self, queue, prefetch_count=1): if not self._connected: raise RuntimeError('Not connected to RabbitMQ') return Subscription(self._connection, queue, prefetch_count)
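A minimal usage sketch for the client above. The broker address, credentials and queue name are placeholders, and a namedtuple stands in for the project's message class, since send() only needs an object exposing id and body.

from collections import namedtuple

from murano.common.messaging.mqclient import MqClient  # import path per the repo layout above

Message = namedtuple('Message', ['id', 'body'])

# Declare a durable queue and publish one JSON-serialisable message to it.
with MqClient('guest', 'guest', 'localhost', 5672, '/') as client:
    client.declare('results')
    client.send(Message(id='msg-1', body={'status': 'ok'}), key='results')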
{ "content_hash": "1215c346f9c2083f78e255a085fcf9c4", "timestamp": "", "source": "github", "line_count": 88, "max_line_length": 72, "avg_line_length": 30.977272727272727, "alnum_prop": 0.5630961115187088, "repo_name": "olivierlemasle/murano", "id": "2a4465844caea189b72913b0d1beab508bf2c419", "size": "3309", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "murano/common/messaging/mqclient.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "152" }, { "name": "Mako", "bytes": "1013" }, { "name": "PowerShell", "bytes": "2772" }, { "name": "Puppet", "bytes": "86" }, { "name": "Python", "bytes": "1267810" }, { "name": "Ruby", "bytes": "444" }, { "name": "Shell", "bytes": "25578" } ], "symlink_target": "" }
from glob import glob

import numpy as np


def split(filename):
    data = np.loadtxt(filename, skiprows=1)

    # Put on Angstrom scale.
    data[:,0] *= 10.

    output_filename_base = ".".join(filename.split(".")[:-1])

    # Split at points:
    split_points = (5500, 6250, 7000)
    color = ("blue", "green", "red", "ir")

    previous_point = 0
    for i, point in enumerate(split_points):
        index = data[:,0].searchsorted(point)
        this_channel = data[previous_point:index]
        sampling_rate = int(np.floor(len(this_channel)/2000.))
        np.savetxt("{0}_{1}.txt".format(output_filename_base, color[i]),
                   this_channel[::sampling_rate])
        previous_point = index

    last_channel = data[previous_point:]
    sampling_rate = int(np.floor(len(last_channel)/2000.))
    np.savetxt("{0}_{1}.txt".format(output_filename_base, color[-1]),
               last_channel[::sampling_rate])
    print("done")


if __name__ == "__main__":
    files = glob("*narval.txt")
    # Use an explicit loop rather than map() so the side effects also run on Python 3,
    # where map() is lazy.
    for filename in files:
        split(filename)
{ "content_hash": "f62a3cc72d4983b7e6be8aef7cf2bf47", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 103, "avg_line_length": 30.454545454545453, "alnum_prop": 0.6069651741293532, "repo_name": "andycasey/original-oracle", "id": "ebfd179e83000a7a023dcdc5a26f7508b4e83269", "size": "1005", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "oracle/tests/data/benchmarks/splitter.py", "mode": "33188", "license": "mit", "language": [ { "name": "AGS Script", "bytes": "33792642" }, { "name": "Assembly", "bytes": "1610" }, { "name": "C", "bytes": "5169" }, { "name": "CSS", "bytes": "677554" }, { "name": "FORTRAN", "bytes": "377579" }, { "name": "IDL", "bytes": "6113" }, { "name": "JavaScript", "bytes": "4492601" }, { "name": "Perl", "bytes": "447" }, { "name": "Python", "bytes": "341576" }, { "name": "Shell", "bytes": "1958" } ], "symlink_target": "" }
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk


class vtkLineSource(SimpleVTKClassModuleBase):
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkLineSource(), 'Processing.',
            (), ('vtkPolyData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
{ "content_hash": "b930b0d9ab00d32b25d727cd78f58096", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 63, "avg_line_length": 36.90909090909091, "alnum_prop": 0.6502463054187192, "repo_name": "zhangfangyan/devide", "id": "305fd811e1bbab6a90531bb722fa4420096532cf", "size": "467", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "modules/vtk_basic/vtkLineSource.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "NSIS", "bytes": "2786" }, { "name": "Python", "bytes": "3102319" }, { "name": "Shell", "bytes": "7369" } ], "symlink_target": "" }
from spresso.controller.grant.settings import Setting
from spresso.model.authentication.json_schema import WellKnownInfoDefinition, \
    IdentityAssertionDefinition
from spresso.model.settings import Container, Schema, Endpoint
from spresso.utils.base import get_file_content


class IdentityProvider(Setting):
    resource_path = "resources/authentication/"
    js_template = "script/idp.js"

    json_schemata = Container(
        Schema("info", WellKnownInfoDefinition()),
        Schema("sign", IdentityAssertionDefinition())
    )

    # Provider URL paths
    endpoints = Container(
        Endpoint("info", "/.well-known/spresso-info", ["GET"]),
        Endpoint("login", "/.well-known/spresso-login", ["GET", "POST"]),
        Endpoint("sign", "/sign", ["POST"]),
    )

    # External URL path
    endpoints_ext = Container(
        Endpoint("proxy", "/.well-known/spresso-proxy", ["GET"])
    )

    # Subresource Integrity
    # Currently not in use, as SRI for iframes is currently under development.
    # This should be used in future versions.
    sri = False
    sri_hash = None

    def __init__(self, domain, private_key_path, public_key_path):
        super(IdentityProvider, self).__init__()
        self.domain = domain
        self.private_key = get_file_content(private_key_path, "rb")
        self.public_key = get_file_content(public_key_path, "r")
{ "content_hash": "4c13ce5dbd98c94df9a57192798c5b97", "timestamp": "", "source": "github", "line_count": 39, "max_line_length": 79, "avg_line_length": 35.256410256410255, "alnum_prop": 0.6676363636363636, "repo_name": "lujung/python-spresso", "id": "80caeb7009a6a91c9f3e249d412da1a926a8e477", "size": "1375", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "spresso/controller/grant/authentication/config/identity_provider.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "409" }, { "name": "HTML", "bytes": "3273" }, { "name": "JavaScript", "bytes": "10312" }, { "name": "Makefile", "bytes": "169" }, { "name": "Python", "bytes": "173709" } ], "symlink_target": "" }
import sys import json import ethtool import shlex import re def match_list(value, list): if len(list) == 0: return False for exp in list: if re.match(exp, value): return True return False def has_match(name, module_type, bus_type, name_list, module_type_list, bus_type_list): return match_list(name, name_list) or \ match_list(module_type, module_type_list) or match_list(bus_type, bus_type_list) # read the argument string from the arguments file args_file = sys.argv[1] args_data = file(args_file).read() exclude_names=[] exclude_module_types=[] exclude_bus_types=[] include_names=[] include_module_types=[] include_bus_types=[] ignore_names=[] ignore_module_types=["tun", "bridge", "bonding", "veth"] ignore_bus_types=["^\W*$", "N/A", "tap"] debug_out = False # parse the task options arguments = shlex.split(args_data) for arg in arguments: # exclude any arguments without an equals in it if "=" in arg: (key, value) = arg.split("=") if key == "exclude-names": exclude_names = re.split("\W*,\W*", value) elif key == "exclude-module-types": exclude_module_types = re.split("\W*,\W*", value) elif key == "exclude-bus-types": exclude_bus_types = re.split("\W*,\W*", value) elif key == "include-names": include_names = re.split("\W*,\W*", value) elif key == "include-module-types": include_module_types = re.split("\W*,\W*", value) elif key == "include-bus-types": include_bus_types = re.split("\W*,\W*", value) elif key == "ignore-names": ignore_names = re.split("\W*,\W*", value) elif key == "ignore-module-types": ignore_module_types = re.split("\W*,\W*", value) elif key == "ignore-bus-types": ignore_bus_types = re.split("\W*,\W*", value) elif key == "debug": debug_out = value.lower() in ["true", "yes", "on", "t", "y", "1"] elif key[0] != '_': raise ValueError('Unknown option to task "%s"' % key) included = {} ignored = {} excluded = {} debug = [] if debug_out: debug.append("EXCLUDES: '%s', '%s', '%s'" % (exclude_names, exclude_module_types, exclude_bus_types)) debug.append("INCLUDE: '%s', '%s', '%s'" % (include_names, include_module_types, include_bus_types)) debug.append("IGNORE: '%s', '%s', '%s'" % (ignore_names, ignore_module_types, ignore_bus_types)) for i in ethtool.get_devices(): o = { "name": i } try: module = ethtool.get_module(i) businfo = ethtool.get_businfo(i) # If it matches an ignore pattern then just ignore it. if has_match(i, module, businfo, ignore_names, ignore_module_types, ignore_bus_types): if debug_out: debug.append("IGNORE '%s' on ignore match" % i) ignored[i] = { "name": i, "module": module, } continue # If no include specifications have been set and the interface is not ignored # it needs to be considered for inclusion if len(include_names) + len(include_module_types) + len(include_bus_types) == 0: # If it matches exclude list then exclude it, else include it if has_match(i, module, businfo, exclude_names, exclude_module_types, exclude_bus_types): if debug_out: debug.append("EXCLUDE '%s' with no include specifiers, but with exclude match" %i) excluded[i] = { "name": i, "module": module, } continue if debug_out: debug.append("INCLUDE '%s' with no include specifiers, but with no exclude match" % i) included[i] = { "name": i, "module": module, } continue # If any of the include specifications are set then the interface must match at least one # to be added to the mached list. 
if has_match(i, module, businfo, include_names, include_module_types, include_bus_types): if debug_out: debug.append("MATCH '%s' has include match" % i) # If it matches exclude list then exclude it, else include it if has_match(i, module, businfo, exclude_names, exclude_module_types, exclude_bus_types): if debug_out: debug.append("EXCLUDE '%s' with include match, but also with exclude match" % i) excluded[i] = { "name": i, "module": module, } continue if debug_out: debug.append("INCLUDE '%s' with include match and with no exclude match" % i) included[i] = { "name": i, "module" : module, } continue # Implicitly ignore if debug_out: debug.append("IGNORE: '%s' implicitly" %i) ignored[i] = { "name": i, "module": module, } except: pass result = { "changed" : False, "ansible_facts" : { "netinfo" : { "included" : included, "excluded" : excluded, "ignored" : ignored, }, }, } if debug_out: result["ansible_facts"]["netinfo"]["debug"] = debug print json.dumps(result)
{ "content_hash": "f0d44b75b1cd9e8c3394c7da51d3d402", "timestamp": "", "source": "github", "line_count": 151, "max_line_length": 112, "avg_line_length": 34.966887417218544, "alnum_prop": 0.5611742424242424, "repo_name": "opencord/maas", "id": "78eabe00c315c6f500ffe6476392fef914c7ed5e", "size": "5898", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "library/netinfo.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Go", "bytes": "129747" }, { "name": "Makefile", "bytes": "4742" }, { "name": "Python", "bytes": "80511" }, { "name": "Shell", "bytes": "36702" }, { "name": "Smarty", "bytes": "1073" } ], "symlink_target": "" }
"""Streamlink extracts streams from various services. The main compontent of Streamlink is a command-line utility that launches the streams in a video player. An API is also provided that allows direct access to stream data. Full documentation is available at https://streamlink.github.io. """ import warnings from sys import version_info if version_info[:2] == (2, 6): warnings.warn( "Python 2.6 is no longer supported by the Python core team, please " "upgrade your Python. A future version of streamlink will drop " "support for Python 2.6", DeprecationWarning ) from ._version import get_versions __version__ = get_versions()['version'] del get_versions __title__ = "streamlink" __license__ = "Simplified BSD" __author__ = "Streamlink" __copyright__ = "Copyright 2018 Streamlink" __credits__ = [ "Agustín Carrasco (@asermax)", "Andrew Bashore (@bashtech)", "Andy Mikhailenko (@neithere)", "Athanasios Oikonomou (@athoik)", "Brian Callahan (@ibara)", "Che (@chhe)", "Christopher Rosell (@streamlink)", "Daniel Meißner (@meise)", "Daniel Miranda (@danielkza)", "Daniel Wallace (@gtmanfred)", "David Arvelo (@darvelo)", "Dominik Dabrowski (@doda)", "Erik G (@tboss)", "Eric J (@wormeyman)", "Ethan Jones (@jonesz)", "Gaspard Jankowiak (@gapato)", "Jaime Marquínez Ferrándiz (@jaimeMF)", "Jan Tore Morken (@jantore)", "John Peterson (@john-peterson)", "Jon Bergli Heier (@sn4kebite)", "Joseph Glanville (@josephglanville)", "Julian Richen (@FireDart)", "Kacper (@kasper93)", "Martin Panter (@vadmium)", "Max Nordlund (@maxnordlund)", "Michael Cheah (@cheah)", "Moritz Blanke", "Niall McAndrew (@niallm90)", "Niels Kräupl (@Gamewalker)", "Pascal Romahn (@skulblakka)", "Sam Edwards (@dotsam)", "Stefan Breunig (@breunigs)", "Suhail Patel (@suhailpatel)", "Sunaga Takahiro (@sunaga720)", "Vitaly Evtushenko (@eltiren)", "Warnar Boekkooi (@boekkooi)", "@blxd", "@btiom", "@daslicious", "@MasterofJOKers", "@mammothb", "@medina", "@monkeyphysics", "@nixxquality", "@papplampe", "@Raziel-23", "@t0mm0", "@ToadKing", "@unintended", "@wolftankk", "@yeeeargh" ] from .api import streams from .exceptions import (StreamlinkError, PluginError, NoStreamsError, NoPluginError, StreamError) from .session import Streamlink
{ "content_hash": "3828a10606b4e315920dfbb0448fdf30", "timestamp": "", "source": "github", "line_count": 89, "max_line_length": 76, "avg_line_length": 27.96629213483146, "alnum_prop": 0.6364001607071113, "repo_name": "javiercantero/streamlink", "id": "d401efa4a525a7102c7f276fc59ca38902727876", "size": "2509", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/streamlink/__init__.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Batchfile", "bytes": "838" }, { "name": "Python", "bytes": "1315591" }, { "name": "Shell", "bytes": "18633" } ], "symlink_target": "" }
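The package docstring above mentions an API for direct access to stream data; a minimal sketch of that usage (the channel URL is a placeholder) might look like:

import streamlink

# streams() returns a dict mapping quality names to Stream objects for supported sites.
streams = streamlink.streams("https://www.twitch.tv/example_channel")
if streams:
    best = streams["best"]   # "best"/"worst" aliases are added automatically
    fd = best.open()         # file-like object giving direct access to the stream data
    chunk = fd.read(1024)
    fd.close()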
from typing import TYPE_CHECKING from azure.core.configuration import Configuration from azure.core.pipeline import policies from azure.mgmt.core.policies import ARMHttpLoggingPolicy from ._version import VERSION if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any from azure.core.credentials import TokenCredential class DataBoxEdgeManagementClientConfiguration(Configuration): """Configuration for DataBoxEdgeManagementClient. Note that all parameters used to create this instance are saved as instance attributes. :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials.TokenCredential :param subscription_id: The subscription ID. :type subscription_id: str """ def __init__( self, credential, # type: "TokenCredential" subscription_id, # type: str **kwargs # type: Any ): # type: (...) -> None if credential is None: raise ValueError("Parameter 'credential' must not be None.") if subscription_id is None: raise ValueError("Parameter 'subscription_id' must not be None.") super(DataBoxEdgeManagementClientConfiguration, self).__init__(**kwargs) self.credential = credential self.subscription_id = subscription_id self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) kwargs.setdefault('sdk_moniker', 'azure-mgmt-databoxedge/{}'.format(VERSION)) self._configure(**kwargs) def _configure( self, **kwargs # type: Any ): # type: (...) -> None self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs) self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs) self.authentication_policy = kwargs.get('authentication_policy') if self.credential and not self.authentication_policy: self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
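This configuration object is normally built for you by DataBoxEdgeManagementClient, but per the constructor above it only needs a TokenCredential and a subscription id. A sketch with a placeholder subscription and azure-identity's DefaultAzureCredential:

from azure.identity import DefaultAzureCredential

# Placeholder subscription id; DefaultAzureCredential resolves ambient Azure credentials.
config = DataBoxEdgeManagementClientConfiguration(
    credential=DefaultAzureCredential(),
    subscription_id="00000000-0000-0000-0000-000000000000",
)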
{ "content_hash": "46e303719cd0359386f4b0a034eabfa9", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 129, "avg_line_length": 45.39344262295082, "alnum_prop": 0.6980859516070783, "repo_name": "Azure/azure-sdk-for-python", "id": "58d4b497e3aa68de4e7de0ca1aa52c7741d69dc4", "size": "3242", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/_configuration.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('sprints', '0004_remove_sprint_active'),
        ('tasks', '0005_auto_20140907_0246'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='sprint',
            field=models.ManyToManyField(related_name=b'tasks', null=True, to='sprints.Sprint', blank=True),
            preserve_default=True,
        ),
    ]
{ "content_hash": "b7a9721d1bd18977ef4ab60690b82028", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 108, "avg_line_length": 25.75, "alnum_prop": 0.6, "repo_name": "mc706/task-burndown", "id": "4083d4f474cadd6c15f35a2f091cc6bb4e56a1be", "size": "539", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tasks/migrations/0006_task_sprint.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "7411" }, { "name": "HTML", "bytes": "69877" }, { "name": "JavaScript", "bytes": "44199" }, { "name": "Python", "bytes": "24211" } ], "symlink_target": "" }
import os from homely._test import contents from homely._test.system import HOMELY, TempRepo, checkrepolist, getsystemfn def test_homely_remove(tmpdir, HOME): system = getsystemfn(HOME) def _addfake(name, createfile): # create a fake repo and add it tr = TempRepo(tmpdir, name) tf = os.path.join(HOME, createfile) contents(tr.remotepath + '/HOMELY.py', """ from homely.files import lineinfile lineinfile('~/%s', 'Hello from %s') """ % (createfile, name)) assert not os.path.exists(tf) system(HOMELY('add') + [tr.url]) assert contents(tf) == "Hello from %s\n" % name return tr r1 = _addfake('repo1', 'file1.txt') r2 = _addfake('repo2', 'file2.txt') r3 = _addfake('repo3', 'file3.txt') # check that all the repos are there checkrepolist(HOME, system, [r1, r2, r3]) assert contents(HOME + '/file1.txt', "Hello from repo1\n") assert contents(HOME + '/file2.txt', "Hello from repo2\n") assert contents(HOME + '/file3.txt', "Hello from repo3\n") # Check that the repo can be removed. system(HOMELY('forget') + [r1.repoid]) checkrepolist(HOME, system, [r2, r3]) assert contents(HOME + '/file1.txt', "Hello from repo1\n") assert contents(HOME + '/file2.txt', "Hello from repo2\n") assert contents(HOME + '/file3.txt', "Hello from repo3\n") # now run an update to make repo1's files go away system(HOMELY('update')) assert not os.path.exists(HOME + '/file1.txt') assert contents(HOME + '/file2.txt', "Hello from repo2\n") assert contents(HOME + '/file3.txt', "Hello from repo3\n") # Test removing multiple repos, but using local path this time # Note that because we don't use --update, the created files will still be # sitting around on disk system(HOMELY('forget') + ['~/repo2', '~/repo3']) checkrepolist(HOME, system, []) # repo2 and repo3 are stilling going to hang around on disk assert os.path.exists(HOME + '/repo2') assert os.path.exists(HOME + '/repo3') assert not os.path.exists(HOME + '/file1.txt') assert contents(HOME + '/file2.txt', "Hello from repo2\n") assert contents(HOME + '/file3.txt', "Hello from repo3\n")
{ "content_hash": "d1f792b8554e44e1ad9dfaf7beb40c48", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 78, "avg_line_length": 40.1578947368421, "alnum_prop": 0.6251638269986893, "repo_name": "toomuchphp/homely", "id": "4827220c54857d0734f8d138666dc5326889e3a8", "size": "2289", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "test/system/test_homely_remove.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "29834" } ], "symlink_target": "" }
import logging import os from argparse import ArgumentParser import six from grab.util.config import build_spider_config, build_root_config from grab.util.module import load_spider_class from weblib.logs import default_logging from weblib.files import clear_directory from weblib.encoding import make_str logger = logging.getLogger('grab.script.crawl') def setup_arg_parser(parser): parser.add_argument('spider_name', type=str) parser.add_argument('-t', '--thread-number', default=None, type=int, help='Number of network threads') parser.add_argument('-n', '--network-logs', action='store_true', default=False, help='Dump to console details about network requests') parser.add_argument('--disable-proxy', action='store_true', default=False, help='Disable proxy servers') parser.add_argument('--ignore-lock', action='store_true', default=False) parser.add_argument('--disable-report', action='store_true', default=False) parser.add_argument('--disable-default-logs', action='store_true', default=False) parser.add_argument('--settings-module', type=str, default='settings') parser.add_argument('--api-port', type=int, default=None) parser.add_argument('--mp-mode', action='store_true', default=False, help='Run task handlers (HTML parsers) in separate ' 'processes') parser.add_argument('--parser-pool-size', type=int) def get_lock_key(spider_name, lock_key=None, ignore_lock=False, **kwargs): # --ignore-lock has highest precedence if ignore_lock: return None # If --lock-key is specified explicitly use it if lock_key is not None: return lock_key # As fallback, if no information has been given about locking # generate lock key from the spider name and use it lock_key = 'crawl.%s' % spider_name return lock_key def save_list(lst, path): """ Save items from list to the file. """ with open(path, 'wb') as out: lines = [] for item in lst: if isinstance(item, (six.text_type, six.binary_type)): lines.append(make_str(item)) else: lines.append(make_str(json.dumps(item))) out.write(b'\n'.join(lines) + b'\n') def main(spider_name, thread_number=None, settings_module='settings', network_logs=False, disable_proxy=False, ignore_lock=False, disable_report=False, disable_default_logs=False, api_port=None, mp_mode=False, parser_pool_size=None, *args, **kwargs): if disable_default_logs: default_logging(propagate_network_logger=network_logs, grab_log=None, network_log=None) else: default_logging(propagate_network_logger=network_logs) root_config = build_root_config(settings_module) spider_class = load_spider_class(root_config, spider_name) spider_config = build_spider_config(spider_class, root_config) spider_args = None if hasattr(spider_class, 'setup_arg_parser'): parser = ArgumentParser() spider_class.setup_arg_parser(parser) opts, trash = parser.parse_known_args() spider_args = vars(opts) bot = spider_class( thread_number=thread_number, config=spider_config, network_try_limit=None, task_try_limit=None, args=spider_args, http_api_port=api_port, mp_mode=mp_mode, parser_pool_size=parser_pool_size, ) opt_queue = spider_config.get('queue') if opt_queue: bot.setup_queue(**opt_queue) opt_cache = spider_config.get('cache') if opt_cache: bot.setup_cache(**opt_cache) opt_proxy_list = spider_config.get('proxy_list') if opt_proxy_list: if disable_proxy: logger.debug('Proxy servers disabled via command line') else: bot.load_proxylist(**opt_proxy_list) opt_ifaces = spider_config.get('command_interfaces') if opt_ifaces: for iface_config in opt_ifaces: bot.controller.add_interface(**iface_config) try: bot.run() except KeyboardInterrupt: pass 
stats = bot.render_stats(timing=spider_config.get('display_timing')) stats_with_time = bot.render_stats(timing=True) if spider_config.get('display_stats'): logger.debug(stats) pid = os.getpid() logger.debug('Spider pid is %d' % pid) if not disable_report: if spider_config.get('save_report'): for subdir in (str(pid), 'last'): dir_ = 'var/%s' % subdir if not os.path.exists(dir_): os.makedirs(dir_) else: clear_directory(dir_) for key, lst in bot.stat.collections.items(): fname_key = key.replace('-', '_') save_list(lst, '%s/%s.txt' % (dir_, fname_key)) with open('%s/report.txt' % dir_, 'wb') as out: out.write(make_str(stats_with_time)) return { 'spider_stats': bot.render_stats(timing=False), 'spider_timing': bot.render_timing(), }
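
# A minimal sketch of how this module could be driven directly. Grab normally
# dispatches ``main`` through its own command-line runner, so the guard below is
# illustration only; it assumes a ``settings.py`` module is importable from the
# working directory (the default value of --settings-module).
if __name__ == '__main__':
    cli_parser = ArgumentParser()
    setup_arg_parser(cli_parser)
    cli_opts, _ = cli_parser.parse_known_args()
    main(**vars(cli_opts))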
{ "content_hash": "01fa92b1ed757bba8b0ab4df70cf2ca7", "timestamp": "", "source": "github", "line_count": 154, "max_line_length": 79, "avg_line_length": 34.55844155844156, "alnum_prop": 0.6018414130026306, "repo_name": "maurobaraldi/grab", "id": "fb03f5e05de715efa0e6ab2527c42f072d6ce8f0", "size": "5322", "binary": false, "copies": "12", "ref": "refs/heads/master", "path": "grab/script/crawl.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "5434" }, { "name": "Makefile", "bytes": "910" }, { "name": "PostScript", "bytes": "2788" }, { "name": "Python", "bytes": "407798" } ], "symlink_target": "" }
"""Add encrypted password field Revision ID: 289ce07647b Revises: 2929af7925ed Create Date: 2015-11-21 11:18:00.650587 """ import sqlalchemy as sa from alembic import op # revision identifiers, used by Alembic. revision = "289ce07647b" down_revision = "2929af7925ed" def upgrade(): op.add_column("dbs", sa.Column("password", sa.LargeBinary(), nullable=True)) def downgrade(): op.drop_column("dbs", "password")
{ "content_hash": "970bf5e9046435745c5dafcacefbe063", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 80, "avg_line_length": 19.363636363636363, "alnum_prop": 0.7253521126760564, "repo_name": "zhouyao1994/incubator-superset", "id": "73273a4da6b1b60c2246538cfb1901e01d763c51", "size": "1211", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "superset/migrations/versions/2015-11-21_11-18_289ce07647b_add_encrypted_password_field.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "4776" }, { "name": "Dockerfile", "bytes": "6940" }, { "name": "HTML", "bytes": "1243911" }, { "name": "JavaScript", "bytes": "2445349" }, { "name": "Jinja", "bytes": "5542" }, { "name": "Jupyter Notebook", "bytes": "1925627" }, { "name": "Less", "bytes": "106438" }, { "name": "Makefile", "bytes": "3946" }, { "name": "Mako", "bytes": "1197" }, { "name": "Pug", "bytes": "2969" }, { "name": "Python", "bytes": "6296253" }, { "name": "Shell", "bytes": "56211" }, { "name": "Smarty", "bytes": "4298" }, { "name": "TypeScript", "bytes": "6909337" } ], "symlink_target": "" }
from netforce.model import Model, fields


class SaleStage(Model):
    _name = "sale.stage"
    _string = "Sales Stage"
    _key = ["name"]
    _fields = {
        "name": fields.Char("Name", required=True, search=True),
        "sequence": fields.Char("Sequence"),
        "comments": fields.One2Many("message", "related_id", "Comments"),
    }
    _order = "sequence"

SaleStage.register()
{ "content_hash": "470f516398ec476967437af753614c5a", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 73, "avg_line_length": 26.823529411764707, "alnum_prop": 0.6337719298245614, "repo_name": "nfco/netforce", "id": "89037400986a6e7f21f5159cdc22277ff809c74b", "size": "1561", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "netforce_sale/netforce_sale/models/sale_stage.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "73" }, { "name": "CSS", "bytes": "407336" }, { "name": "HTML", "bytes": "478918" }, { "name": "Java", "bytes": "11870" }, { "name": "JavaScript", "bytes": "3712147" }, { "name": "Makefile", "bytes": "353" }, { "name": "PHP", "bytes": "2274" }, { "name": "Python", "bytes": "3469515" }, { "name": "Roff", "bytes": "15858" }, { "name": "Shell", "bytes": "117" } ], "symlink_target": "" }
import aiohttp.web test_routes = aiohttp.web.RouteTableDef() @test_routes.patch("/jobs/{job_id}") def public_test_route(request: aiohttp.web.Request): return aiohttp.web.Response(status=200) @test_routes.get("/not_public") def non_public_test_route(request: aiohttp.web.Request): return aiohttp.web.Response(status=200) async def test_public_routes_are_public(spawn_job_client): client = await spawn_job_client(authorize=False, add_route_table=test_routes) job_id = "test_job" insert_result = await client.db.jobs.insert_one({"_id": job_id}) assert insert_result["_id"] == job_id response = await client.patch(f"/jobs/{job_id}") assert response.status == 200 async def test_unauthorized_when_header_missing(spawn_job_client): client = await spawn_job_client(authorize=False, add_route_table=test_routes) response = await client.get("/not_public") assert response.status == 401 async def test_unauthorized_when_header_invalid(spawn_job_client): client = await spawn_job_client(authorize=False, add_route_table=test_routes) response = await client.get( "/not_public", headers={ "Authorization": "Basic job-not_a_job_id:not_a_key", }, ) assert response.status == 401 async def test_authorized_when_header_is_valid(spawn_job_client): client = await spawn_job_client(authorize=True, add_route_table=test_routes) response = await client.get("/not_public") assert response.status == 200
{ "content_hash": "b155fbe02dfbf09ed8533635ed8bf7e4", "timestamp": "", "source": "github", "line_count": 55, "max_line_length": 81, "avg_line_length": 27.472727272727273, "alnum_prop": 0.6949040370615487, "repo_name": "virtool/virtool", "id": "27a235fa9ed79eea8ca302f84166ad4f241d599c", "size": "1511", "binary": false, "copies": "2", "ref": "refs/heads/main", "path": "tests/jobs/test_auth.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "961" }, { "name": "HTML", "bytes": "44858" }, { "name": "Python", "bytes": "1316464" } ], "symlink_target": "" }
import unittest import numpy import chainer from chainer import cuda import chainer.functions as F from chainer import gradient_check from chainer import testing from chainer.testing import attr from chainer.testing import condition class UnaryFunctionsTestBase(object): def make_data(self): raise NotImplementedError def setUp(self): self.x, self.gy = self.make_data() def check_forward(self, op, op_np, x_data): x = chainer.Variable(x_data) y = op(x) gradient_check.assert_allclose( op_np(self.x), y.data, atol=1e-7, rtol=1e-7) def check_forward_cpu(self, op, op_np): self.check_forward(op, op_np, self.x) def check_forward_gpu(self, op, op_np): self.check_forward(op, op_np, cuda.to_gpu(self.x)) @condition.retry(3) def test_exp_forward_cpu(self): self.check_forward_cpu(F.exp, numpy.exp) @condition.retry(3) def test_log_forward_cpu(self): self.check_forward_cpu(F.log, numpy.log) @attr.gpu @condition.retry(3) def test_exp_forward_gpu(self): self.check_forward_gpu(F.exp, numpy.exp) @attr.gpu @condition.retry(3) def test_log_forward_gpu(self): self.check_forward_gpu(F.log, numpy.log) def check_backward(self, op, x_data, y_grad): gradient_check.check_backward(op, x_data, y_grad) def check_backward_cpu(self, op): self.check_backward(op, self.x, self.gy) def check_backward_gpu(self, op): self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy)) @condition.retry(3) def test_exp_backward_cpu(self): self.check_backward_cpu(F.exp) @condition.retry(3) def test_log_backward_cpu(self): self.check_backward_cpu(F.log) @attr.gpu @condition.retry(3) def test_exp_backward_gpu(self): self.check_backward_gpu(F.exp) @attr.gpu @condition.retry(3) def test_log_backward_gpu(self): self.check_backward_gpu(F.log) def test_exp(self): self.assertEqual(F.Exp().label, 'exp') def test_log(self): self.assertEqual(F.Log().label, 'log') class TestUnaryFunctionsSimple(UnaryFunctionsTestBase, unittest.TestCase): def make_data(self): x = numpy.random.uniform(.5, 1, (3, 2)).astype(numpy.float32) gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32) return x, gy class TestUnaryFunctionsZeroDimension(UnaryFunctionsTestBase, unittest.TestCase): def make_data(self): x = numpy.random.uniform(.5, 1, ()).astype(numpy.float32) gy = numpy.random.uniform(-1, 1, ()).astype(numpy.float32) return x, gy testing.run_module(__name__, __file__)
{ "content_hash": "6e8d48b1c4b1c0499c58ebb2c230752a", "timestamp": "", "source": "github", "line_count": 103, "max_line_length": 74, "avg_line_length": 26.78640776699029, "alnum_prop": 0.6357375860819138, "repo_name": "AlpacaDB/chainer", "id": "814beefcd245b534950365fa681e087fc33a1ed5", "size": "2759", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "tests/chainer_tests/functions_tests/math_tests/test_exponential.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "3366" }, { "name": "C", "bytes": "29175" }, { "name": "Cuda", "bytes": "6118" }, { "name": "PowerShell", "bytes": "7195" }, { "name": "Python", "bytes": "1498625" } ], "symlink_target": "" }
import sys
import socket

from parse_arguments import argParseFunction


def createSocket(host, port):
    # Create a UDP socket, then loop forever sending user input to the
    # server and printing each reply.
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        print "Socket created"
    except socket.error, msg:
        print "Failed to create socket. Error code: "+str(msg[0])+". Message "+msg[1]
        sys.exit()

    while True:
        msg = raw_input("Enter your message: ")
        try:
            s.sendto(msg, (host, port))
            (data, addr) = s.recvfrom(1024)
            print "Server reply: "+data
        except socket.error, msg:
            print "Error code: "+str(msg[0])+". Message "+msg[1]
            sys.exit()


def main():
    # Host and port come from the shared argument parser.
    args = argParseFunction()
    try:
        createSocket(args.host, args.port)
    except KeyboardInterrupt:
        sys.exit()


if __name__ == "__main__":
    main()
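
# For quick local testing it helps to have a peer that answers: the sketch below
# is a minimal UDP echo server matching this client's send/receive pattern. The
# bind address and port are arbitrary illustrative choices, not values used
# elsewhere in this project.
def run_echo_server(host="0.0.0.0", port=9999):
    server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    server.bind((host, port))
    while True:
        data, addr = server.recvfrom(1024)
        # send each datagram straight back to whoever sent it
        server.sendto(data, addr)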
{ "content_hash": "402bd3687f438f3dca8953c5ae31fd41", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 79, "avg_line_length": 22.393939393939394, "alnum_prop": 0.6725304465493911, "repo_name": "Digoss/funny_python", "id": "5df1c0f4cf86bce97f6270bfa4b416cd418b3b04", "size": "739", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "udp_client.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "3086" }, { "name": "Python", "bytes": "16519" } ], "symlink_target": "" }
<<<<<<< HEAD __all__ = ['db_available','add_to_db'] ======= import math >>>>>>> a454f3d2717b10f207860099d8466b8333988a38 # Third party imports import pandas # Local imports <<<<<<< HEAD from mendeley.optional import MissingModule from mendeley.optional import db #Optional Imports #------------------------------------ #pypub.paper_info from mendeley.optional import PaperInfo #TODO: This is a poor import name, fix this #pypub.scrapers from mendeley.optional import base_objects as obj #TODO: Enumerate errors from mendeley.errors import * #Public Interface #------------------------------------------------ db_available = type(db) is not MissingModule def add_to_db(info): """ Inputs ------ info : dict, dataframe entry, pypub entry """ paper_info = _make_paper_info(info) db.log_info(paper_info=paper_info) ======= from database import db_logging as db # TODO: Possibly copy these base classes into a file within mendeley_python from mendeley.optional import PaperInfo from mendeley.optional import base_objects as obj from mendeley.errors import * def add_to_db(info): paper_info = _make_paper_info(info) has_file = info.get('file_attached') db.log_info(paper_info=paper_info, has_file=has_file) >>>>>>> a454f3d2717b10f207860099d8466b8333988a38 def update_db_entry(info): new_info = _make_paper_info(info) # Get the saved information that exists for a given entry saved_info = db.get_saved_entry_obj(new_info) comparison_fields = saved_info.fields author_fields = saved_info.author_fields main_paper_id = saved_info.main_paper_id # Turn the new information into a combined dict new_full_dict = new_info.__dict__.copy() new_full_dict.update(new_info.entry.__dict__) if new_full_dict.get('authors') is not None: new_full_dict['authors'] = [author.__dict__ for author in new_full_dict['authors']] # Turn saved information into a combined dict saved_full_dict = saved_info.__dict__.copy() saved_full_dict.update(saved_info.entry.__dict__) if saved_full_dict.get('authors') is not None: saved_full_dict['authors'] = [author.__dict__ for author in saved_full_dict['authors']] updating_fields = [] updating_values = [] # Determine which fields have changed and need to be updated for field in comparison_fields: saved = saved_full_dict.get(field) new = new_full_dict.get(field) if saved == new: continue elif field == 'authors': # Each author is its own row in a separate Authors table. # This code replaces the saved bank of authors for a paper # with the new information. This covers creation and deletion # of authors, as well as updates to specific fields. 
for author in new: if author not in saved: db.add_author(author, main_paper_id=main_paper_id) for author in saved: if author not in new: db.delete_author(author, main_paper_id=main_paper_id) else: updating_fields.append(field) if saved is not None: updating_values.append(saved) else: updating_values.append(new) # Make the updating requests db.update_general_fields(new_full_dict.get('title'), updating_field=updating_fields, updating_value=updating_values, filter_by_title=True) <<<<<<< HEAD def add_reference(ref, main_doi, main_title): """ Inputs ------ """ db.add_reference(ref=ref, main_paper_doi=main_doi, main_paper_title=main_title) def update_reference_field(identifying_value, updating_field, updating_value, citing_doi=None, authors=None, filter_by_title=False, filter_by_doi=False, filter_by_authors=False): db.update_reference_field(identifying_value, updating_field, updating_value, citing_doi=citing_doi, authors=authors, filter_by_title=filter_by_title, filter_by_doi=filter_by_doi, ======= def update_entry_field(identifying_value, updating_field, updating_value, filter_by_title=False, filter_by_doi=False): db.update_entry_field(identifying_value, updating_field, updating_value, filter_by_title=filter_by_title, filter_by_doi=filter_by_doi) def add_reference(refs, main_doi, main_title=None): db.add_references(refs=refs, main_paper_doi=main_doi, main_paper_title=main_title) def update_reference_field(identifying_value, updating_field, updating_value, citing_doi=None, authors=None, filter_by_title=False, filter_by_doi=False, filter_by_authors=False): db.update_reference_field(identifying_value, updating_field, updating_value, citing_doi=citing_doi, authors=authors, filter_by_title=filter_by_title, filter_by_doi=filter_by_doi, >>>>>>> a454f3d2717b10f207860099d8466b8333988a38 filter_by_authors=filter_by_authors) def check_for_document(doi): try: docs = db.get_saved_info(doi) except MultipleDoiError: docs = None pass if docs is not None: return True else: return False def follow_refs_forward(doi): <<<<<<< HEAD """ """ return db.follow_refs_forward(doi) def _make_paper_info(info): """ Inputs ------ info : """ ======= return db.follow_refs_forward(doi) def check_multiple_constraints(params): # Params is a dict # first_key, first_value = params.popitem() # query_results = db.main_paper_search_wrapper(first_key, first_value) query_results = db.get_all_main_papers() for key, value in params.items(): temp = [] for result in query_results: search_value = getattr(result, key, '') if search_value is None: continue else: if value.lower() in search_value.lower(): temp.append(result) query_results = temp # query_results = [result for result in query_results if value.lower() in str(getattr(result, key, '')).lower()] if len(query_results) == 0: return None return query_results def delete_reference(ref): db.delete_reference(ref) def _make_paper_info(info): >>>>>>> a454f3d2717b10f207860099d8466b8333988a38 if isinstance(info, PaperInfo): return info elif isinstance(info, dict): paper_info = _mendeley_json_to_paper_info(info) return paper_info elif isinstance(info, pandas.core.series.Series): paper_info = _mendeley_df_to_paper_info(info) return paper_info else: raise TypeError('Information could not be formatted for database entry.') def _mendeley_df_to_paper_info(df_row): df_dict = df_row.to_dict() paper_info = PaperInfo() <<<<<<< HEAD ======= # Catch NaNs, which are default Pandas values for key in df_dict.keys(): if isinstance(df_dict.get(key), float): if math.isnan(df_dict.get(key)): df_dict[key] = None 
>>>>>>> a454f3d2717b10f207860099d8466b8333988a38 entry = obj.BaseEntry() entry.title = df_dict.get('title') entry.publication = df_dict.get('publisher') entry.year = df_dict.get('year') entry.volume = df_dict.get('volume') entry.issue = df_dict.get('issue') entry.pages = df_dict.get('pages') entry.keywords = df_dict.get('keywords') entry.abstract = df_dict.get('abstract') entry.notes = df_dict.get('notes') entry.pubmed_id = df_dict.get('pmid') entry.issn = df_dict.get('issn') # Formatting if entry.year is not None: entry.year = str(entry.year) if entry.keywords is not None and isinstance(entry.keywords, list): entry.keywords = ', '.join(entry.keywords) entry.authors = [] json_authors = df_dict.get('authors') if json_authors is not None: for auth in json_authors: author = obj.BaseAuthor() #TODO: This creates extra space if the first or last name is missing name = ' '.join([auth.get('first_name',''), auth.get('last_name','')]) author.name = name entry.authors.append(author) ids = df_dict.get('identifiers') if ids is not None: if 'doi' in ids.keys(): entry.doi = ids.get('doi') paper_info.doi = ids.get('doi') paper_info.entry = entry return paper_info def _mendeley_json_to_paper_info(json): paper_info = PaperInfo() entry = obj.BaseEntry() entry.title = json.get('title') entry.publication = json.get('publisher') entry.year = json.get('year') entry.volume = json.get('volume') entry.issue = json.get('issue') entry.pages = json.get('pages') entry.keywords = json.get('keywords') entry.abstract = json.get('abstract') entry.notes = json.get('notes') entry.authors = [] json_authors = json.get('authors') if json_authors is not None: for auth in json_authors: author = obj.BaseAuthor() name = ' '.join([auth.get('first_name',''), auth.get('last_name','')]) author.name = name entry.authors.append(author) ids = json.get('identifiers') if ids is not None: if 'doi' in ids.keys(): entry.doi = ids.get('doi') paper_info.doi = ids.get('doi') <<<<<<< HEAD ======= if 'pmid' in ids.keys(): entry.pubmed_id = ids.get('pmid') >>>>>>> a454f3d2717b10f207860099d8466b8333988a38 paper_info.entry = entry return paper_info
{ "content_hash": "3206fa006f67f20d0644d09a3690207e", "timestamp": "", "source": "github", "line_count": 313, "max_line_length": 120, "avg_line_length": 31.447284345047922, "alnum_prop": 0.6107893934775983, "repo_name": "ScholarTools/mendeley_python", "id": "ee8f828bfcac07c0014bcee9f9e572d231d4e04d", "size": "9843", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "mendeley/db_interface.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "192277" } ], "symlink_target": "" }
from django.db import models # No related name is needed here, since symmetrical relations are not # explicitly reversible. class SelfRefer(models.Model): name = models.CharField(max_length=10) references = models.ManyToManyField('self') related = models.ManyToManyField('self') def __unicode__(self): return self.name class Tag(models.Model): name = models.CharField(max_length=10) def __unicode__(self): return self.name # A related_name is required on one of the ManyToManyField entries here because # they are both addressable as reverse relations from Tag. class Entry(models.Model): name = models.CharField(max_length=10) topics = models.ManyToManyField(Tag) related = models.ManyToManyField(Tag, related_name="similar") def __unicode__(self): return self.name # Two models both inheriting from a base model with a self-referential m2m field class SelfReferChild(SelfRefer): pass class SelfReferChildSibling(SelfRefer): pass # Many-to-Many relation between models, where one of the PK's isn't an Autofield class Line(models.Model): name = models.CharField(max_length=100) class Worksheet(models.Model): id = models.CharField(primary_key=True, max_length=100) lines = models.ManyToManyField(Line, blank=True, null=True) __test__ = {"regressions": """ # Multiple m2m references to the same model or a different model must be # distinguished when accessing the relations through an instance attribute. >>> s1 = SelfRefer.objects.create(name='s1') >>> s2 = SelfRefer.objects.create(name='s2') >>> s3 = SelfRefer.objects.create(name='s3') >>> s1.references.add(s2) >>> s1.related.add(s3) >>> e1 = Entry.objects.create(name='e1') >>> t1 = Tag.objects.create(name='t1') >>> t2 = Tag.objects.create(name='t2') >>> e1.topics.add(t1) >>> e1.related.add(t2) >>> s1.references.all() [<SelfRefer: s2>] >>> s1.related.all() [<SelfRefer: s3>] >>> e1.topics.all() [<Tag: t1>] >>> e1.related.all() [<Tag: t2>] # The secret internal related names for self-referential many-to-many fields # shouldn't appear in the list when an error is made. >>> SelfRefer.objects.filter(porcupine='fred') Traceback (most recent call last): ... FieldError: Cannot resolve keyword 'porcupine' into field. Choices are: id, name, references, related, selfreferchild, selfreferchildsibling # Test to ensure that the relationship between two inherited models # with a self-referential m2m field maintains symmetry >>> sr_child = SelfReferChild(name="Hanna") >>> sr_child.save() >>> sr_sibling = SelfReferChildSibling(name="Beth") >>> sr_sibling.save() >>> sr_child.related.add(sr_sibling) >>> sr_child.related.all() [<SelfRefer: Beth>] >>> sr_sibling.related.all() [<SelfRefer: Hanna>] # Regression for #11311 - The primary key for models in a m2m relation # doesn't have to be an AutoField >>> w = Worksheet(id='abc') >>> w.save() >>> w.delete() """ }
{ "content_hash": "395c336d4d6a7337a3995ef2c62efbbd", "timestamp": "", "source": "github", "line_count": 97, "max_line_length": 140, "avg_line_length": 30.1340206185567, "alnum_prop": 0.708518645227506, "repo_name": "grangier/django-11599", "id": "913e719902885f92400f15132fbb6c09c77b85be", "size": "2923", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tests/regressiontests/m2m_regress/models.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "80589" }, { "name": "Python", "bytes": "4902708" }, { "name": "Shell", "bytes": "1608" } ], "symlink_target": "" }
import pytest from api.base.settings.defaults import API_BASE from osf_tests.factories import ( RegistrationProviderFactory, AuthUserFactory ) from django.contrib.auth.models import Group from osf.models import RegistrationSchema from waffle.models import Flag from osf.migrations import update_provider_auth_groups from osf.features import EGAP_ADMINS @pytest.mark.django_db class TestRegistrationProviderSchemas: @pytest.fixture() def user(self): return AuthUserFactory() @pytest.fixture() def egap_flag(self): flag = Flag.objects.get(name='egap_admins') flag.everyone = True flag.save() return flag @pytest.fixture() def schema(self): reg_schema = RegistrationSchema.objects.get(name='OSF Preregistration', schema_version=2) reg_schema.active = True reg_schema.save() return reg_schema @pytest.fixture() def egap_schema(self): schema = RegistrationSchema.objects.get(name='EGAP Registration', schema_version=3) schema.visible = True schema.active = True schema.save() return schema @pytest.fixture() def out_dated_schema(self): reg_schema = RegistrationSchema(name='Old Schema', schema_version=1) reg_schema.save() return reg_schema @pytest.fixture() def osf_reg_schema(self): osf_reg = RegistrationSchema.objects.get(name='OSF Preregistration', schema_version=3) osf_reg.visible = True osf_reg.active = True osf_reg.save() return osf_reg @pytest.fixture() def invisible_schema(self): reg_schema = RegistrationSchema(name='Test Schema (Invisible)', schema_version=1, visible=False) reg_schema.save() return reg_schema @pytest.fixture() def inactive_schema(self): reg_schema = RegistrationSchema(name='Test Schema (Inactive)', schema_version=1, active=False) reg_schema.save() return reg_schema @pytest.fixture() def provider(self, schema, out_dated_schema, invisible_schema, inactive_schema): provider = RegistrationProviderFactory() update_provider_auth_groups() provider.schemas.add(*[schema, out_dated_schema, invisible_schema, inactive_schema]) provider.save() return provider @pytest.fixture() def provider_with_v2_reg_only(self, schema): provider = RegistrationProviderFactory() update_provider_auth_groups() provider.schemas.add(schema) provider.save() return provider @pytest.fixture() def provider_with_egap_only(self, egap_schema): provider = RegistrationProviderFactory() update_provider_auth_groups() provider.schemas.add(egap_schema) provider.save() return provider @pytest.fixture() def provider_with_reg(self, osf_reg_schema, egap_schema, schema, out_dated_schema): provider = RegistrationProviderFactory() update_provider_auth_groups() provider.schemas.add(*[osf_reg_schema, schema, out_dated_schema, egap_schema]) provider.save() return provider @pytest.fixture def egap_admin(self): user = AuthUserFactory() user.save() flag = Flag.objects.create(name=EGAP_ADMINS) group = Group.objects.create(name=EGAP_ADMINS) # Just using the same name for convenience flag.groups.add(group) group.user_set.add(user) group.save() flag.save() return user @pytest.fixture() def url(self, provider): return f'/{API_BASE}providers/registrations/{provider._id}/schemas/' @pytest.fixture() def url_with_v2_reg_only(self, provider_with_v2_reg_only): return f'/{API_BASE}providers/registrations/{provider_with_v2_reg_only._id}/schemas/' @pytest.fixture() def url_with_egap_only(self, provider_with_egap_only): return f'/{API_BASE}providers/registrations/{provider_with_egap_only._id}/schemas/' @pytest.fixture() def url_with_reg(self, provider_with_reg): return f'/{API_BASE}providers/registrations/{provider_with_reg._id}/schemas/' def 
test_registration_provider_with_schema( self, app, url, schema, egap_schema, egap_admin, invisible_schema, user, url_with_v2_reg_only, url_with_egap_only ): res = app.get(url, auth=user.auth) assert res.status_code == 200 data = res.json['data'] assert len(data) == 3 assert schema._id in [item['id'] for item in data] assert invisible_schema._id in [item['id'] for item in data] assert schema.name in [item['attributes']['name'] for item in data] res = app.get(url_with_v2_reg_only, auth=egap_admin.auth) assert res.status_code == 200 data = res.json['data'] assert len(data) == 1 assert data[0]['id'] == schema._id assert data[0]['attributes']['name'] == schema.name res = app.get(url_with_egap_only, auth=user.auth) assert res.status_code == 200 data = res.json['data'] assert len(data) == 0 def test_egap_registration_schema( self, app, user, egap_admin, egap_schema, url_with_egap_only ): res = app.get(url_with_egap_only, auth=user.auth) assert res.status_code == 200 data = res.json['data'] assert len(data) == 0 res = app.get(url_with_egap_only, auth=egap_admin.auth) assert res.status_code == 200 data = res.json['data'] assert len(data) == 1 assert data[0]['id'] == egap_schema._id assert data[0]['attributes']['name'] == egap_schema.name def test_registration_provider_with_default_schema( self, app, provider_with_reg, out_dated_schema, user, egap_schema, schema, url_with_reg, osf_reg_schema ): provider_with_reg.default_schema = osf_reg_schema provider_with_reg.save() res = app.get(url_with_reg, auth=user.auth) assert res.status_code == 200 data = res.json['data'] assert provider_with_reg.schemas.all().count() == 4 assert len(data) == 2 assert osf_reg_schema._id == data[0]['id'] assert schema.name in [item['attributes']['name'] for item in data]
{ "content_hash": "266b65ec24962e8ebd610d571f29d631", "timestamp": "", "source": "github", "line_count": 210, "max_line_length": 104, "avg_line_length": 31.34285714285714, "alnum_prop": 0.6110604679428745, "repo_name": "Johnetordoff/osf.io", "id": "fb2db1e0184d73e2c695ea0394f0ce43927151a9", "size": "6582", "binary": false, "copies": "3", "ref": "refs/heads/develop", "path": "api_tests/providers/registrations/views/test_registration_provider_schemas.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "93635" }, { "name": "Dockerfile", "bytes": "5876" }, { "name": "HTML", "bytes": "373738" }, { "name": "JavaScript", "bytes": "1596130" }, { "name": "Mako", "bytes": "679193" }, { "name": "Python", "bytes": "11587197" }, { "name": "Shell", "bytes": "2841" }, { "name": "VCL", "bytes": "13885" } ], "symlink_target": "" }
from warnings import warn

from .takin import TakinTimeOfFlight, TakinTripleAxis
from .tas_instrument import TripleAxisInstrument
from .tof_instrument import TimeOfFlightInstrument


class Instrument(object):
    r"""An object that represents either a Triple Axis Spectrometer instrument
    or a Time of Flight Instrument configuration, including a sample.

    Parameters
    ----------
    args : arg, optional
        Any valid **positional** arguments for the desired instrument class

    instrument_type : str, optional
        Used to select Triple Axis instrument 'tas' or time of flight
        instrument 'tof'. Default: 'tas'

    engine : str, optional
        Used to select the engine for resolution calculations. Default:
        'neutronpy' for both 'tas' and 'tof' instruments.

    kwargs : kwarg, optional
        Any valid keyword arguments for the desired instrument class

    """

    def __init__(self, *args, **kwargs):
        if 'instrument_type' not in kwargs:
            kwargs['instrument_type'] = 'tas'
        if 'engine' not in kwargs:
            kwargs['engine'] = 'neutronpy'

        self.instrument_type = kwargs['instrument_type']
        self.engine = kwargs['engine']

        if kwargs['instrument_type'] == 'tas':
            if kwargs['engine'] == 'neutronpy':
                self.__class__ = TripleAxisInstrument
                self.__init__(*args, **kwargs)
            elif kwargs['engine'] == 'takin':
                warn("Takin engine is not yet supported")
                self.__class__ = TakinTripleAxis
                self.__init__(*args, **kwargs)

        elif kwargs['instrument_type'] == 'tof':
            if kwargs['engine'] == 'neutronpy':
                self.__class__ = TimeOfFlightInstrument
                self.__init__(*args, **kwargs)
            elif kwargs['engine'] == 'takin':
                warn("Takin engine is not yet supported")
                self.__class__ = TakinTimeOfFlight
                self.__init__(*args, **kwargs)

    def __repr__(self):
        return "Instrument('{0}', engine='{1}')".format(self.instrument_type,
                                                        self.engine)
{ "content_hash": "1fddc7c91ad525a88168241f7700e9c6", "timestamp": "", "source": "github", "line_count": 60, "max_line_length": 90, "avg_line_length": 35.93333333333333, "alnum_prop": 0.5978664192949907, "repo_name": "pseudocubic/neutronpy", "id": "928601db96d954f32ff7ab5fb8ff08748847e800", "size": "2180", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "neutronpy/instrument/instrument.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "236" }, { "name": "Makefile", "bytes": "1940" }, { "name": "Python", "bytes": "514916" }, { "name": "Shell", "bytes": "689" } ], "symlink_target": "" }
""" Command that will configure the neo4j connection. """ from rhobot.components.commands.base_command import BaseCommand from neo_backend.components.enums import GRAPH_URL_KEY, LOGIN_KEY, PASSWORD_KEY class ConfigureNeo4j(BaseCommand): name = 'configure_neo4j' description = 'Configure NEO4J' dependencies = BaseCommand.default_dependencies.union({'rho_bot_configuration'}) def post_init(self): super(ConfigureNeo4j, self).post_init() self._configuration = self.xmpp['rho_bot_configuration'] def command_start(self, request, initial_session): """ Start the command. :param request: :param initial_session: :return: """ graph_url = self._configuration.get_value(GRAPH_URL_KEY, '', persist_if_missing=False) login = self._configuration.get_value(LOGIN_KEY, '', persist_if_missing=False) form = self._forms.make_form() form.add_field(var='url', label='Graph Url', required=True, value=graph_url) form.add_field(var='login', label='Login', required=False, value=login) form.add_field(var='password', label='Password', required=False, ftype='private') initial_session['payload'] = form initial_session['next'] = self._store_configuration initial_session['has_next'] = False return initial_session def _store_configuration(self, payload, session): """ Store the configuration details. :param payload: :param session: :return: """ values = payload.get_values() storage_dictionary = { GRAPH_URL_KEY: values['url'] } if 'login' in values and values['login']: storage_dictionary[LOGIN_KEY] = values['login'] if 'password' in values and values['password']: storage_dictionary[PASSWORD_KEY] = values['password'] self._configuration.merge_configuration(storage_dictionary) session['payload'] = None session['next'] = None session['has_next'] = None return session configure_neo4j = ConfigureNeo4j
{ "content_hash": "dac78e582c9e7cb18ba153e2cce2ca1b", "timestamp": "", "source": "github", "line_count": 67, "max_line_length": 94, "avg_line_length": 31.850746268656717, "alnum_prop": 0.633552014995314, "repo_name": "rerobins/rho_neo_backend", "id": "ab8c2038853ada790d869ead9a68560b43a7be6c", "size": "2134", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "neo_backend/components/commands/configure_neo4j.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "26720" } ], "symlink_target": "" }
'''This file generates shell code for the setup.SHELL scripts to set environment variables''' from __future__ import print_function import argparse import copy import errno import os import platform import sys CATKIN_MARKER_FILE = '.catkin' system = platform.system() IS_DARWIN = (system == 'Darwin') IS_WINDOWS = (system == 'Windows') # subfolder of workspace prepended to CMAKE_PREFIX_PATH ENV_VAR_SUBFOLDERS = { 'CMAKE_PREFIX_PATH': '', 'CPATH': 'include', 'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')], 'PATH': 'bin', 'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')], 'PYTHONPATH': 'lib/python2.7/dist-packages', } def rollback_env_variables(environ, env_var_subfolders): ''' Generate shell code to reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH. This does not cover modifications performed by environment hooks. ''' lines = [] unmodified_environ = copy.copy(environ) for key in sorted(env_var_subfolders.keys()): subfolders = env_var_subfolders[key] if not isinstance(subfolders, list): subfolders = [subfolders] for subfolder in subfolders: value = _rollback_env_variable(unmodified_environ, key, subfolder) if value is not None: environ[key] = value lines.append(assignment(key, value)) if lines: lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH')) return lines def _rollback_env_variable(environ, name, subfolder): ''' For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder. :param subfolder: str '' or subfoldername that may start with '/' :returns: the updated value of the environment variable. ''' value = environ[name] if name in environ else '' env_paths = [path for path in value.split(os.pathsep) if path] value_modified = False if subfolder: if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)): subfolder = subfolder[1:] if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)): subfolder = subfolder[:-1] for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True): path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path path_to_remove = None for env_path in env_paths: env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path if env_path_clean == path_to_find: path_to_remove = env_path break if path_to_remove: env_paths.remove(path_to_remove) value_modified = True new_value = os.pathsep.join(env_paths) return new_value if value_modified else None def _get_workspaces(environ, include_fuerte=False, include_non_existing=False): ''' Based on CMAKE_PREFIX_PATH return all catkin workspaces. 
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool`` ''' # get all cmake prefix paths env_name = 'CMAKE_PREFIX_PATH' value = environ[env_name] if env_name in environ else '' paths = [path for path in value.split(os.pathsep) if path] # remove non-workspace paths workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))] return workspaces def prepend_env_variables(environ, env_var_subfolders, workspaces): ''' Generate shell code to prepend environment variables for the all workspaces. ''' lines = [] lines.append(comment('prepend folders of workspaces to environment variables')) paths = [path for path in workspaces.split(os.pathsep) if path] prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '') lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix)) for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']): subfolder = env_var_subfolders[key] prefix = _prefix_env_variable(environ, key, paths, subfolder) lines.append(prepend(environ, key, prefix)) return lines def _prefix_env_variable(environ, name, paths, subfolders): ''' Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items. ''' value = environ[name] if name in environ else '' environ_paths = [path for path in value.split(os.pathsep) if path] checked_paths = [] for path in paths: if not isinstance(subfolders, list): subfolders = [subfolders] for subfolder in subfolders: path_tmp = path if subfolder: path_tmp = os.path.join(path_tmp, subfolder) # exclude any path already in env and any path we already added if path_tmp not in environ_paths and path_tmp not in checked_paths: checked_paths.append(path_tmp) prefix_str = os.pathsep.join(checked_paths) if prefix_str != '' and environ_paths: prefix_str += os.pathsep return prefix_str def assignment(key, value): if not IS_WINDOWS: return 'export %s="%s"' % (key, value) else: return 'set %s=%s' % (key, value) def comment(msg): if not IS_WINDOWS: return '# %s' % msg else: return 'REM %s' % msg def prepend(environ, key, prefix): if key not in environ or not environ[key]: return assignment(key, prefix) if not IS_WINDOWS: return 'export %s="%s$%s"' % (key, prefix, key) else: return 'set %s=%s%%%s%%' % (key, prefix, key) def find_env_hooks(environ, cmake_prefix_path): ''' Generate shell code with found environment hooks for the all workspaces. 
''' lines = [] lines.append(comment('found environment hooks in workspaces')) generic_env_hooks = [] generic_env_hooks_workspace = [] specific_env_hooks = [] specific_env_hooks_workspace = [] generic_env_hooks_by_filename = {} specific_env_hooks_by_filename = {} generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh' specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None # remove non-workspace paths workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))] for workspace in reversed(workspaces): env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d') if os.path.isdir(env_hook_dir): for filename in sorted(os.listdir(env_hook_dir)): if filename.endswith('.%s' % generic_env_hook_ext): # remove previous env hook with same name if present if filename in generic_env_hooks_by_filename: i = generic_env_hooks.index(generic_env_hooks_by_filename[filename]) generic_env_hooks.pop(i) generic_env_hooks_workspace.pop(i) # append env hook generic_env_hooks.append(os.path.join(env_hook_dir, filename)) generic_env_hooks_workspace.append(workspace) generic_env_hooks_by_filename[filename] = generic_env_hooks[-1] elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext): # remove previous env hook with same name if present if filename in specific_env_hooks_by_filename: i = specific_env_hooks.index(specific_env_hooks_by_filename[filename]) specific_env_hooks.pop(i) specific_env_hooks_workspace.pop(i) # append env hook specific_env_hooks.append(os.path.join(env_hook_dir, filename)) specific_env_hooks_workspace.append(workspace) specific_env_hooks_by_filename[filename] = specific_env_hooks[-1] env_hooks = generic_env_hooks + specific_env_hooks env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace count = len(env_hooks) lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count)) for i in range(count): lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i])) lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i])) return lines def _parse_arguments(args=None): parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.') parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context') return parser.parse_known_args(args=args)[0] if __name__ == '__main__': try: try: args = _parse_arguments() except Exception as e: print(e, file=sys.stderr) sys.exit(1) # environment at generation time CMAKE_PREFIX_PATH = '/opt/ros/indigo'.split(';') # prepend current workspace if not already part of CPP base_path = os.path.dirname(__file__) if base_path not in CMAKE_PREFIX_PATH: CMAKE_PREFIX_PATH.insert(0, base_path) CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH) environ = dict(os.environ) lines = [] if not args.extend: lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS) lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH) lines += find_env_hooks(environ, CMAKE_PREFIX_PATH) print('\n'.join(lines)) # need to explicitly flush the output sys.stdout.flush() except IOError as e: # and catch potantial "broken pipe" if stdout is not writable # which can happen when piping the output to a file but the disk is full if e.errno == errno.EPIPE: print(e, file=sys.stderr) sys.exit(2) raise sys.exit(0)
{ "content_hash": "d4c932f078c184c55300f27a98b9b5ca", "timestamp": "", "source": "github", "line_count": 253, "max_line_length": 213, "avg_line_length": 41.98814229249012, "alnum_prop": 0.6395556810693778, "repo_name": "YinYangOfDao/ComputerVision", "id": "dce614e238d84c6a9e9e853bf00d276732e7832e", "size": "12271", "binary": false, "copies": "18", "ref": "refs/heads/master", "path": "pourwater/build/catkin_generated/installspace/_setup_util.py", "mode": "33261", "license": "mit", "language": [ { "name": "C++", "bytes": "15286" }, { "name": "CMake", "bytes": "19355" }, { "name": "Python", "bytes": "36812" }, { "name": "Shell", "bytes": "9033" } ], "symlink_target": "" }
##############################################################################
#
# Kennedy Institute of Rheumatology
#
# $Id$
#
# Copyright (C) 2015 Stephen Sansom
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
###############################################################################
"""
===========================
Pipeline cram2fastq
===========================

:Author: Stephen Sansom
:Release: $Id$
:Date: |today|
:Tags: Python

Overview
========

This pipeline converts Sanger CRAM files to fastq.gz, optionally quality
trimming and reconciling the paired fastq files.

Usage
=====

See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` for general
information on how to use CGAT pipelines.

Configuration
-------------

The pipeline requires a configured :file:`pipeline.yml` file.
CGATReport requires a :file:`conf.py` and optionally a
:file:`cgatreport.ini` file (see :ref:`PipelineReporting`).

Default configuration files can be generated by executing:

   python <srcdir>/pipeline_cram2fastq.py config

Input files
-----------

Requirements
------------

On top of the default CGAT setup, the pipeline requires the following
software to be in the path:

.. Add any additional external requirements such as 3rd party software
   or R modules below:

Requirements:

* samtools >= 1.1

Pipeline output
===============

Glossary
========

Code
====

"""
from ruffus import *

import sys
import os
import glob
import sqlite3

from CGATCore import Experiment as E
from CGATCore import Pipeline as P
from CGATCore import Database as DB

import pysam

# -------------------------- < parse parameters > --------------------------- #
# load options from the config file
PARAMS = P.get_parameters(
    ["%s/pipeline.yml" % os.path.splitext(__file__)[0],
     "../pipeline.yml",
     "pipeline.yml"])

# ----------------------- < pipeline configuration > ------------------------ #
if len(sys.argv) > 1:
    if(sys.argv[1] == "config") and __name__ == "__main__":
        sys.exit(P.main(sys.argv))

# ------------------------< specific pipeline tasks >------------------------ #


@follows(mkdir("validate.cram.dir"))
@transform(glob.glob("data.dir/*.cram"),
           regex(r".*/(.*).cram"),
           [r"validate.cram.dir/\1.validate",
            r"validate.cram.dir/\1.quality"])
def validateCramFiles(infile, outfiles):
    '''Validate CRAM files by exit status of cramtools qstat.
       Save the quality scores of cram files.
    '''

    outfile, outfile_quality = outfiles

    statement = '''temp_quality=`mktemp -p %(cluster_tmpdir)s`;
                   cramtools qstat -I %(infile)s
                   > $temp_quality;
                   echo $?
> %(outfile)s; cat $temp_quality | awk '{OFS="\\t"} {print $1,$2}' > %(outfile_quality)s; rm $temp_quality; ''' P.run(statement) @follows(validateCramFiles) @merge(validateCramFiles, "validate.cram.dir/summary.txt") def inspectValidations(infiles, outfile): '''Check that all crams pass validation or raise an Error.''' validation_files = [fn for filenames in infiles for fn in filenames if fn.endswith(".validate")] outfile_handle = open(outfile, "w") exit_states = [] for validation_file in validation_files: with open(validation_file, "r") as vf_handle: exit_status = vf_handle.read().strip("\n") exit_states.append(int(exit_status)) outfile_handle.write("\t".join([validation_file, exit_status])+"\n") outfile_handle.close() if sum(exit_states) != 0: raise ValueError("One or more cram files failed validation") @follows(validateCramFiles) @merge(validateCramFiles, "validate.cram.dir/cram_quality.load") def loadCramQuality(infiles, outfile): ''' Load the quality scores for the different cells into the database (summarized table). ''' quality_files = [fn for filenames in infiles for fn in filenames if fn.endswith(".quality")] P.concatenate_and_load(quality_files, outfile, regex_filename="validate.cram.dir/(.*).quality", cat="track", has_titles=False, header="cramID,number_reads,cram_quality_score") @follows(inspectValidations, mkdir("cell.info.dir")) @merge(glob.glob("data.dir/*.cram"), "cell.info.dir/cells.txt") def extractSampleInformation(infiles, outfile): '''Make a table of cells and corresponding cram files''' # build a dictionary of cell to cram file mappings cells = {} for cram_file in infiles: cram = pysam.AlignmentFile(cram_file, "rc") print(cram.header) cell = cram.header["RG"][0]["SM"] if cell not in cells.keys(): cells[cell] = [cram_file] else: cells[cell].append(cram_file) cram.close() # write out a per-cell list of cram files outdir = os.path.dirname(outfile) outfile_handle = open(outfile, "w") outfile_handle.write("#cell\tcram_files\n") for cell in cells.keys(): outfile_handle.write("\t".join([cell, ",".join(cells[cell])])+"\n") outfile_handle.close() @split(extractSampleInformation, "cell.info.dir/*.cell") def cellCramLists(infile, outfiles): '''Make a per-cell file containing the cram file(s) corresponding to the cell''' out_dir = os.path.dirname(infile) with open(infile, "r") as cell_list: for record in cell_list: if record.startswith("#"): continue cell, cram_list = record.strip("\n").split("\t") crams = cram_list.split(",") cell_outfile_name = os.path.join(out_dir, cell+".cell") with open(cell_outfile_name, "w") as cell_file_handle: for cram in crams: cell_file_handle.write(cram+"\n") @follows(mkdir("fastq.dir"), mkdir("fastq.temp.dir"), extractSampleInformation) @transform(cellCramLists, regex(r".*/(.*).cell"), (r"fastq.dir/\1.fastq.1.gz", r"fastq.dir/\1.fastq.2.gz")) def cram2fastq(infile, outfiles): '''Convert Sanger CRAM files to Fastq format Takes care of merging, quality trimming and pair reconciliation. Intermediate files are not kept by default.''' # TODO: make quality trimming optional. 
################################### # set variables and open a log file ################################### cell_name = os.path.basename(infile)[:-len(".cell")] out_dir = os.path.dirname(outfiles[0]) temp_dir = "fastq.temp.dir" log_file = os.path.join(temp_dir, cell_name + ".fastq.extraction.log") log = open(log_file, "w") log.write("Fastq extraction log file for %(infile)s\n\n") def _merge_dicts(a, b): x = a.copy() x.update(b) return(x) temp_files = [] # ############################################## # Extract per-end Fastq(s) from the cram file(s) # ############################################## raw_fastq_names = [] with open(infile, "rb") as cram_files: for line in cram_files: cram = line.strip() cram_basename = os.path.basename(cram)[:-len(".cram")] raw_fastq_name = os.path.join(temp_dir, cram_basename) raw_fastq_names.append(raw_fastq_name) job_memory = PARAMS["preprocess_memory"] statement = '''cramtools fastq --enumerate --reverse -F %(raw_fastq_name)s -I %(cram)s --gzip ''' log.write("Extracting fastqs from %(cram)s:" % locals() + "\n") log.write(statement % locals() + "\n") P.run(statement) log.write("done.\n\n") # #################################### # Perform quality trimming # Merging is also taken care of here. # #################################### quality = PARAMS["preprocess_quality_threshold"] minlen = PARAMS["preprocess_min_length"] trim = PARAMS["preprocess_trim"] trimmed_fastq_prefix = os.path.join(temp_dir, cell_name) trimmed_fastq_files = [] # fastq(s) for each end are quality trimmed separately for end in ["_1", "_2"]: raw_fastqs = [x + end + ".fastq.gz" for x in raw_fastq_names] temp_files += raw_fastqs fastq_list = " ".join(raw_fastqs) trimmed_fastq_name = trimmed_fastq_prefix + end + ".trimmed.fastq.gz" trimmed_fastq_files.append(trimmed_fastq_name) log.write(">> Quality trimming %(fastq_list)s: " % locals() + "\n") if trim: statement = '''zcat %(fastq_list)s | fastq_quality_trimmer -Q33 -t %(quality)s -l %(minlen)s | gzip -c > %(trimmed_fastq_name)s ''' else: statement = '''zcat %(fastq_list)s | gzip -c > %(trimmed_fastq_name)s ''' log.write(statement % _merge_dicts(PARAMS, locals()) + "\n") P.run(statement) log.write("done. 
\n\n") # ################## # Reconcile the ends # ################## if PARAMS["preprocess_reconcile"] != "False": temp_files += trimmed_fastq_files end1, end2 = trimmed_fastq_files reconciled_fastq_prefix = outfiles[0][:-len(".1.gz")] log.write(">> Reconciling pairs, %(end1)s & %(end2)s: " % locals() + "\n") statement = '''python %(scriptsdir)s/fastqs2fastqs.py %(end1)s %(end2)s --method reconcile --chop --unpaired -o "%(reconciled_fastq_prefix)s.%%s.gz"; ''' log.write(statement % _merge_dicts(PARAMS, locals()) + "\n") P.run(statement) log.write("done\n\n") else: trimmed_fastq_prefix = outfiles[0][:-len(".1.gz")] for end in trimmed_fastq_files: if "1.trimmed" in end: endn = "1" else: endn = "2" trimmed_end_name = ".".join([trimmed_fastq_prefix, endn, "gz"]) os.symlink(os.path.abspath(end), trimmed_end_name) ############################## # Clean up the temporary files ############################## if PARAMS["keep_temporary"] == 0: temp_file_list = " ".join(temp_files) # record files sizes and md5 checksums of the temporary files log.write(">> Recording sizes and checksums of temporary files:\n") statement = '''ls -l %(temp_file_list)s > %(temp_dir)s/%(cell_name)s.ls; checkpoint; md5sum %(temp_file_list)s > %(temp_dir)s/%(cell_name)s.md5; ''' log.write(statement % locals() + "\n") P.run(statement) log.write("done\n\n") # unlink (delete) the temporary files log.write(">> unlinking temporary files: " + temp_file_list + "\n") for temp_file in temp_files: os.unlink(temp_file) log.write("tempororay files unlinked\n") log.close() # ---------------------< generic pipeline tasks >---------------------------- # @follows(cram2fastq, loadCramQuality) def full(): pass # ########################################################################### # if __name__ == "__main__": sys.exit(P.main(sys.argv))
{ "content_hash": "a452bb32c7f674d56bff582c40c070ac", "timestamp": "", "source": "github", "line_count": 427, "max_line_length": 79, "avg_line_length": 29.807962529274004, "alnum_prop": 0.5246700188560653, "repo_name": "snsansom/scseq", "id": "2cc4848b76a53a3bac0a574be981e9ba529e9e40", "size": "12728", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "pipelines/pipeline_cram2fastq.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "281011" }, { "name": "Python", "bytes": "83180" }, { "name": "R", "bytes": "3225" } ], "symlink_target": "" }
try: import google.appengine.ext.blobstore as blobstore from google.appengine.ext.blobstore.blobstore import BlobReferenceProperty import google.appengine.ext.db as db import google.appengine.ext.webapp as webapp import google.appengine.api.files as files import google.appengine.api.memcache as memcache import google.appengine.api.urlfetch as urlfetch # Default to a 5 minute cache timeout. CACHE_TIMEOUT = 300 except ImportError: # Cache for one second because zero means cache forever. CACHE_TIMEOUT = 1 import re from StringIO import StringIO FAKE_URL_FETCHER_CONFIGURATION = None def ConfigureFakeUrlFetch(configuration): """|configuration| is a dictionary mapping strings to fake urlfetch classes. A fake urlfetch class just needs to have a fetch method. The keys of the dictionary are treated as regex, and they are matched with the URL to determine which fake urlfetch is used. """ global FAKE_URL_FETCHER_CONFIGURATION FAKE_URL_FETCHER_CONFIGURATION = dict( (re.compile(k), v) for k, v in configuration.iteritems()) def _GetConfiguration(key): if not FAKE_URL_FETCHER_CONFIGURATION: raise ValueError('No fake fetch paths have been configured. ' 'See ConfigureFakeUrlFetch in appengine_wrappers.py.') for k, v in FAKE_URL_FETCHER_CONFIGURATION.iteritems(): if k.match(key): return v return None class _RPC(object): def __init__(self, result=None): self.result = result def get_result(self): return self.result class FakeUrlFetch(object): """A fake urlfetch module that uses the current |FAKE_URL_FETCHER_CONFIGURATION| to map urls to fake fetchers. """ class DownloadError(Exception): pass class _Response(object): def __init__(self, content): self.content = content self.headers = { 'content-type': 'none' } self.status_code = 200 def fetch(self, url, **kwargs): response = self._Response(_GetConfiguration(url).fetch(url)) if response.content is None: response.status_code = 404 return response def create_rpc(self): return _RPC() def make_fetch_call(self, rpc, url, **kwargs): rpc.result = self.fetch(url) urlfetch = FakeUrlFetch() class NotImplemented(object): def __getattr__(self, attr): raise NotImplementedError() _BLOBS = {} class FakeBlobstore(object): class BlobReader(object): def __init__(self, blob_key): self._data = _BLOBS[blob_key].getvalue() def read(self): return self._data blobstore = FakeBlobstore() class FakeFileInterface(object): """This class allows a StringIO object to be used in a with block like a file. """ def __init__(self, io): self._io = io def __exit__(self, *args): pass def write(self, data): self._io.write(data) def __enter__(self, *args): return self._io class FakeFiles(object): _next_blobstore_key = 0 class blobstore(object): @staticmethod def create(): FakeFiles._next_blobstore_key += 1 return FakeFiles._next_blobstore_key @staticmethod def get_blob_key(filename): return filename def open(self, filename, mode): _BLOBS[filename] = StringIO() return FakeFileInterface(_BLOBS[filename]) def GetBlobKeys(self): return _BLOBS.keys() def finalize(self, filename): pass files = FakeFiles() class InMemoryMemcache(object): """A fake memcache that does nothing. 
""" class Client(object): def set_multi_async(self, mapping, namespace='', time=0): return def get_multi_async(self, keys, namespace='', time=0): return _RPC(result=dict((k, None) for k in keys)) def set(self, key, value, namespace='', time=0): return def get(self, key, namespace='', time=0): return None def delete(self, key, namespace): return memcache = InMemoryMemcache() class webapp(object): class RequestHandler(object): """A fake webapp.RequestHandler class for Handler to extend. """ def __init__(self, request, response): self.request = request self.response = response def redirect(self, path): self.request.path = path class _Db_Result(object): def __init__(self, data): self._data = data class _Result(object): def __init__(self, value): self.value = value def get(self): return self._Result(self._data) class db(object): _store = {} class StringProperty(object): pass class Model(object): def __init__(self, key_='', value=''): self._key = key_ self._value = value @staticmethod def gql(query, key): return _Db_Result(db._store.get(key, None)) def put(self): db._store[self._key] = self._value class BlobReferenceProperty(object): pass
{ "content_hash": "753cdec5d862019d9defc53b7f5e7a7a", "timestamp": "", "source": "github", "line_count": 187, "max_line_length": 80, "avg_line_length": 26.77540106951872, "alnum_prop": 0.6395046934291991, "repo_name": "nacl-webkit/chrome_deps", "id": "34956b3ab3ea4fddfe6ae46f6b13ece160d9a8c6", "size": "5326", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "chrome/common/extensions/docs/server2/appengine_wrappers.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ASP", "bytes": "853" }, { "name": "AppleScript", "bytes": "6973" }, { "name": "Arduino", "bytes": "464" }, { "name": "Assembly", "bytes": "1173441" }, { "name": "Awk", "bytes": "9519" }, { "name": "C", "bytes": "74568368" }, { "name": "C#", "bytes": "1132" }, { "name": "C++", "bytes": "156174457" }, { "name": "DOT", "bytes": "1559" }, { "name": "F#", "bytes": "381" }, { "name": "Java", "bytes": "3088381" }, { "name": "JavaScript", "bytes": "18179048" }, { "name": "Logos", "bytes": "4517" }, { "name": "M", "bytes": "2190" }, { "name": "Matlab", "bytes": "3044" }, { "name": "Objective-C", "bytes": "6965520" }, { "name": "PHP", "bytes": "97817" }, { "name": "Perl", "bytes": "932725" }, { "name": "Python", "bytes": "8458718" }, { "name": "R", "bytes": "262" }, { "name": "Ragel in Ruby Host", "bytes": "3621" }, { "name": "Shell", "bytes": "1526176" }, { "name": "Tcl", "bytes": "277077" }, { "name": "XSLT", "bytes": "13493" } ], "symlink_target": "" }
"""Nullable types Revision ID: edec8846424d Revises: 57d748d33c22 Create Date: 2017-09-07 15:01:41.329026 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'edec8846424d' down_revision = '57d748d33c22' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('account_sharing_types') as batch_op: batch_op.alter_column('owner_id', nullable=True) with op.batch_alter_table('account_types') as batch_op: batch_op.alter_column('owner_id', nullable=True) with op.batch_alter_table('transaction_types') as batch_op: batch_op.alter_column('owner_id', nullable=True) with op.batch_alter_table('transaction_tags') as batch_op: batch_op.alter_column('owner_id', nullable=True) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('account_sharing_types') as batch_op: batch_op.alter_column('owner_id', nullable=False) with op.batch_alter_table('account_types') as batch_op: batch_op.alter_column('owner_id', nullable=False) with op.batch_alter_table('transaction_types') as batch_op: batch_op.alter_column('owner_id', nullable=False) with op.batch_alter_table('transaction_tags') as batch_op: batch_op.alter_column('owner_id', nullable=False) # ### end Alembic commands ###
{ "content_hash": "4c548269b446b2e95640a72c4f0cf25a", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 67, "avg_line_length": 36.61904761904762, "alnum_prop": 0.6723016905071522, "repo_name": "csdt/Pawi", "id": "71d07d95d376410f9b3ca3fd676dafc6735fd844", "size": "1538", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "migrations/versions/edec8846424d_.py", "mode": "33188", "license": "mit", "language": [ { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "34529" } ], "symlink_target": "" }
import os
import sys
import rest_framework_jwt
from setuptools import setup

name = 'djangorestframework-jwt'
version = rest_framework_jwt.__version__
package = 'rest_framework_jwt'
description = 'JSON Web Token based authentication for Django REST framework'
url = 'https://github.com/GetBlimp/django-rest-framework-jwt'
author = 'Jose Padilla'
author_email = 'jpadilla@getblimp.com'
license = 'MIT'
install_requires = [
    'PyJWT>=1.1.0,<2.0.0',
]


def read(*paths):
    """
    Build a file path from paths and return the contents.
    """
    with open(os.path.join(*paths), 'r') as f:
        return f.read()


def get_packages(package):
    """
    Return root package and all sub-packages.
    """
    return [dirpath
            for dirpath, dirnames, filenames in os.walk(package)
            if os.path.exists(os.path.join(dirpath, '__init__.py'))]


def get_package_data(package):
    """
    Return all files under the root package, that are not in a
    package themselves.
    """
    walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
            for dirpath, dirnames, filenames in os.walk(package)
            if not os.path.exists(os.path.join(dirpath, '__init__.py'))]

    filepaths = []
    for base, filenames in walk:
        filepaths.extend([os.path.join(base, filename)
                          for filename in filenames])
    return {package: filepaths}


if sys.argv[-1] == 'publish':
    os.system("python setup.py sdist upload")
    os.system("python setup.py bdist_wheel upload")
    print("You probably want to also tag the version now:")
    print(" git tag -a {0} -m 'version {0}'".format(version))
    print(" git push --tags")
    sys.exit()


setup(
    name=name,
    version=version,
    url=url,
    license=license,
    description=description,
    long_description=read('README.rst'),
    author=author,
    author_email=author_email,
    packages=get_packages(package),
    package_data=get_package_data(package),
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
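# Usage note (illustrative, not part of the original file): a typical local
# install and release flow for this setup script would be, e.g.:
#
#   pip install .               # install the package and its PyJWT dependency
#   python setup.py publish     # runs the sdist/bdist_wheel upload branch above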
{ "content_hash": "f044c5612899d75f7e8f4fbec5f74be2", "timestamp": "", "source": "github", "line_count": 92, "max_line_length": 77, "avg_line_length": 29.141304347826086, "alnum_prop": 0.6247668780305856, "repo_name": "erichonkanen/django-rest-framework-jwt", "id": "cb8a1896c3993843f54009d9e27a636947348a48", "size": "2728", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "53687" } ], "symlink_target": "" }
__revision__ = "test/scons-time/obj/objglob.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog" """ Verify that the obj subcommand globs for files. """ import TestSCons_time test = TestSCons_time.TestSCons_time() lines = [ ' pre-read post-read pre-build post-build\n' ] line_fmt = ' 601%(i)s 602%(i)s 603%(i)s 604%(i)s %(logfile_name)s\n' for i in range(9): logfile_name = 'foo-%s.log' % i test.fake_logfile(logfile_name, i) lines.append(line_fmt % locals()) expect = ''.join(lines) test.run(arguments = 'obj Builder.BuilderBase foo-*.log', stdout = expect) test.run(arguments = 'obj Builder.BuilderBase foo-?.log', stdout = expect) test.pass_test() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
{ "content_hash": "fe375e23c0f11db61f5fe3948fb6fedb", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 104, "avg_line_length": 24.676470588235293, "alnum_prop": 0.6424314660309892, "repo_name": "EmanueleCannizzaro/scons", "id": "c43474701ff84289a96c95880392c7be6c271d08", "size": "1974", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/scons-time/obj/objglob.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "2491" }, { "name": "C", "bytes": "659" }, { "name": "C++", "bytes": "598" }, { "name": "CSS", "bytes": "18502" }, { "name": "D", "bytes": "1997" }, { "name": "HTML", "bytes": "817651" }, { "name": "Java", "bytes": "6860" }, { "name": "JavaScript", "bytes": "215495" }, { "name": "Makefile", "bytes": "3795" }, { "name": "Perl", "bytes": "29978" }, { "name": "Python", "bytes": "7510453" }, { "name": "Roff", "bytes": "556545" }, { "name": "Ruby", "bytes": "11074" }, { "name": "Shell", "bytes": "52682" }, { "name": "XSLT", "bytes": "7567242" } ], "symlink_target": "" }
""" wiki ~~~~~~~~~ writing module for markdown format similar wiki. :copyright: (c) 2014 by geeksaga. :license: MIT LICENSE 2.0, see license for more details. """ from flask import Flask, session, redirect, request, url_for from flask import render_template, abort from jinja2 import TemplateNotFound from werkzeug import generate_password_hash from wtforms import Form, TextField, PasswordField, HiddenField, validators from ..blueprint import frontend from ..util.logger import Log from ..model.database import DBManager as db from ..model.user import User import markdown @frontend.route('/user') @frontend.route('/user/<name>', methods=['GET', 'POST']) def show(name=None): try: if request.method == 'POST': return "Hello World2!" return markdown.markdown(render_template("index.html", name=name)) except TemplateNotFound: abort(404) @frontend.route('/signup', methods=['GET']) @frontend.route('/user/signup', methods=['GET']) def signupForm(): form = RegistrationForm(request.form) try: return render_template('signup.html', form = form) except TemplateNotFound: abort(404) @frontend.route('/signup', methods=['POST']) @frontend.route('/user/signup', methods=['POST']) def signup(): form = RegistrationForm(request.form) if form.validate(): email = form.email.data username = form.username.data password = form.password.data try: user = User(email, username, generate_password_hash(password)) db.session().add(user) db.session().commit() except Exception as e: error = "DB error occurs : " + str(e) Log.error(error) db.session().rollback() raise e else: return redirect(url_for('.login', email=email)) else: return render_template('signup.html', form = form) class RegistrationForm(Form): email = TextField('email', [validators.Required('Email을 입력하세요.'), validators.Length(min=7, max=100, message='7자리 이상 100자리 이하로 입력하세요.')]) username = TextField('username', [validators.Required('이름을 입력하세요.'), validators.Length(min=2, max=50, message='2자리 이상 50자리 이하로 입력하세요.')]) password = PasswordField('New Password', [validators.Required('비밀번호를 입력하세요.'), validators.Length(min=5, max=100, message='6자리 이상 100자리 이하로 입력하세요.')]) next_url = HiddenField('Next URL')
{ "content_hash": "94dac852394368c108540f74a23cf1f1", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 89, "avg_line_length": 33.88607594936709, "alnum_prop": 0.5980575270825551, "repo_name": "geekflow/archive", "id": "308671f2ce5d3a2c83936a75f1915c07d1b16ff0", "size": "2853", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "geeksaga/archive/controller/user.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3267" }, { "name": "HTML", "bytes": "12556" }, { "name": "Python", "bytes": "235576" } ], "symlink_target": "" }
import os import argparse import logging from genie import input_to_database from genie import write_invalid_reasons from genie import process_functions logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) def main(process, project_id, center=None, pemfile=None, delete_old=False, only_validate=False, oncotree_link=None, create_new_maf_database=False, debug=False, reference=None, vcf2maf_path=None, vep_path=None, vep_data=None): syn = process_functions.synLogin(pemfile, debug=debug) # Must specify correct paths to vcf2maf, VEP and VEP data # if trying to process vcf, maf and mafSP if process in ['vcf', 'maf', 'mafSP'] and not only_validate: assert os.path.exists(vcf2maf_path), ( "Path to vcf2maf (--vcf2mafPath) must be specified " "if `--process {vcf,maf,mafSP}` is used") assert os.path.exists(vep_path), ( "Path to VEP (--vepPath) must be specified " "if `--process {vcf,maf,mafSP}` is used") assert os.path.exists(vep_data), ( "Path to VEP data (--vepData) must be specified " "if `--process {vcf,maf,mafSP}` is used") # Get the Synapse Project where data is stored # Should have annotations to find the table lookup project = syn.get(project_id) database_to_synid_mapping_synid = project.annotations.get("dbMapping", "") databaseToSynIdMapping = syn.tableQuery( 'SELECT * FROM {}'.format(database_to_synid_mapping_synid[0])) databaseToSynIdMappingDf = databaseToSynIdMapping.asDataFrame() center_mapping_id = process_functions.getDatabaseSynId( syn, "centerMapping", databaseToSynIdMappingDf=databaseToSynIdMappingDf) center_mapping = syn.tableQuery('SELECT * FROM %s' % center_mapping_id) center_mapping_df = center_mapping.asDataFrame() if center is not None: assert center in center_mapping_df.center.tolist(), ( "Must specify one of these centers: {}".format( ", ".join(center_mapping_df.center))) centers = [center] else: center_mapping_df = \ center_mapping_df[~center_mapping_df['inputSynId'].isnull()] # release is a bool column center_mapping_df = center_mapping_df[center_mapping_df['release']] centers = center_mapping_df.center if oncotree_link is None: onco_link = databaseToSynIdMappingDf['Id'][ databaseToSynIdMappingDf['Database'] == 'oncotreeLink'].values[0] onco_link_ent = syn.get(onco_link) oncotree_link = onco_link_ent.externalURL # Check if you can connect to oncotree link, # if not then don't run validation / processing process_functions.checkUrl(oncotree_link) center_mapping_ent = syn.get(center_mapping_id) if center_mapping_ent.get('isProcessing', ['True'])[0] == 'True': raise Exception( "Processing/validation is currently happening. 
" "Please change/add the 'isProcessing' annotation on {} " "to False to enable processing".format(center_mapping_id)) else: center_mapping_ent.isProcessing = "True" center_mapping_ent = syn.store(center_mapping_ent) # remove this query timeout and see what happens # syn.table_query_timeout = 50000 # Create new maf database, should only happen once if its specified if create_new_maf_database: databaseToSynIdMappingDf = \ input_to_database.create_and_archive_maf_database(syn, databaseToSynIdMappingDf) for center in centers: input_to_database.center_input_to_database( syn, project_id, center, process, only_validate, vcf2maf_path, vep_path, vep_data, databaseToSynIdMappingDf, center_mapping_df, reference=reference, delete_old=delete_old, oncotree_link=oncotree_link) # To ensure that this is the new entity center_mapping_ent = syn.get(center_mapping_id) center_mapping_ent.isProcessing = "False" center_mapping_ent = syn.store(center_mapping_ent) error_tracker_synid = process_functions.getDatabaseSynId( syn, "errorTracker", databaseToSynIdMappingDf=databaseToSynIdMappingDf) # Only write out invalid reasons if the center # isnt specified and if only validate if center is None and only_validate: logger.info("WRITING INVALID REASONS TO CENTER STAGING DIRS") write_invalid_reasons.write_invalid_reasons( syn, center_mapping_df, error_tracker_synid) if __name__ == "__main__": ''' Argument parsers TODO: Fix case of arguments ''' parser = argparse.ArgumentParser( description='GENIE center ') parser.add_argument( "process", choices=['vcf', 'maf', 'main', 'mafSP'], help='Process vcf, maf or the rest of the files') parser.add_argument( "--project_id", help="Synapse Project ID where data is stored.", required=True) parser.add_argument( '--center', help='The centers') parser.add_argument( "--pemFile", type=str, help="Path to PEM file (genie.pem)") parser.add_argument( "--deleteOld", action='store_true', help="Delete all old processed and temp files") parser.add_argument( "--onlyValidate", action='store_true', help="Only validate the files, don't process") parser.add_argument( "--oncotree_link", type=str, help="Link to oncotree code") parser.add_argument( "--createNewMafDatabase", action='store_true', help="Creates a new maf database") parser.add_argument( "--debug", action='store_true', help="Add debug mode to synapse") parser.add_argument( "--reference", type=str, help="Path to VCF reference file") # DEFAULT PARAMS parser.add_argument( "--vcf2mafPath", type=str, help="Path to vcf2maf", default=os.path.expanduser("~/vcf2maf-1.6.14")) parser.add_argument( "--vepPath", type=str, help="Path to VEP", default=os.path.expanduser("~/vep")) parser.add_argument( "--vepData", type=str, help="Path to VEP data", default=os.path.expanduser("~/.vep")) args = parser.parse_args() main(args.process, project_id=args.project_id, center=args.center, pemfile=args.pemFile, delete_old=args.deleteOld, only_validate=args.onlyValidate, oncotree_link=args.oncotree_link, create_new_maf_database=args.createNewMafDatabase, debug=args.debug, reference=args.reference, vcf2maf_path=args.vcf2mafPath, vep_path=args.vepPath, vep_data=args.vepData)
{ "content_hash": "0d342e9ed3434b2f19833b028c540c46", "timestamp": "", "source": "github", "line_count": 197, "max_line_length": 92, "avg_line_length": 35.50253807106599, "alnum_prop": 0.6259651129539605, "repo_name": "thomasyu888/Genie", "id": "78e23bdd089d8b568d53ece5587507cc2c75bb50", "size": "7018", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "bin/input_to_database.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "841335" }, { "name": "Perl", "bytes": "38214" }, { "name": "Python", "bytes": "160066" }, { "name": "R", "bytes": "93771" }, { "name": "Shell", "bytes": "11666" } ], "symlink_target": "" }
""" Return dictionary of clausulae found in the prosody of Latin prose. The clausulae analysis function returns a dictionary in which the key is the type of clausula and the value is the number of times it occurs in the text. The list of clausulae used in the method is derived from the 'Prose Rhythm' section of John Ramsey's Cambridge commentary on Cicero's Philippics I-II, so it is mostly representative of Ciceronian clausulae. Because of the heavy Greek influence on Cicero's rhythms, however, the clausulae analysis may also be used on the prosody of Greek prose as well. """ __author__ = 'Tyler Kirby <tyler.kirby9398@gmail.com>' __license__ = 'MIT License. See LICENSE' class Clausulae: def __init__(self): """Initialize class.""" return @staticmethod def clausulae_analysis(prosody): """ Return dictionary in which the key is a type of clausula and the value is its frequency. :param prosody: the prosody of a prose text (must be in the format of the scansion produced by the scanner classes. :return: dictionary of prosody """ prosody = ''.join(prosody) return { 'cretic + trochee': prosody.count('¯˘¯¯x'), '4th paeon + trochee': prosody.count('˘˘˘¯¯x'), '1st paeon + trochee': prosody.count('¯˘˘˘¯x'), 'substituted cretic + trochee': prosody.count('˘˘˘˘˘¯x'), '1st paeon + anapest': prosody.count('¯˘˘˘˘˘x'), 'double cretic': prosody.count('¯˘¯¯˘x'), '4th paeon + cretic': prosody.count('˘˘˘¯¯˘x'), 'molossus + cretic': prosody.count('¯¯¯¯˘x'), 'double trochee': prosody.count('¯˘¯x'), 'molossus + double trochee': prosody.count('¯¯¯¯˘¯x'), 'cretic + double trochee': prosody.count('¯˘¯¯˘¯x'), 'dactyl + double trochee': prosody.count('¯˘˘¯˘¯x'), 'choriamb + double trochee': prosody.count('¯˘˘¯¯˘¯x'), 'cretic + iamb': prosody.count('¯˘¯˘x'), 'molossus + iamb': prosody.count('¯¯¯˘x'), 'double spondee': prosody.count('¯¯¯x'), 'cretic + double spondee': prosody.count('¯˘¯¯¯¯x'), 'heroic': prosody.count('¯˘˘¯x') }
{ "content_hash": "e375e0977fe24322aa84d5764642b36f", "timestamp": "", "source": "github", "line_count": 49, "max_line_length": 123, "avg_line_length": 45.775510204081634, "alnum_prop": 0.5951850200624164, "repo_name": "coderbhupendra/cltk", "id": "bc0051e19f88546f11a742404c21118a9c77f9e5", "size": "2334", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "cltk/prosody/latin/clausulae_analysis.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "1409127" } ], "symlink_target": "" }
import os import sys import getpass import subprocess try: from fabricate import * except ImportError, e: print "Couldn't find the fabricate module." sys.exit(1) MAXOSDIR = os.environ['MAXELEROSDIR'] MAXCOMPILERDIR = os.environ['MAXCOMPILERDIR'] DFE_MODEL = 'ISCA' MAXFILE = 'EthFwd.max' DESIGN_NAME = MAXFILE.replace('.max', '') sources = ['ethFwd.c'] target = 'ethFwd' includes = [] port_ip = { 'TOP': '172.16.50.1' , 'BOT': '172.16.60.1' } port_tap = { 'TOP': '172.16.50.10', 'BOT': '172.16.60.10' } netmask = '255.255.255.0' def slicCompile(): """Compiles a maxfile in to a .o file""" run("%s/bin/sliccompile" % (MAXCOMPILERDIR), MAXFILE, MAXFILE.replace('.max', '.o')) def getMaxCompilerInc(): """Return the includes to be used in the compilation.""" return ['-I.', '-I%s/include' % MAXOSDIR, '-I%s/include/slic' % MAXCOMPILERDIR] def getMaxCompilerLibs(): """Return the libraries to be used in linking.""" return ['-L%s/lib' % MAXCOMPILERDIR, '-L%s/lib' % MAXOSDIR, '-lslic', '-lmaxeleros', '-lm', '-lpthread'] def getLinkerLibs(): """Returns the libraries to be used for linking.""" return getMaxCompilerLibs() + [MAXFILE.replace('.max', '.o')] cflags = ['-ggdb', '-O2', '-fPIC', '-std=gnu99', '-Wall', '-Werror', '-DDESIGN_NAME=%s' % (DESIGN_NAME)] + includes + getMaxCompilerInc() def build(): compile() link() subprocess.call(['test/build.py']) print ("\n\nTo run in simulation, do:\n" "\t$ ./build.py run_sim\n" "Then, in a new terminal:\n" "\t$ cd test\n" "\t$ ./receiver " + port_tap["BOT"] + "\n" "And in another terminal:\n" "\t$ ./sender " + port_ip["TOP"] + "\n") def compile(): slicCompile() for source in sources: run('gcc', cflags, '-c', source, '-o', source.replace('.c', '.o')) def link(): objects = [s.replace('.c', '.o') for s in sources] run('gcc', objects, getLinkerLibs(), '-o', target) def clean(): autoclean() def getSimName(): return getpass.getuser() + 'Sim' def maxcompilersim(): return '%s/bin/maxcompilersim' % MAXCOMPILERDIR def eth_sim(port): cmd = ['-e', 'QSFP_%s_10G_PORT1:%s:%s' % (port, port_tap[port], netmask)] cmd += ['-p', 'QSFP_%s_10G_PORT1:%s.pcap' % (port, port) ] return cmd def run_sim(): restart_sim() subprocess.call(['./' + target, port_ip["TOP"], port_ip["BOT"], port_tap["BOT"]]) def start_sim(): cmd = [maxcompilersim(), '-n', getSimName(), '-c', DFE_MODEL ] + eth_sim("TOP") + eth_sim("BOT") + ["restart"]; print "cmd " + " ".join(cmd); subprocess.call(cmd) def stop_sim(): subprocess.call([maxcompilersim(), '-n', getSimName()] + eth_sim("TOP") + eth_sim("BOT") + ["stop"]) def restart_sim(): start_sim() def sim_debug(): subprocess.call(['maxdebug', '-g', 'graph_%s' % getSimName(), '-d', '%s0:%s' % (getSimName(), getSimName()), MAXFILE]) if __name__ == '__main__': main()
{ "content_hash": "e8ecf873984b069c5052ae34e910f989", "timestamp": "", "source": "github", "line_count": 102, "max_line_length": 120, "avg_line_length": 27.745098039215687, "alnum_prop": 0.6141342756183745, "repo_name": "kklt92/NetworkingCodeExamples", "id": "68fd929b4d75802a8535cd6910ba3167fe08b252", "size": "2849", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "PacketProcessing/EthernetParsing/runtime/build.py", "mode": "33261", "license": "bsd-2-clause", "language": [ { "name": "C", "bytes": "58027" }, { "name": "Java", "bytes": "137076" }, { "name": "Makefile", "bytes": "1768" }, { "name": "Python", "bytes": "23402" }, { "name": "Shell", "bytes": "3617" } ], "symlink_target": "" }
"""Defines the templating context for SQL Lab""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import inspect import jinja2 from datetime import datetime, timedelta from dateutil.relativedelta import relativedelta import time import textwrap import uuid import random from caravel import app from caravel.utils import CaravelTemplateException config = app.config BASE_CONTEXT = { 'datetime': datetime, 'random': random, 'relativedelta': relativedelta, 'time': time, 'timedelta': timedelta, 'uuid': uuid, } BASE_CONTEXT.update(config.get('JINJA_CONTEXT_ADDONS', {})) class BaseTemplateProcessor(object): """Base class for database-specific jinja context There's this bit of magic in ``process_template`` that instantiates only the database context for the active database as a ``models.Database`` object binds it to the context object, so that object methods have access to that context. This way, {{ hive.latest_partition('mytable') }} just knows about the database it is operating in. This means that object methods are only available for the active database and are given access to the ``models.Database`` object and schema name. For globally available methods use ``@classmethod``. """ engine = None def __init__(self, database=None, query=None, table=None): self.database = database self.query = query self.schema = None if query and query.schema: self.schema = query.schema elif table: self.schema = table.schema self.context = {} self.context.update(BASE_CONTEXT) if self.engine: self.context[self.engine] = self def process_template(self, sql): """Processes a sql template >>> sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'" >>> process_template(sql) "SELECT '2017-01-01T00:00:00'" """ template = jinja2.Template(sql) return template.render(self.context) class PrestoTemplateProcessor(BaseTemplateProcessor): """Presto Jinja context The methods described here are namespaced under ``presto`` in the jinja context as in ``SELECT '{{ presto.some_macro_call() }}'`` """ engine = 'presto' @staticmethod def _partition_query(table_name, limit=0, order_by=None, filters=None): """Returns a partition query :param table_name: the name of the table to get partitions from :type table_name: str :param limit: the number of partitions to be returned :type limit: int :param order_by: a list of tuples of field name and a boolean that determines if that field should be sorted in descending order :type order_by: list of (str, bool) tuples :param filters: a list of filters to apply :param filters: dict of field anme and filter value combinations """ limit_clause = "LIMIT {}".format(limit) if limit else '' order_by_clause = '' if order_by: l = [] for field, desc in order_by: l.append(field + ' DESC' if desc else '') order_by_clause = 'ORDER BY ' + ', '.join(l) where_clause = '' if filters: l = [] for field, value in filters.items(): l.append("{field} = '{value}'".format(**locals())) where_clause = 'WHERE ' + ' AND '.join(l) sql = textwrap.dedent("""\ SHOW PARTITIONS FROM {table_name} {where_clause} {order_by_clause} {limit_clause} """).format(**locals()) return sql @staticmethod def _schema_table(table_name, schema): if '.' 
in table_name: schema, table_name = table_name.split('.') return table_name, schema def latest_partition(self, table_name): """Returns the latest (max) partition value for a table :param table_name: the name of the table, can be just the table name or a fully qualified table name as ``schema_name.table_name`` :type table_name: str >>> latest_partition('foo_table') '2018-01-01' """ table_name, schema = self._schema_table(table_name, self.schema) indexes = self.database.get_indexes(table_name, schema) if len(indexes[0]['column_names']) < 1: raise CaravelTemplateException( "The table should have one partitioned field") elif len(indexes[0]['column_names']) > 1: raise CaravelTemplateException( "The table should have a single partitioned field " "to use this function. You may want to use " "`presto.latest_sub_partition`") part_field = indexes[0]['column_names'][0] sql = self._partition_query(table_name, 1, [(part_field, True)]) df = self.database.get_df(sql, schema) return df.to_records(index=False)[0][0] def latest_sub_partition(self, table_name, **kwargs): """Returns the latest (max) partition value for a table A filtering criteria should be passed for all fields that are partitioned except for the field to be returned. For example, if a table is partitioned by (``ds``, ``event_type`` and ``event_category``) and you want the latest ``ds``, you'll want to provide a filter as keyword arguments for both ``event_type`` and ``event_category`` as in ``latest_sub_partition('my_table', event_category='page', event_type='click')`` :param table_name: the name of the table, can be just the table name or a fully qualified table name as ``schema_name.table_name`` :type table_name: str :param kwargs: keyword arguments define the filtering criteria on the partition list. There can be many of these. :type kwargs: str >>> latest_sub_partition('sub_partition_table', event_type='click') '2018-01-01' """ table_name, schema = self._schema_table(table_name, self.schema) indexes = self.database.get_indexes(table_name, schema) part_fields = indexes[0]['column_names'] for k in kwargs.keys(): if k not in k in part_field: msg = "Field [{k}] is not part of the partionning key" raise CaravelTemplateException(msg) if len(kwargs.keys()) != len(part_fields) - 1: msg = ( "A filter needs to be specified for {} out of the " "{} fields." ).format(len(part_fields)-1, len(part_fields)) raise CaravelTemplateException(msg) for field in part_fields: if field not in kwargs.keys(): field_to_return = field sql = self._partition_query( table_name, 1, [(field_to_return, True)], kwargs) df = self.database.get_df(sql, schema) if df.empty: return '' return df.to_dict()[field_to_return][0] template_processors = {} keys = tuple(globals().keys()) for k in keys: o = globals()[k] if o and inspect.isclass(o) and issubclass(o, BaseTemplateProcessor): template_processors[o.engine] = o def get_template_processor(database, table=None, query=None): TP = template_processors.get(database.backend, BaseTemplateProcessor) return TP(database=database, table=table, query=query)
{ "content_hash": "6e6ac1b5272d8721817f53e32d0e45fa", "timestamp": "", "source": "github", "line_count": 206, "max_line_length": 78, "avg_line_length": 36.9126213592233, "alnum_prop": 0.6133613887427669, "repo_name": "jeromecn/caravel_viz_full", "id": "95212c9b28b006fd08d5fa02febec4b09ae25862", "size": "7604", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "caravel/jinja_context.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "57757" }, { "name": "HTML", "bytes": "117323" }, { "name": "JavaScript", "bytes": "413661" }, { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "590411" }, { "name": "Shell", "bytes": "980" } ], "symlink_target": "" }
__author__ = 'Bohdan Mushkevych'

from synergy.scheduler.scheduler_constants import TYPE_FREERUN, STATE_MACHINE_FREERUN
from synergy.db.model.daemon_process_entry import DaemonProcessEntry
from odm.fields import StringField, ListField, ObjectIdField, BooleanField

PROCESS_NAME = 'process_name'            # name of the process to handle the schedulables
ENTRY_NAME = 'entry_name'                # name of the schedulable
DESCRIPTION = 'description'              # description of the schedulable
IS_ON = 'is_on'                          # defines if the schedulable is active or off
TRIGGER_FREQUENCY = 'trigger_frequency'  # either 'at DoW-HH:MM' or 'every XXX'
STATE_MACHINE_NAME = 'state_machine_name'

SOURCE = 'source'
SINK = 'sink'

HISTORIC_LOG = 'historic_log'            # contains list of MAX_NUMBER_OF_LOG_ENTRIES last log messages
MAX_NUMBER_OF_LOG_ENTRIES = 64
RELATED_UNIT_OF_WORK = 'related_unit_of_work'


class FreerunProcessEntry(DaemonProcessEntry):
    """ Class presents single configuration entry for the freerun process/bash_driver . """

    db_id = ObjectIdField('_id', null=True)
    source = StringField(SOURCE)
    sink = StringField(SINK)
    trigger_frequency = StringField(TRIGGER_FREQUENCY)
    is_on = BooleanField(IS_ON, default=False)
    state_machine_name = StringField(STATE_MACHINE_NAME)

    entry_name = StringField(ENTRY_NAME)
    description = StringField(DESCRIPTION)
    log = ListField(HISTORIC_LOG)
    related_unit_of_work = ObjectIdField(RELATED_UNIT_OF_WORK)

    @DaemonProcessEntry.key.getter
    def key(self):
        return self.process_name, self.entry_name

    @DaemonProcessEntry.key.setter
    def key(self, value):
        self.process_name = value[0]
        self.entry_name = value[1]

    @property
    def schedulable_name(self):
        return '{0}::{1}'.format(self.process_name, self.entry_name)


def freerun_context_entry(process_name, entry_name, classname, token, exchange,
                          trigger_frequency, is_on=True, present_on_boxes=None,
                          description=None, arguments=None, queue=None, routing=None,
                          process_type=TYPE_FREERUN, pid_file=None, log_file=None):
    """ forms process context entry """
    _ROUTING_PREFIX = 'routing_'
    _QUEUE_PREFIX = 'queue_'
    _SUFFIX = '_freerun'

    if queue is None:
        queue = _QUEUE_PREFIX + token + _SUFFIX
    if routing is None:
        routing = _ROUTING_PREFIX + token + _SUFFIX
    if pid_file is None:
        pid_file = token + _SUFFIX + '.pid'
    if log_file is None:
        log_file = token + _SUFFIX + '.log'
    if arguments is None:
        arguments = dict()
    else:
        assert isinstance(arguments, dict)

    process_entry = FreerunProcessEntry(
        process_name=process_name,
        entry_name=entry_name,
        trigger_frequency=trigger_frequency,
        time_qualifier=None,
        state_machine_name=STATE_MACHINE_FREERUN,
        is_on=is_on,
        classname=classname,
        token=token,
        present_on_boxes=present_on_boxes,
        description=description,
        mq_queue=queue,
        mq_routing_key=routing,
        mq_exchange=exchange,
        arguments=arguments,
        process_type=process_type,
        log_filename=log_file,
        pid_filename=pid_file)
    return process_entry
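# Illustrative sketch (not part of the original module): registering a freerun
# entry through the factory above. Process, class and exchange names are
# placeholders.
#
#   entry = freerun_context_entry(
#       process_name='ExampleWorker',
#       entry_name='hourly_cleanup',
#       classname='workers.example_worker.ExampleWorker',
#       token='example',
#       exchange='ex_freerun_worker',
#       trigger_frequency='every 3600',
#       arguments={'target': 'temp_files'})
#
#   entry.schedulable_name   # -> 'ExampleWorker::hourly_cleanup'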
{ "content_hash": "3e247408121c8ec685029b1de85bde18", "timestamp": "", "source": "github", "line_count": 101, "max_line_length": 102, "avg_line_length": 35.742574257425744, "alnum_prop": 0.603601108033241, "repo_name": "eggsandbeer/scheduler", "id": "f0a403f68e933b9b7fc551888dc7beb6af5aa019", "size": "3610", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "synergy/db/model/freerun_process_entry.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "20292" }, { "name": "HTML", "bytes": "25111" }, { "name": "JavaScript", "bytes": "50141" }, { "name": "Python", "bytes": "543579" }, { "name": "Shell", "bytes": "3484" } ], "symlink_target": "" }
from eventlet import greenthread import mock import six from nova import exception from nova import test from nova.tests.unit.virt.xenapi import stubs from nova.virt.xenapi import volume_utils class SROps(stubs.XenAPITestBaseNoDB): def test_find_sr_valid_uuid(self): self.session = mock.Mock() self.session.call_xenapi.return_value = 'sr_ref' self.assertEqual(volume_utils.find_sr_by_uuid(self.session, 'sr_uuid'), 'sr_ref') def test_find_sr_invalid_uuid(self): class UUIDException(Exception): details = ["UUID_INVALID", "", "", ""] self.session = mock.Mock() self.session.XenAPI.Failure = UUIDException self.session.call_xenapi.side_effect = UUIDException self.assertIsNone( volume_utils.find_sr_by_uuid(self.session, 'sr_uuid')) def test_find_sr_from_vdi(self): vdi_ref = 'fake-ref' def fake_call_xenapi(method, *args): self.assertEqual(method, 'VDI.get_SR') self.assertEqual(args[0], vdi_ref) return args[0] session = mock.Mock() session.call_xenapi.side_effect = fake_call_xenapi self.assertEqual(volume_utils.find_sr_from_vdi(session, vdi_ref), vdi_ref) def test_find_sr_from_vdi_exception(self): vdi_ref = 'fake-ref' class FakeException(Exception): pass session = mock.Mock() session.XenAPI.Failure = FakeException session.call_xenapi.side_effect = FakeException self.assertRaises(exception.StorageError, volume_utils.find_sr_from_vdi, session, vdi_ref) class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB): def test_target_host(self): self.assertEqual(volume_utils._get_target_host('host:port'), 'host') self.assertEqual(volume_utils._get_target_host('host'), 'host') # There is no default value self.assertIsNone(volume_utils._get_target_host(':port')) self.assertIsNone(volume_utils._get_target_host(None)) def test_target_port(self): self.assertEqual(volume_utils._get_target_port('host:port'), 'port') self.assertEqual(volume_utils._get_target_port('host'), 3260) class IntroduceTestCase(stubs.XenAPITestBaseNoDB): @mock.patch.object(volume_utils, '_get_vdi_ref') @mock.patch.object(greenthread, 'sleep') def test_introduce_vdi_retry(self, mock_sleep, mock_get_vdi_ref): def fake_get_vdi_ref(session, sr_ref, vdi_uuid, target_lun): fake_get_vdi_ref.call_count += 1 if fake_get_vdi_ref.call_count == 2: return 'vdi_ref' def fake_call_xenapi(method, *args): if method == 'SR.scan': return elif method == 'VDI.get_record': return {'managed': 'true'} session = mock.Mock() session.call_xenapi.side_effect = fake_call_xenapi mock_get_vdi_ref.side_effect = fake_get_vdi_ref fake_get_vdi_ref.call_count = 0 self.assertEqual(volume_utils.introduce_vdi(session, 'sr_ref'), 'vdi_ref') mock_sleep.assert_called_once_with(20) @mock.patch.object(volume_utils, '_get_vdi_ref') @mock.patch.object(greenthread, 'sleep') def test_introduce_vdi_exception(self, mock_sleep, mock_get_vdi_ref): def fake_call_xenapi(method, *args): if method == 'SR.scan': return elif method == 'VDI.get_record': return {'managed': 'true'} session = mock.Mock() session.call_xenapi.side_effect = fake_call_xenapi mock_get_vdi_ref.return_value = None self.assertRaises(exception.StorageError, volume_utils.introduce_vdi, session, 'sr_ref') mock_sleep.assert_called_once_with(20) class ParseVolumeInfoTestCase(stubs.XenAPITestBaseNoDB): def test_mountpoint_to_number(self): cases = { 'sda': 0, 'sdp': 15, 'hda': 0, 'hdp': 15, 'vda': 0, 'xvda': 0, '0': 0, '10': 10, 'vdq': -1, 'sdq': -1, 'hdq': -1, 'xvdq': -1, } for (input, expected) in cases.items(): actual = volume_utils._mountpoint_to_number(input) self.assertEqual(actual, expected, '%s yielded %s, not %s' % (input, actual, 
expected)) @classmethod def _make_connection_info(cls): target_iqn = 'iqn.2010-10.org.openstack:volume-00000001' return {'driver_volume_type': 'iscsi', 'data': {'volume_id': 1, 'target_iqn': target_iqn, 'target_portal': '127.0.0.1:3260,fake', 'target_lun': None, 'auth_method': 'CHAP', 'auth_username': 'username', 'auth_password': 'verybadpass'}} def test_parse_volume_info_parsing_auth_details(self): conn_info = self._make_connection_info() result = volume_utils._parse_volume_info(conn_info['data']) self.assertEqual('username', result['chapuser']) self.assertEqual('verybadpass', result['chappassword']) def test_parse_volume_info_missing_details(self): # Tests that a StorageError is raised if volume_id, target_host, or # target_ign is missing from connection_data. Also ensures that the # auth_password value is not present in the StorageError message. for data_key_to_null in ('volume_id', 'target_portal', 'target_iqn'): conn_info = self._make_connection_info() conn_info['data'][data_key_to_null] = None ex = self.assertRaises(exception.StorageError, volume_utils._parse_volume_info, conn_info['data']) self.assertNotIn('verybadpass', six.text_type(ex)) def test_get_device_number_raise_exception_on_wrong_mountpoint(self): self.assertRaises( exception.StorageError, volume_utils.get_device_number, 'dev/sd') class FindVBDTestCase(stubs.XenAPITestBaseNoDB): def test_find_vbd_by_number_works(self): session = mock.Mock() session.VM.get_VBDs.return_value = ["a", "b"] session.VBD.get_userdevice.return_value = "1" result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) self.assertEqual("a", result) session.VM.get_VBDs.assert_called_once_with("vm_ref") session.VBD.get_userdevice.assert_called_once_with("a") def test_find_vbd_by_number_no_matches(self): session = mock.Mock() session.VM.get_VBDs.return_value = ["a", "b"] session.VBD.get_userdevice.return_value = "3" result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) self.assertIsNone(result) session.VM.get_VBDs.assert_called_once_with("vm_ref") expected = [mock.call("a"), mock.call("b")] self.assertEqual(expected, session.VBD.get_userdevice.call_args_list) def test_find_vbd_by_number_no_vbds(self): session = mock.Mock() session.VM.get_VBDs.return_value = [] result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) self.assertIsNone(result) session.VM.get_VBDs.assert_called_once_with("vm_ref") self.assertFalse(session.VBD.get_userdevice.called) def test_find_vbd_by_number_ignores_exception(self): session = mock.Mock() session.XenAPI.Failure = test.TestingException session.VM.get_VBDs.return_value = ["a"] session.VBD.get_userdevice.side_effect = test.TestingException result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) self.assertIsNone(result) session.VM.get_VBDs.assert_called_once_with("vm_ref") session.VBD.get_userdevice.assert_called_once_with("a") class IntroduceSRTestCase(stubs.XenAPITestBaseNoDB): @mock.patch.object(volume_utils, '_create_pbd') def test_backend_kind(self, create_pbd): session = mock.Mock() session.product_version = (6, 5, 0) session.call_xenapi.return_value = 'sr_ref' params = {'sr_type': 'iscsi'} sr_uuid = 'sr_uuid' label = 'label' expected_params = {'backend-kind': 'vbd'} volume_utils.introduce_sr(session, sr_uuid, label, params) session.call_xenapi.assert_any_call('SR.introduce', sr_uuid, label, '', 'iscsi', '', False, expected_params) @mock.patch.object(volume_utils, '_create_pbd') def test_backend_kind_upstream_fix(self, create_pbd): session = mock.Mock() session.product_version = (7, 0, 0) 
session.call_xenapi.return_value = 'sr_ref' params = {'sr_type': 'iscsi'} sr_uuid = 'sr_uuid' label = 'label' expected_params = {} volume_utils.introduce_sr(session, sr_uuid, label, params) session.call_xenapi.assert_any_call('SR.introduce', sr_uuid, label, '', 'iscsi', '', False, expected_params) class BootedFromVolumeTestCase(stubs.XenAPITestBaseNoDB): def test_booted_from_volume(self): session = mock.Mock() session.VM.get_VBDs.return_value = ['vbd_ref'] session.VBD.get_userdevice.return_value = '0' session.VBD.get_other_config.return_value = {'osvol': True} booted_from_volume = volume_utils.is_booted_from_volume(session, 'vm_ref') self.assertTrue(booted_from_volume) def test_not_booted_from_volume(self): session = mock.Mock() session.VM.get_VBDs.return_value = ['vbd_ref'] session.VBD.get_userdevice.return_value = '0' session.VBD.get_other_config.return_value = {} booted_from_volume = volume_utils.is_booted_from_volume(session, 'vm_ref') self.assertFalse(booted_from_volume) class MultipleVolumesTestCase(stubs.XenAPITestBaseNoDB): def test_sr_info_two_luns(self): data1 = {'target_portal': 'host:port', 'target_iqn': 'iqn', 'volume_id': 'vol_id_1', 'target_lun': 1} data2 = {'target_portal': 'host:port', 'target_iqn': 'iqn', 'volume_id': 'vol_id_2', 'target_lun': 2} (sr_uuid1, label1, params1) = volume_utils.parse_sr_info(data1) (sr_uuid2, label2, params2) = volume_utils.parse_sr_info(data2) self.assertEqual(sr_uuid1, sr_uuid2) self.assertEqual(label1, label2) @mock.patch.object(volume_utils, 'forget_sr') def test_purge_sr_no_VBDs(self, mock_forget): def _call_xenapi(func, *args): if func == 'SR.get_VDIs': return ['VDI1', 'VDI2'] if func == 'VDI.get_VBDs': return [] self.session = mock.Mock() self.session.call_xenapi = _call_xenapi volume_utils.purge_sr(self.session, 'SR') mock_forget.assert_called_once_with(self.session, 'SR') @mock.patch.object(volume_utils, 'forget_sr') def test_purge_sr_in_use(self, mock_forget): def _call_xenapi(func, *args): if func == 'SR.get_VDIs': return ['VDI1', 'VDI2'] if func == 'VDI.get_VBDs': if args[0] == 'VDI1': return ['VBD1'] if args[0] == 'VDI2': return ['VBD2'] self.session = mock.Mock() self.session.call_xenapi = _call_xenapi volume_utils.purge_sr(self.session, 'SR') self.assertEqual([], mock_forget.mock_calls) class TestStreamToVDI(stubs.XenAPITestBaseNoDB): @mock.patch.object(volume_utils, '_stream_to_vdi') @mock.patch.object(volume_utils, '_get_vdi_import_path', return_value='vdi_import_path') def test_creates_task_conn(self, mock_import_path, mock_stream): session = stubs.get_fake_session() session.custom_task = mock.MagicMock() session.custom_task.return_value.__enter__.return_value = 'task' session.http_connection = mock.MagicMock() session.http_connection.return_value.__enter__.return_value = 'conn' instance = {'name': 'instance-name'} volume_utils.stream_to_vdi(session, instance, 'vhd', 'file_obj', 100, 'vdi_ref') session.custom_task.assert_called_with('VDI_IMPORT_for_instance-name') mock_stream.assert_called_with('conn', 'vdi_import_path', 100, 'file_obj') self.assertTrue(session.http_connection.return_value.__exit__.called) self.assertTrue(session.custom_task.return_value.__exit__.called) def test_stream_to_vdi_tiny(self): mock_file = mock.Mock() mock_file.read.side_effect = ['a'] mock_conn = mock.Mock() resp = mock.Mock() resp.status = '200' resp.reason = 'OK' mock_conn.getresponse.return_value = resp volume_utils._stream_to_vdi(mock_conn, '/path', 1, mock_file) args, kwargs = mock_conn.request.call_args 
self.assertEqual(kwargs['headers']['Content-Length'], '1') mock_file.read.assert_called_once_with(1) mock_conn.send.assert_called_once_with('a') def test_stream_to_vdi_chunk_multiple(self): mock_file = mock.Mock() mock_file.read.side_effect = ['aaaaa', 'bbbbb'] mock_conn = mock.Mock() resp = mock.Mock() resp.status = '200' resp.reason = 'OK' mock_conn.getresponse.return_value = resp tot_size = 2 * 16 * 1024 volume_utils._stream_to_vdi(mock_conn, '/path', tot_size, mock_file) args, kwargs = mock_conn.request.call_args self.assertEqual(kwargs['headers']['Content-Length'], str(tot_size)) mock_file.read.assert_has_calls([mock.call(16 * 1024), mock.call(16 * 1024)]) mock_conn.send.assert_has_calls([mock.call('aaaaa'), mock.call('bbbbb')]) def test_stream_to_vdi_chunk_remaining(self): mock_file = mock.Mock() mock_file.read.side_effect = ['aaaaa', 'bb'] mock_conn = mock.Mock() resp = mock.Mock() resp.status = '200' resp.reason = 'OK' mock_conn.getresponse.return_value = resp tot_size = 16 * 1024 + 1024 volume_utils._stream_to_vdi(mock_conn, '/path', tot_size, mock_file) args, kwargs = mock_conn.request.call_args self.assertEqual(kwargs['headers']['Content-Length'], str(tot_size)) mock_file.read.assert_has_calls([mock.call(16 * 1024), mock.call(1024)]) mock_conn.send.assert_has_calls([mock.call('aaaaa'), mock.call('bb')])
{ "content_hash": "8ef86d069ae9c274c5ce128fad63fce5", "timestamp": "", "source": "github", "line_count": 401, "max_line_length": 78, "avg_line_length": 38.12967581047381, "alnum_prop": 0.5750163505559189, "repo_name": "vmturbo/nova", "id": "e7bc570e73ae72ce7ec05226a64038d16f53d4e1", "size": "15926", "binary": false, "copies": "5", "ref": "refs/heads/master", "path": "nova/tests/unit/virt/xenapi/test_volume_utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "601" }, { "name": "PHP", "bytes": "4503" }, { "name": "Python", "bytes": "18983608" }, { "name": "Shell", "bytes": "31813" }, { "name": "Smarty", "bytes": "307089" } ], "symlink_target": "" }
from django.http import HttpResponseRedirect
from django.views.generic.list import ListView
from django.utils.translation import ugettext as _

from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import login_required

from account.mixins import LoginRequiredMixin

from social_auth.decorators import dsa_view
from social_auth.models import UserSocialAuth
from social_auth.utils import backend_setting
from social_auth.views import DEFAULT_REDIRECT


class SocialAuths(LoginRequiredMixin, ListView):
    model = UserSocialAuth

    def get_queryset(self):
        qs = super(SocialAuths, self).get_queryset()
        qs = qs.filter(user=self.request.user)
        return qs


@login_required
@dsa_view()
def disconnect(request, backend, association_id=None):
    associated = request.user.social_auth.count()
    url = request.REQUEST.get(REDIRECT_FIELD_NAME, '') or backend_setting(backend, 'SOCIAL_AUTH_DISCONNECT_REDIRECT_URL') or DEFAULT_REDIRECT

    if not request.user.has_usable_password() and associated <= 1:
        messages.error(request, _("Cannot remove the only Social Account without first setting a Password or adding another Social Account."))
        return HttpResponseRedirect(url)

    usa = request.user.social_auth.get(pk=association_id)
    backend.disconnect(request.user, association_id)

    messages.success(request, _("Removed the %(provider)s account '%(uid)s'.") % {
        "provider": usa.provider,
        "uid": usa.extra_data.get("display", usa.uid) if usa.extra_data is not None else usa.uid,
    })

    return HttpResponseRedirect(url)
{ "content_hash": "4b1ab38e2f6dddb939a54384f7ae4c5b", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 142, "avg_line_length": 37.45454545454545, "alnum_prop": 0.7475728155339806, "repo_name": "eldarion/pycon", "id": "01ace0057abea581426daee436c82138d66b99f1", "size": "1648", "binary": false, "copies": "9", "ref": "refs/heads/2013", "path": "symposion/social_auth/views.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "431427" }, { "name": "Python", "bytes": "201736" }, { "name": "Shell", "bytes": "198" } ], "symlink_target": "" }
import logging
import pymongo
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../"))

from comm import singleton
from global_const import MONGO_HOST, MONGO_PORT, MONGO_USR, MONGO_PWD, MONGO_DB


# category options
class category_dao(singleton):
    __category_collection = None;

    def __init__(self):
        if self.__category_collection is None:
            conn = pymongo.MongoClient(MONGO_HOST, MONGO_PORT);
            db = conn[MONGO_DB];
            db.authenticate(MONGO_USR, MONGO_PWD);
            self.__category_collection = db.category;
        else:
            logging.info("category_dao has inited......");

    def create(self, json):
        self.__category_collection.insert(json);
        logging.info("create category success......");

    def update(self, json):
        _id = json["_id"];
        self.__category_collection.update({"_id":_id},{"$set":json});
        logging.info("update category success......");

    def delete(self, _id):
        self.__category_collection.remove({"_id":_id});
        logging.info("delete category success......");

    def query_by_vendor(self, vendor_id):
        cursor = self.__category_collection.find({"vendor_id":vendor_id})
        array = []
        for i in cursor:
            array.append(i)
        return array

    def query(self, _id):
        cursor = self.__category_collection.find({"_id":_id})
        data = None
        for i in cursor:
            data = i
        return data
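# Illustrative usage sketch (not part of the original module): the document
# fields are placeholders and assume a reachable MongoDB configured in
# global_const.
#
#   dao = category_dao()
#   dao.create({'_id': 'cat-001', 'vendor_id': 'vendor-42', 'name': 'Climbing'})
#   dao.query('cat-001')              # -> the stored document, or None
#   dao.query_by_vendor('vendor-42')  # -> list of documents for that vendor
#   dao.delete('cat-001')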
{ "content_hash": "4153a612ea7ff52b7033b80b20fd4808", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 79, "avg_line_length": 28.32075471698113, "alnum_prop": 0.5856095936042638, "repo_name": "ThomasZh/legend-club-wxpub", "id": "29c20cd965252958c68e23852a1e56292ca065d9", "size": "2146", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "foo/dao/category_dao.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "1682024" }, { "name": "HTML", "bytes": "1668439" }, { "name": "JavaScript", "bytes": "3737917" }, { "name": "PHP", "bytes": "45572" }, { "name": "Python", "bytes": "424590" }, { "name": "Shell", "bytes": "1249" } ], "symlink_target": "" }
""" WSGI config for my_proj project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_proj.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
{ "content_hash": "d6e42a945c383300d4719ac81563d168", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 78, "avg_line_length": 27.785714285714285, "alnum_prop": 0.7686375321336761, "repo_name": "noisebridge/PythonClass", "id": "f65bfae7a8e08b54c94678e27aa97d63e41fd3c4", "size": "389", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "instructors/lessons/django-part2/examples/repo_root/my_proj/wsgi.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "3255" }, { "name": "HTML", "bytes": "524536" }, { "name": "Jupyter Notebook", "bytes": "493067" }, { "name": "Mako", "bytes": "824" }, { "name": "Perl", "bytes": "34109" }, { "name": "Python", "bytes": "474536" }, { "name": "Shell", "bytes": "263" } ], "symlink_target": "" }
from telex import auth
from telex.utils.decorators import pm_only
from telex import plugin


class ConfigPlugin(plugin.TelexPlugin):
    """
    Plugin to manage other plugin configuration.
    """

    patterns = {
        "^{prefix}config ([\w-]+) show$": "show_options",
        "^{prefix}config ([\w-]+) set ([\w-]+) \"(.+)\"": "set_option",
        "^{prefix}config ([\w-]+) get ([\w-]+)": "get_option",
    }

    usage = [
        "{prefix}config <plugin_name> show: List all plugin options",
        "{prefix}config <plugin_name> set <configname> \"<value>\": Set plugin value.",
        "{prefix}config <plugin_name> get <configname>: Get config value.",
    ]

    @auth.authorize(groups=["admins"])
    @pm_only
    def show_options(self, msg, matches):
        try:
            plugin = self.plugin_manager.getPluginByName(matches.group(1)).plugin_object
        except AttributeError:
            return "No plugin found for {}".format(matches.group(1))

        if not hasattr(plugin, "config_options"):
            return "Plugin {} has no config_options defined, cannot list options".format(matches.group(1))

        text = "Configuration options for {}:\n{}".format(
            matches.group(1),
            "\n".join(["{}: {}".format(k, v) for k, v in plugin.config_options.items()]))
        return text

    @auth.authorize(groups=["admins"])
    @pm_only
    def set_option(self, msg, matches):
        try:
            plugin = self.plugin_manager.getPluginByName(matches.group(1)).plugin_object
        except AttributeError:
            return "No plugin found for {}".format(matches.group(1))

        plugin.write_option(matches.group(2), matches.group(3))
        return "Set {}.{} to {}".format(matches.group(1), matches.group(2), matches.group(3))

    @auth.authorize(groups=["admins"])
    @pm_only
    def get_option(self, msg, matches):
        try:
            plugin = self.plugin_manager.getPluginByName(matches.group(1)).plugin_object
        except AttributeError:
            return "No plugin found for {}".format(matches.group(1))

        return "{}.{} = {}".format(matches.group(1), matches.group(2), plugin.read_option(matches.group(2)))
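# Illustrative chat usage (not part of the original plugin): with the bot's
# command prefix (assumed here to be "/"), an admin in a private message could
# run, for example:
#
#   /config weather show                   -> lists the plugin's config_options
#   /config weather set api_key "abc123"   -> writes api_key for that plugin
#   /config weather get api_key            -> reads the stored value
#
# "weather" and "api_key" are placeholder names for an installed plugin and
# one of its options.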
{ "content_hash": "a00eda0cebc587026cebf67e57ab8ac2", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 108, "avg_line_length": 38.82142857142857, "alnum_prop": 0.6048758049678012, "repo_name": "datamachine/telex", "id": "c61146c61374bf8a8ab980422f517d9a8c74fcb8", "size": "2174", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "plugins/config.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "156218" }, { "name": "Shell", "bytes": "624" } ], "symlink_target": "" }
from swgpy.object import *


def create(kernel):
    result = Tangible()

    result.template = "object/tangible/ship/attachment/wing/shared_xwing_wing_pos_s01.iff"
    result.attribute_template_id = 8
    result.stfName("item_n","ship_attachment")

    #### BEGIN MODIFICATIONS ####

    #### END MODIFICATIONS ####

    return result
{ "content_hash": "262872a5c7115c64ce178fe4dea5db54", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 87, "avg_line_length": 24.53846153846154, "alnum_prop": 0.7021943573667712, "repo_name": "obi-two/Rebelion", "id": "be95d79f0cdfa9f5e1a89062ae8ed3705d709e9a", "size": "464", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "data/scripts/templates/object/tangible/ship/attachment/wing/shared_xwing_wing_pos_s01.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "11818" }, { "name": "C", "bytes": "7699" }, { "name": "C++", "bytes": "2293610" }, { "name": "CMake", "bytes": "39727" }, { "name": "PLSQL", "bytes": "42065" }, { "name": "Python", "bytes": "7499185" }, { "name": "SQLPL", "bytes": "41864" } ], "symlink_target": "" }
import os
import sys
import BaseHTTPServer
import SocketServer
import urlparse


class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    homedir = ''
    known_content = {
        '.htm': 'text/html',
        '.html': 'text/html',
        '.css': 'text/css',
        '.js': 'text/javascript',
        '.mid': 'audio/midi'}

    def _set_headers(self, content_type):
        self.send_response(200)
        self.send_header('Content-type', ''+content_type)
        self.end_headers()

    def do_HEAD(self):
        self._set_headers('html')

    def do_GET(self):
        path = urlparse.urlparse(self.path).path
        fname, fext = os.path.splitext(path)
        if fext in MyHandler.known_content:
            self._set_headers(MyHandler.known_content[fext])
            try:
                with open(unicode(os.path.join(MyHandler.homedir, os.path.basename(path))), "rb") as f:
                    self.wfile.write(f.read())
            except:
                pass

    def do_POST(self):
        pass  # ffu


class MyServer(object):
    def __init__(self, homedir, port):
        MyHandler.homedir = homedir
        self._port = port
        self._httpd = SocketServer.TCPServer(("", port), MyHandler)

    def run(self):
        self._httpd.serve_forever()


def main():
    root = os.getcwd()
    port = 8123 if len(sys.argv) == 1 else int(sys.argv[1])
    server = MyServer(root, port)
    print 'running....'
    print '\t Root: ', root
    print '\t Port: ', port
    server.run()


if __name__ == '__main__':
    main()
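# Illustrative run (not part of the original script, Python 2 only): serve the
# current directory on a chosen port and fetch a file from another shell, e.g.:
#
#   python server.py 8123
#   curl http://localhost:8123/index.html
#
# index.html is a placeholder; only extensions listed in known_content are
# served with a 200 response.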
{ "content_hash": "9696f70a89a303ed9730fc7abab08c94", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 103, "avg_line_length": 26.93103448275862, "alnum_prop": 0.558258642765685, "repo_name": "Lirazs/Midi-Music-Teacher", "id": "12ab6ae5377e7a83aa3102ceb16a8541cbc7a1a6", "size": "1588", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "server.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "6451" }, { "name": "HTML", "bytes": "32041" }, { "name": "JavaScript", "bytes": "41646" }, { "name": "Python", "bytes": "1588" } ], "symlink_target": "" }
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import random_seed from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.linalg import linalg as linalg_lib from tensorflow.python.ops.linalg import linear_operator_test_util from tensorflow.python.platform import test linalg = linalg_lib random_seed.set_random_seed(23) rng = np.random.RandomState(0) class BaseLinearOperatorLowRankUpdatetest(object): """Base test for this type of operator.""" # Subclasses should set these attributes to either True or False. # If True, A = L + UDV^H # If False, A = L + UV^H or A = L + UU^H, depending on _use_v. _use_diag_update = None # If True, diag is > 0, which means D is symmetric positive definite. _is_diag_update_positive = None # If True, A = L + UDV^H # If False, A = L + UDU^H or A = L + UU^H, depending on _use_diag_update _use_v = None @property def _dtypes_to_test(self): # TODO(langmore) Test complex types once cholesky works with them. # See comment in LinearOperatorLowRankUpdate.__init__. return [dtypes.float32, dtypes.float64] @property def _operator_build_infos(self): build_info = linear_operator_test_util.OperatorBuildInfo # Previously we had a (2, 10, 10) shape at the end. We did this to test the # inversion and determinant lemmas on not-tiny matrices, since these are # known to have stability issues. This resulted in test timeouts, so this # shape has been removed, but rest assured, the tests did pass. return [ build_info((0, 0)), build_info((1, 1)), build_info((1, 3, 3)), build_info((3, 4, 4)), build_info((2, 1, 4, 4))] def _operator_and_matrix(self, build_info, dtype, use_placeholder): # Recall A = L + UDV^H shape = list(build_info.shape) diag_shape = shape[:-1] k = shape[-2] // 2 + 1 u_perturbation_shape = shape[:-1] + [k] diag_update_shape = shape[:-2] + [k] # base_operator L will be a symmetric positive definite diagonal linear # operator, with condition number as high as 1e4. 
base_diag = linear_operator_test_util.random_uniform( diag_shape, minval=1e-4, maxval=1., dtype=dtype) lin_op_base_diag = base_diag # U u = linear_operator_test_util.random_normal_correlated_columns( u_perturbation_shape, dtype=dtype) lin_op_u = u # V v = linear_operator_test_util.random_normal_correlated_columns( u_perturbation_shape, dtype=dtype) lin_op_v = v # D if self._is_diag_update_positive: diag_update = linear_operator_test_util.random_uniform( diag_update_shape, minval=1e-4, maxval=1., dtype=dtype) else: diag_update = linear_operator_test_util.random_normal( diag_update_shape, stddev=1e-4, dtype=dtype) lin_op_diag_update = diag_update if use_placeholder: lin_op_base_diag = array_ops.placeholder_with_default( base_diag, shape=None) lin_op_u = array_ops.placeholder_with_default(u, shape=None) lin_op_v = array_ops.placeholder_with_default(v, shape=None) lin_op_diag_update = array_ops.placeholder_with_default( diag_update, shape=None) base_operator = linalg.LinearOperatorDiag( lin_op_base_diag, is_positive_definite=True) operator = linalg.LinearOperatorLowRankUpdate( base_operator, lin_op_u, v=lin_op_v if self._use_v else None, diag_update=lin_op_diag_update if self._use_diag_update else None, is_diag_update_positive=self._is_diag_update_positive) # The matrix representing L base_diag_mat = array_ops.matrix_diag(base_diag) # The matrix representing D diag_update_mat = array_ops.matrix_diag(diag_update) # Set up mat as some variant of A = L + UDV^H if self._use_v and self._use_diag_update: # In this case, we have L + UDV^H and it isn't symmetric. expect_use_cholesky = False matrix = base_diag_mat + math_ops.matmul( u, math_ops.matmul(diag_update_mat, v, adjoint_b=True)) elif self._use_v: # In this case, we have L + UDV^H and it isn't symmetric. expect_use_cholesky = False matrix = base_diag_mat + math_ops.matmul(u, v, adjoint_b=True) elif self._use_diag_update: # In this case, we have L + UDU^H, which is PD if D > 0, since L > 0. expect_use_cholesky = self._is_diag_update_positive matrix = base_diag_mat + math_ops.matmul( u, math_ops.matmul(diag_update_mat, u, adjoint_b=True)) else: # In this case, we have L + UU^H, which is PD since L > 0. expect_use_cholesky = True matrix = base_diag_mat + math_ops.matmul(u, u, adjoint_b=True) if expect_use_cholesky: self.assertTrue(operator._use_cholesky) else: self.assertFalse(operator._use_cholesky) return operator, matrix class LinearOperatorLowRankUpdatetestWithDiagUseCholesky( BaseLinearOperatorLowRankUpdatetest, linear_operator_test_util.SquareLinearOperatorDerivedClassTest): """A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky.""" _use_diag_update = True _is_diag_update_positive = True _use_v = False def setUp(self): # Decrease tolerance since we are testing with condition numbers as high as # 1e4. self._atol[dtypes.float32] = 1e-5 self._rtol[dtypes.float32] = 1e-5 self._atol[dtypes.float64] = 1e-10 self._rtol[dtypes.float64] = 1e-10 class LinearOperatorLowRankUpdatetestWithDiagCannotUseCholesky( BaseLinearOperatorLowRankUpdatetest, linear_operator_test_util.SquareLinearOperatorDerivedClassTest): """A = L + UDU^H, D !> 0, L > 0 ==> A !> 0 and we cannot use a Cholesky.""" _use_diag_update = True _is_diag_update_positive = False _use_v = False def setUp(self): # Decrease tolerance since we are testing with condition numbers as high as # 1e4. This class does not use Cholesky, and thus needs even looser # tolerance. 
self._atol[dtypes.float32] = 1e-4 self._rtol[dtypes.float32] = 1e-4 self._atol[dtypes.float64] = 1e-9 self._rtol[dtypes.float64] = 1e-9 class LinearOperatorLowRankUpdatetestNoDiagUseCholesky( BaseLinearOperatorLowRankUpdatetest, linear_operator_test_util.SquareLinearOperatorDerivedClassTest): """A = L + UU^H, L > 0 ==> A > 0 and we can use a Cholesky.""" _use_diag_update = False _is_diag_update_positive = None _use_v = False def setUp(self): # Decrease tolerance since we are testing with condition numbers as high as # 1e4. self._atol[dtypes.float32] = 1e-5 self._rtol[dtypes.float32] = 1e-5 self._atol[dtypes.float64] = 1e-10 self._rtol[dtypes.float64] = 1e-10 class LinearOperatorLowRankUpdatetestNoDiagCannotUseCholesky( BaseLinearOperatorLowRankUpdatetest, linear_operator_test_util.SquareLinearOperatorDerivedClassTest): """A = L + UV^H, L > 0 ==> A is not symmetric and we cannot use a Cholesky.""" _use_diag_update = False _is_diag_update_positive = None _use_v = True def setUp(self): # Decrease tolerance since we are testing with condition numbers as high as # 1e4. This class does not use Cholesky, and thus needs even looser # tolerance. self._atol[dtypes.float32] = 1e-4 self._rtol[dtypes.float32] = 1e-4 self._atol[dtypes.float64] = 1e-9 self._rtol[dtypes.float64] = 1e-9 class LinearOperatorLowRankUpdatetestWithDiagNotSquare( BaseLinearOperatorLowRankUpdatetest, linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest): """A = L + UDU^H, D > 0, L > 0 ==> A > 0 and we can use a Cholesky.""" _use_diag_update = True _is_diag_update_positive = True _use_v = True class LinearOpearatorLowRankUpdateBroadcastsShape(test.TestCase): """Test that the operator's shape is the broadcast of arguments.""" def test_static_shape_broadcasts_up_from_operator_to_other_args(self): base_operator = linalg.LinearOperatorIdentity(num_rows=3) u = array_ops.ones(shape=[2, 3, 2]) diag = array_ops.ones(shape=[2, 2]) operator = linalg.LinearOperatorLowRankUpdate(base_operator, u, diag) # domain_dimension is 3 self.assertAllEqual([2, 3, 3], operator.shape) with self.test_session(): self.assertAllEqual([2, 3, 3], operator.to_dense().eval().shape) def test_dynamic_shape_broadcasts_up_from_operator_to_other_args(self): num_rows_ph = array_ops.placeholder(dtypes.int32) base_operator = linalg.LinearOperatorIdentity(num_rows=num_rows_ph) u_shape_ph = array_ops.placeholder(dtypes.int32) u = array_ops.ones(shape=u_shape_ph) operator = linalg.LinearOperatorLowRankUpdate(base_operator, u) feed_dict = { num_rows_ph: 3, u_shape_ph: [2, 3, 2], # batch_shape = [2] } with self.test_session(): shape_tensor = operator.shape_tensor().eval(feed_dict=feed_dict) self.assertAllEqual([2, 3, 3], shape_tensor) dense = operator.to_dense().eval(feed_dict=feed_dict) self.assertAllEqual([2, 3, 3], dense.shape) def test_u_and_v_incompatible_batch_shape_raises(self): base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64) u = rng.rand(5, 3, 2) v = rng.rand(4, 3, 2) with self.assertRaisesRegexp(ValueError, "Incompatible shapes"): linalg.LinearOperatorLowRankUpdate(base_operator, u=u, v=v) def test_u_and_base_operator_incompatible_batch_shape_raises(self): base_operator = linalg.LinearOperatorIdentity( num_rows=3, batch_shape=[4], dtype=np.float64) u = rng.rand(5, 3, 2) with self.assertRaisesRegexp(ValueError, "Incompatible shapes"): linalg.LinearOperatorLowRankUpdate(base_operator, u=u) def test_u_and_base_operator_incompatible_domain_dimension(self): base_operator = linalg.LinearOperatorIdentity(num_rows=3, 
dtype=np.float64) u = rng.rand(5, 4, 2) with self.assertRaisesRegexp(ValueError, "not compatible"): linalg.LinearOperatorLowRankUpdate(base_operator, u=u) def test_u_and_diag_incompatible_low_rank_raises(self): base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64) u = rng.rand(5, 3, 2) diag = rng.rand(5, 4) # Last dimension should be 2 with self.assertRaisesRegexp(ValueError, "not compatible"): linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag) def test_diag_incompatible_batch_shape_raises(self): base_operator = linalg.LinearOperatorIdentity(num_rows=3, dtype=np.float64) u = rng.rand(5, 3, 2) diag = rng.rand(4, 2) # First dimension should be 5 with self.assertRaisesRegexp(ValueError, "Incompatible shapes"): linalg.LinearOperatorLowRankUpdate(base_operator, u=u, diag_update=diag) if __name__ == "__main__": test.main()
{ "content_hash": "ec71dba5c5748c7ee35a168c812cb8db", "timestamp": "", "source": "github", "line_count": 297, "max_line_length": 80, "avg_line_length": 36.93939393939394, "alnum_prop": 0.6842584996809771, "repo_name": "meteorcloudy/tensorflow", "id": "34b35a4ffb878c63f851f2b31491e7bfa4057417", "size": "11661", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "9258" }, { "name": "C", "bytes": "307095" }, { "name": "C++", "bytes": "44316209" }, { "name": "CMake", "bytes": "206677" }, { "name": "Go", "bytes": "1163771" }, { "name": "HTML", "bytes": "4680032" }, { "name": "Java", "bytes": "781209" }, { "name": "Jupyter Notebook", "bytes": "2244126" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "49862" }, { "name": "Objective-C", "bytes": "15650" }, { "name": "Objective-C++", "bytes": "99265" }, { "name": "PHP", "bytes": "2140" }, { "name": "Perl", "bytes": "7536" }, { "name": "PureBasic", "bytes": "25356" }, { "name": "Python", "bytes": "37635923" }, { "name": "Ruby", "bytes": "533" }, { "name": "Shell", "bytes": "446907" }, { "name": "Smarty", "bytes": "6870" } ], "symlink_target": "" }
import os
# import distutils
from distutils import dist

cwd = os.getcwd()


def lines(string):
    if not string:
        return []
    _lines = string.splitlines()
    _lines = list(filter(lambda l: l.lstrip().rstrip(), _lines))
    _lines = list(filter(lambda l: l, _lines))
    return _lines


def read(path):
    if os.path.exists(path) and os.path.isfile(path):
        return open(path).read()


def readlines(path):
    if os.path.exists(path) and os.path.isfile(path):
        read = open(path).read()
        return lines(read)
    return []


# class DistributionMetadata(distutils.dist.DistributionMetadata):
class DistributionMetadata(dist.DistributionMetadata):
    # todo: entry_points

    def get_name(self):
        # return self.name or "UNKNOWN"
        if self.name:
            return self.name
        key = "NAME"
        if key in os.environ and os.environ[key]:
            return os.environ[key]
        return os.path.basename(cwd).split(".")[0].lower()

    def get_version(self):
        # return self.version or "0.0.0"
        if self.version:
            return self.version
        for filename in ["version.txt", "version"]:
            path = os.path.join(cwd, filename)
            if read(path):
                return read(path)
        key = "VERSION"
        if os.environ.get(key, None):
            return os.environ[key]
        return "0.0.0"

    def get_license(self):
        # return self.license or "UNKNOWN"
        if self.license:
            return self.license
        key = "LICENCE"
        if os.environ.get(key, None):
            # the license field is a plain string, not a list
            return os.environ[key]
        return "UNKNOWN"

    def get_description(self):
        # return self._encode_field(self.description) or "UNKNOWN"
        if self.description:
            return self.description
        for filename in ["description", "description.txt"]:
            path = os.path.join(cwd, filename)
            if read(path):
                return read(path)
        key = "DESCRIPTION"
        if key in os.environ and os.environ[key]:
            return os.environ[key]
        return "UNKNOWN"

    def get_long_description(self):
        # return self._encode_field(self.long_description) or "UNKNOWN"
        if self.long_description:
            return self.long_description
        for filename in ["README.rst", "README"]:
            path = os.path.join(cwd, filename)
            if read(path):
                return read(path)
        key = "LONG_DESCRIPTION"
        if os.environ.get(key, None):
            return os.environ[key]
        return "UNKNOWN"

    def get_keywords(self):
        # return self.keywords or []
        if self.keywords:
            return self.keywords
        path = os.path.join(cwd, "keywords.txt")
        if read(path):
            return [read(path)]
        key = "KEYWORDS"
        if os.environ.get(key, None):
            return [os.environ[key]]
        return []

    def get_platforms(self):
        # return self.platforms or ["UNKNOWN"]
        if self.platforms:
            return self.platforms
        path = os.path.join(cwd, "platforms.txt")
        if read(path):
            return [read(path)]
        key = "PLATFORMS"
        if os.environ.get(key, None):
            return [os.environ[key]]
        return ["UNKNOWN"]

    def get_classifiers(self):
        # return self.classifiers or []
        if self.classifiers:
            return sorted(self.classifiers)
        classifiers = []
        path = os.path.join(cwd, "classifiers.txt")
        if os.path.exists(path):
            classifiers = readlines(path)
        key = "CLASSIFIERS"
        if key in os.environ and os.environ[key]:
            classifiers += os.environ[key].splitlines()
        classifiers = filter(None, classifiers)  # remove empty
        classifiers = list(set(classifiers))  # unique
        return list(sorted(classifiers))

    def get_download_url(self):
        # return self.download_url or "UNKNOWN"
        if self.download_url:
            return self.download_url
        key = "DOWNLOAD_URL"
        if os.environ.get(key, None):
            return os.environ[key]
        return "UNKNOWN"

    def get_url(self):
        # return self.url or "UNKNOWN"
        if self.url:
            return self.url
        key = "URL"
        if os.environ.get(key, None):
            return os.environ[key]
        return "UNKNOWN"
{ "content_hash": "5659ccce5adf5855ef528f6b31dacd6b", "timestamp": "", "source": "github", "line_count": 140, "max_line_length": 71, "avg_line_length": 31.22142857142857, "alnum_prop": 0.5703500343170899, "repo_name": "MarkWh1te/xueqiu_predict", "id": "8e2357218538aae55ac26a3d21b848cb65c286be", "size": "4393", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "python3_env/lib/python3.4/site-packages/setupfiles/dist.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "19476" }, { "name": "CSS", "bytes": "12462" }, { "name": "HTML", "bytes": "31039" }, { "name": "JavaScript", "bytes": "13342" }, { "name": "Python", "bytes": "25631919" }, { "name": "Shell", "bytes": "6544" } ], "symlink_target": "" }
import csv, random
import SonicScrewdriver as utils

idlist = list()

with open('religion.csv', encoding = 'utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        docid = row['docid']
        idlist.append(docid)

with open('religionids.txt', mode = 'w', encoding = 'utf-8') as f:
    for anid in idlist:
        f.write(anid + '\n')
{ "content_hash": "19dc8880c55c43881192c2d9401dd04b", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 66, "avg_line_length": 25.357142857142858, "alnum_prop": 0.6197183098591549, "repo_name": "tedunderwood/GenreProject", "id": "61a14eae534201d66dc09b84244d5924a2d528ed", "size": "355", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/reception/nonfic/getids.py", "mode": "33188", "license": "mit", "language": [ { "name": "Java", "bytes": "187389" }, { "name": "Python", "bytes": "645172" }, { "name": "R", "bytes": "34870" } ], "symlink_target": "" }
class QueueArray:
    def __init__(self):
        self.items = []

    def isEmpty(self):
        return self.items == []

    def enqueue(self, item):
        # We always enqueue into last position
        self.items.append(item)

    def dequeue(self):
        if not self.isEmpty():
            # We always dequeue from first position
            return self.items.pop(0)
        else:
            print("The queue is empty. Can't dequeue.")
            return None

    def size(self):
        return len(self.items)

    def printQueue(self):
        print("FIRST>", end=" ")
        for item in self.items:
            print(item, end=" ")
        print("<LAST")


if __name__ == "__main__":
    # Executed only when run as a script; a small demonstration of how the queue
    # works. Just run 'python3 QueueArray.py' in a terminal.
    queue = QueueArray()
    while True:
        print("What do you want to do?")
        print("\t1 - Enqueue")
        print("\t2 - Dequeue")
        print("\t3 - Check empty")
        print("\t4 - Check size")
        print("\t5 - Print queue")
        print("\t6 - Exit")
        option = input()
        print()
        if option == '1':
            item = input("Type your item ")
            queue.enqueue(item)
            print("Item enqueued successfully!\n")
        elif option == '2':
            item = queue.dequeue()
            print("Dequeued item", item, "\n")
        elif option == '3':
            if queue.isEmpty():
                print("Queue is empty\n")
            else:
                print("Queue is not empty\n")
        elif option == '4':
            print("The queue size is", queue.size(), "\n")
        elif option == '5':
            queue.printQueue()
            print()
        elif option == '6':
            print("Bye!")
            break
        else:
            print("Please, choose an option between 1 and 6\n")
{ "content_hash": "7ca38d536da986f32181065510fafb13", "timestamp": "", "source": "github", "line_count": 64, "max_line_length": 116, "avg_line_length": 29.359375, "alnum_prop": 0.4970729111229377, "repo_name": "CodersForLife/Data-Structures-Algorithms", "id": "dd38602d61d5e769b7891410380e1019ce41ee02", "size": "1982", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Arrays/QueueArray.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "3283" }, { "name": "C++", "bytes": "34708" }, { "name": "Go", "bytes": "449" }, { "name": "Java", "bytes": "27575" }, { "name": "JavaScript", "bytes": "3724" }, { "name": "Kotlin", "bytes": "163" }, { "name": "Python", "bytes": "12527" } ], "symlink_target": "" }
import os, sys
import timeit
import array
import ROOT

from alphatwirl.roottree import Events, BEvents

# https://cp3.irmp.ucl.ac.be/projects/delphes/ticket/1039
ROOT.gInterpreter.Declare('#include "classes/DelphesClasses.h"')
ROOT.gInterpreter.Declare('#include "external/ExRootAnalysis/ExRootTreeReader.h"')

ROOT.gSystem.Load("libDelphes.so")

##__________________________________________________________________||
inputPath = '/hdfs/user/ds13962/delphes_jobs/job_20170417_001/QCD_HT1000to1500/0000/delphes.root'
treeName = 'Delphes'

##__________________________________________________________________||
def use_Events():
    inputFile = ROOT.TFile.Open(inputPath)
    tree = inputFile.Get(treeName)
    events = Events(tree)
    for event in events:
        for i in range(event.Jet.GetEntries()):
            event.Jet[i].PT

##__________________________________________________________________||
def use_Events_SetBranchStatus_var():
    inputFile = ROOT.TFile.Open(inputPath)
    tree = inputFile.Get(treeName)
    tree.SetBranchStatus("*", 0)
    tree.SetBranchStatus("Jet.PT", 1)
    events = Events(tree)
    for event in events:
        for i in range(event.Jet.GetEntries()):
            event.Jet[i].PT

##__________________________________________________________________||
def use_Events_SetBranchStatus_obj():
    inputFile = ROOT.TFile.Open(inputPath)
    tree = inputFile.Get(treeName)
    tree.SetBranchStatus("*", 0)
    tree.SetBranchStatus("Jet.*", 1)
    events = Events(tree)
    for event in events:
        for i in range(event.Jet.GetEntries()):
            event.Jet[i].PT

##__________________________________________________________________||
# https://github.com/delphes/delphes/blob/master/examples/Example1.py
def use_ExRootTreeReader():
    inputFile = ROOT.TFile.Open(inputPath)
    tree = inputFile.Get(treeName)
    treeReader = ROOT.ExRootTreeReader(tree)
    nentries = treeReader.GetEntries()
    branchJet = treeReader.UseBranch("Jet")
    for ientry in range(nentries):
        treeReader.ReadEntry(ientry)
        for i in range(branchJet.GetEntries()):
            branchJet[i].PT

##__________________________________________________________________||
ways = [
    'use_Events',
    'use_Events_SetBranchStatus_var',
    'use_Events_SetBranchStatus_obj',
    'use_ExRootTreeReader'
]

for w in ways:
    print w, ':',
    print timeit.timeit(w + '()', number = 1, setup = 'from __main__ import ' + w)

##__________________________________________________________________||
# use_Events : 4.99250912666
# use_Events_SetBranchStatus_var : 0.321241855621
# use_Events_SetBranchStatus_obj : 0.540351867676
# use_ExRootTreeReader : 0.495839118958
##__________________________________________________________________||
{ "content_hash": "8852a786e3a0b494719616c80319a454", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 97, "avg_line_length": 34.936708860759495, "alnum_prop": 0.5467391304347826, "repo_name": "TaiSakuma/AlphaTwirl", "id": "77e504d4de255c67f485d51beecc108165e73795", "size": "2888", "binary": false, "copies": "1", "ref": "refs/heads/v0.9.x", "path": "tests/ROOT/performance_Delphes.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "562011" }, { "name": "R", "bytes": "1222" } ], "symlink_target": "" }
""" This dag autoscales your cluster. This only works with docker-compose (local) and Infrakit (swarm). For Infrakit, the following environment variables must be set: - INFRAKIT_IMAGE - what docker image to use for infrakit i.e.infrakit/devbundle:latest - INFRAKIT_GROUPS_URL - the location of the groups json file that defines the groups definition, i.e. https://github.com/wongwill86/examples/blob/master/latest/swarm/groups.json """ # noqa from airflow import DAG from datetime import datetime from airflow.operators.bash_operator import BashOperator from airflow.operators.python_operator import PythonOperator from airflow.operators.latest_only_operator import LatestOnlyOperator from airflow.utils.db import provide_session from airflow import models import requests import json import logging logger = logging.root.getChild(__name__) DAG_ID = 'z_manager_cluster_scaler' default_args = { 'owner': 'airflow', 'depends_on_past': False, 'start_date': datetime(2017, 5, 1), 'catchup': False, 'retries': 0, } SCHEDULE_INTERVAL = '* * * * *' dag = DAG( dag_id=DAG_ID, schedule_interval=SCHEDULE_INTERVAL, default_args=default_args, ) MANAGER_QUEUE = u'manager' QUEUE_SIZES_TASK_ID = 'queue_sizes' BRANCH_RESIZE_TASK_ID = 'branch_resize' RESCALE_TASK_ID = 'rescale_compose' QUEUE_URL = 'http://rabbitmq:15672/api/queues/%2f/{}' QUEUE_USERNAME = 'guest' QUEUE_PASSWORD = 'guest' # To use infrakit with > 1 queue, we will have to modify this code to use # separate groups file for each queue! templated_resize_command = """ {% set queue_sizes = task_instance.xcom_pull(task_ids=params.task_id) %} {% set docker_compose_command='docker-compose -f ' + conf.get('core', 'airflow_home') + '/deploy/docker-compose-CeleryExecutor.yml' + ' up -d --no-recreate --no-deps --no-build --no-color' %} {% set docker_infrakit_command='docker run --rm \ -v /var/run/docker.sock:/var/run/docker.sock -v /infrakit/:/infrakit \ -e INFRAKIT_HOME=/infrakit -e INFRAKIT_PLUGINS_DIR=/infrakit/plugins \ -e INFRAKIT_HOST=manager-cluster ${INFRAKIT_IMAGE} infrakit' %} if mount | grep '{{conf.get('core', 'airflow_home')}}/[dags|plugins]' > /dev/null; then echo 'Dag folder or plugin folder is mounted! Will not autoscale!' else echo 'Try to scale {{queue_sizes}}' {% if queue_sizes | length %} if [ -z "${{'{'}}INFRAKIT_IMAGE{{'}'}}" ]; then echo "Scaling local compose" {{docker_compose_command}} --scale \ {% for queue, size in queue_sizes.items() %} worker-{{queue}}={{size}} {% endfor %} else echo "Scaling infrakit" {% for queue, size in queue_sizes.items() %} if [ {{size}} -gt 0 ]; then if ! {{docker_infrakit_command}} group describe workers-{{queue}}; then echo 'Recommitting missing group...' {{docker_infrakit_command}} manager commit ${{'{'}}INFRAKIT_GROUPS_URL{{'}'}} else echo 'Group workers-{{queue}} already exists, no need to commit' fi echo 'Scaling...' 
{{docker_infrakit_command}} group scale workers-{{queue}} {{size}} else if {{docker_infrakit_command}} group describe workers-{{queue}}; then echo 'Destroying Group workers-{{queue}}' {{docker_infrakit_command}} group destroy workers-{{queue}} else echo 'Group workers-{{queue}} already destroyed' fi fi {% endfor %} fi {% endif %} fi """ # noqa @provide_session def find_queues(session=None): TI = models.TaskInstance query = ( session .query(TI.queue) .distinct(TI.queue) ) queues = query.all() return queues def get_queue_sizes(): queue_sizes = {} for queue in find_queues(): queue_name = queue[0] if queue_name == MANAGER_QUEUE: continue try: response = requests.get(QUEUE_URL.format(queue_name), auth=(QUEUE_USERNAME, QUEUE_PASSWORD)) stats = json.loads(response.text) size = stats['messages_ready'] + stats['messages_unacknowledged'] queue_sizes[queue_name] = size except Exception: logger.exception('No tasks found for %s', queue_name) queue_sizes[queue_name] = 0 return queue_sizes latest = LatestOnlyOperator( task_id='latest_only', queue='manager', dag=dag) queue_sizes_task = PythonOperator( task_id=QUEUE_SIZES_TASK_ID, python_callable=get_queue_sizes, queue="manager", dag=dag) rescale_task = BashOperator( task_id=RESCALE_TASK_ID, bash_command=templated_resize_command, queue="manager", params={'task_id': QUEUE_SIZES_TASK_ID}, dag=dag) latest.set_downstream(queue_sizes_task) queue_sizes_task.set_downstream(rescale_task)
{ "content_hash": "8e0d517e10e394487fe555cc45e8f42a", "timestamp": "", "source": "github", "line_count": 153, "max_line_length": 93, "avg_line_length": 32.03921568627451, "alnum_prop": 0.6383108935128519, "repo_name": "wongwill86/air-tasks", "id": "ae11912c63b99794f785d4d529b22d180ac7c785", "size": "4902", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dags/manager/scaler.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "35339" }, { "name": "Shell", "bytes": "4303" } ], "symlink_target": "" }
import sys import unittest from test import test_support import asyncore import socket import select import time import gc import os import errno import pprint import urllib, urlparse import traceback import weakref from BaseHTTPServer import HTTPServer from SimpleHTTPServer import SimpleHTTPRequestHandler # Optionally test SSL support, if we have it in the tested platform skip_expected = False try: import ssl except ImportError: skip_expected = True HOST = test_support.HOST CERTFILE = None SVN_PYTHON_ORG_ROOT_CERT = None def handle_error(prefix): exc_format = ' '.join(traceback.format_exception(*sys.exc_info())) if test_support.verbose: sys.stdout.write(prefix + exc_format) class BasicTests(unittest.TestCase): def test_sslwrap_simple(self): # A crude test for the legacy API try: ssl.sslwrap_simple(socket.socket(socket.AF_INET)) except IOError, e: if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that pass else: raise try: ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock) except IOError, e: if e.errno == 32: # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that pass else: raise def test_constants(self): ssl.PROTOCOL_SSLv2 ssl.PROTOCOL_SSLv23 ssl.PROTOCOL_SSLv3 ssl.PROTOCOL_TLSv1 ssl.CERT_NONE ssl.CERT_OPTIONAL ssl.CERT_REQUIRED def test_random(self): v = ssl.RAND_status() if test_support.verbose: sys.stdout.write("\n RAND_status is %d (%s)\n" % (v, (v and "sufficient randomness") or "insufficient randomness")) try: ssl.RAND_egd(1) except TypeError: pass else: print "didn't raise TypeError" ssl.RAND_add("this is a random string", 75.0) def test_parse_cert(self): # note that this uses an 'unofficial' function in _ssl.c, # provided solely for this test, to exercise the certificate # parsing code p = ssl._ssl._test_decode_cert(CERTFILE, False) if test_support.verbose: sys.stdout.write("\n" + pprint.pformat(p) + "\n") def test_DER_to_PEM(self): with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: pem = f.read() d1 = ssl.PEM_cert_to_DER_cert(pem) p2 = ssl.DER_cert_to_PEM_cert(d1) d2 = ssl.PEM_cert_to_DER_cert(p2) self.assertEqual(d1, d2) if not p2.startswith(ssl.PEM_HEADER + '\n'): self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2) if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'): self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2) def test_openssl_version(self): n = ssl.OPENSSL_VERSION_NUMBER t = ssl.OPENSSL_VERSION_INFO s = ssl.OPENSSL_VERSION self.assertIsInstance(n, (int, long)) self.assertIsInstance(t, tuple) self.assertIsInstance(s, str) # Some sanity checks follow # >= 0.9 self.assertGreaterEqual(n, 0x900000) # < 2.0 self.assertLess(n, 0x20000000) major, minor, fix, patch, status = t self.assertGreaterEqual(major, 0) self.assertLess(major, 2) self.assertGreaterEqual(minor, 0) self.assertLess(minor, 256) self.assertGreaterEqual(fix, 0) self.assertLess(fix, 256) self.assertGreaterEqual(patch, 0) self.assertLessEqual(patch, 26) self.assertGreaterEqual(status, 0) self.assertLessEqual(status, 15) # Version string as returned by OpenSSL, the format might change self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)), (s, t)) def test_ciphers(self): if not test_support.is_resource_enabled('network'): return remote = ("svn.python.org", 443) s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="ALL") s.connect(remote) s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") s.connect(remote) # Error checking 
occurs when connecting, because the SSL context # isn't created before. s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx") with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"): s.connect(remote) @test_support.cpython_only def test_refcycle(self): # Issue #7943: an SSL object doesn't create reference cycles with # itself. s = socket.socket(socket.AF_INET) ss = ssl.wrap_socket(s) wr = weakref.ref(ss) del ss self.assertEqual(wr(), None) class NetworkedTests(unittest.TestCase): def test_connect(self): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE) s.connect(("svn.python.org", 443)) c = s.getpeercert() if c: self.fail("Peer cert %s shouldn't be here!") s.close() # this should fail because we have no verification certs s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED) try: s.connect(("svn.python.org", 443)) except ssl.SSLError: pass finally: s.close() # this should succeed because we specify the root cert s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, ca_certs=SVN_PYTHON_ORG_ROOT_CERT) try: s.connect(("svn.python.org", 443)) finally: s.close() @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows") def test_makefile_close(self): # Issue #5238: creating a file-like object with makefile() shouldn't # delay closing the underlying "real socket" (here tested with its # file descriptor, hence skipping the test under Windows). ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) ss.connect(("svn.python.org", 443)) fd = ss.fileno() f = ss.makefile() f.close() # The fd is still open os.read(fd, 0) # Closing the SSL socket should close the fd too ss.close() gc.collect() with self.assertRaises(OSError) as e: os.read(fd, 0) self.assertEqual(e.exception.errno, errno.EBADF) def test_non_blocking_handshake(self): s = socket.socket(socket.AF_INET) s.connect(("svn.python.org", 443)) s.setblocking(False) s = ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE, do_handshake_on_connect=False) count = 0 while True: try: count += 1 s.do_handshake() break except ssl.SSLError, err: if err.args[0] == ssl.SSL_ERROR_WANT_READ: select.select([s], [], []) elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE: select.select([], [s], []) else: raise s.close() if test_support.verbose: sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count) def test_get_server_certificate(self): pem = ssl.get_server_certificate(("svn.python.org", 443)) if not pem: self.fail("No server certificate on svn.python.org:443!") try: pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=CERTFILE) except ssl.SSLError: #should fail pass else: self.fail("Got server certificate %s for svn.python.org!" % pem) pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=SVN_PYTHON_ORG_ROOT_CERT) if not pem: self.fail("No server certificate on svn.python.org:443!") if test_support.verbose: sys.stdout.write("\nVerified certificate for svn.python.org:443 is\n%s\n" % pem) def test_algorithms(self): # Issue #8484: all algorithms should be available when verifying a # certificate. 
# SHA256 was added in OpenSSL 0.9.8 if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15): self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION) # NOTE: https://sha256.tbs-internet.com is another possible test host remote = ("sha2.hboeck.de", 443) sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem") s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, ca_certs=sha256_cert,) with test_support.transient_internet(): try: s.connect(remote) if test_support.verbose: sys.stdout.write("\nCipher with %r is %r\n" % (remote, s.cipher())) sys.stdout.write("Certificate is:\n%s\n" % pprint.pformat(s.getpeercert())) finally: s.close() try: import threading except ImportError: _have_threads = False else: _have_threads = True class ThreadedEchoServer(threading.Thread): class ConnectionHandler(threading.Thread): """A mildly complicated class, because we want it to work both with and without the SSL wrapper around the socket connection, so that we can test the STARTTLS functionality.""" def __init__(self, server, connsock): self.server = server self.running = False self.sock = connsock self.sock.setblocking(1) self.sslconn = None threading.Thread.__init__(self) self.daemon = True def show_conn_details(self): if self.server.certreqs == ssl.CERT_REQUIRED: cert = self.sslconn.getpeercert() if test_support.verbose and self.server.chatty: sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n") cert_binary = self.sslconn.getpeercert(True) if test_support.verbose and self.server.chatty: sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n") cipher = self.sslconn.cipher() if test_support.verbose and self.server.chatty: sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n") def wrap_conn(self): try: self.sslconn = ssl.wrap_socket(self.sock, server_side=True, certfile=self.server.certificate, ssl_version=self.server.protocol, ca_certs=self.server.cacerts, cert_reqs=self.server.certreqs, ciphers=self.server.ciphers) except ssl.SSLError: # XXX Various errors can have happened here, for example # a mismatching protocol version, an invalid certificate, # or a low-level bug. This should be made more discriminating. 
if self.server.chatty: handle_error("\n server: bad connection attempt from " + str(self.sock.getpeername()) + ":\n") self.close() self.running = False self.server.stop() return False else: return True def read(self): if self.sslconn: return self.sslconn.read() else: return self.sock.recv(1024) def write(self, bytes): if self.sslconn: return self.sslconn.write(bytes) else: return self.sock.send(bytes) def close(self): if self.sslconn: self.sslconn.close() else: self.sock._sock.close() def run(self): self.running = True if not self.server.starttls_server: if isinstance(self.sock, ssl.SSLSocket): self.sslconn = self.sock elif not self.wrap_conn(): return self.show_conn_details() while self.running: try: msg = self.read() if not msg: # eof, so quit this handler self.running = False self.close() elif msg.strip() == 'over': if test_support.verbose and self.server.connectionchatty: sys.stdout.write(" server: client closed connection\n") self.close() return elif self.server.starttls_server and msg.strip() == 'STARTTLS': if test_support.verbose and self.server.connectionchatty: sys.stdout.write(" server: read STARTTLS from client, sending OK...\n") self.write("OK\n") if not self.wrap_conn(): return elif self.server.starttls_server and self.sslconn and msg.strip() == 'ENDTLS': if test_support.verbose and self.server.connectionchatty: sys.stdout.write(" server: read ENDTLS from client, sending OK...\n") self.write("OK\n") self.sslconn.unwrap() self.sslconn = None if test_support.verbose and self.server.connectionchatty: sys.stdout.write(" server: connection is now unencrypted...\n") else: if (test_support.verbose and self.server.connectionchatty): ctype = (self.sslconn and "encrypted") or "unencrypted" sys.stdout.write(" server: read %s (%s), sending back %s (%s)...\n" % (repr(msg), ctype, repr(msg.lower()), ctype)) self.write(msg.lower()) except ssl.SSLError: if self.server.chatty: handle_error("Test server failure:\n") self.close() self.running = False # normally, we'd just stop here, but for the test # harness, we want to stop the server self.server.stop() def __init__(self, certificate, ssl_version=None, certreqs=None, cacerts=None, chatty=True, connectionchatty=False, starttls_server=False, wrap_accepting_socket=False, ciphers=None): if ssl_version is None: ssl_version = ssl.PROTOCOL_TLSv1 if certreqs is None: certreqs = ssl.CERT_NONE self.certificate = certificate self.protocol = ssl_version self.certreqs = certreqs self.cacerts = cacerts self.ciphers = ciphers self.chatty = chatty self.connectionchatty = connectionchatty self.starttls_server = starttls_server self.sock = socket.socket() self.flag = None if wrap_accepting_socket: self.sock = ssl.wrap_socket(self.sock, server_side=True, certfile=self.certificate, cert_reqs = self.certreqs, ca_certs = self.cacerts, ssl_version = self.protocol, ciphers = self.ciphers) if test_support.verbose and self.chatty: sys.stdout.write(' server: wrapped server socket as %s\n' % str(self.sock)) self.port = test_support.bind_port(self.sock) self.active = False threading.Thread.__init__(self) self.daemon = True def start(self, flag=None): self.flag = flag threading.Thread.start(self) def run(self): self.sock.settimeout(0.05) self.sock.listen(5) self.active = True if self.flag: # signal an event self.flag.set() while self.active: try: newconn, connaddr = self.sock.accept() if test_support.verbose and self.chatty: sys.stdout.write(' server: new connection from ' + str(connaddr) + '\n') handler = self.ConnectionHandler(self, newconn) handler.start() except 
socket.timeout: pass except KeyboardInterrupt: self.stop() self.sock.close() def stop(self): self.active = False class AsyncoreEchoServer(threading.Thread): class EchoServer(asyncore.dispatcher): class ConnectionHandler(asyncore.dispatcher_with_send): def __init__(self, conn, certfile): asyncore.dispatcher_with_send.__init__(self, conn) self.socket = ssl.wrap_socket(conn, server_side=True, certfile=certfile, do_handshake_on_connect=False) self._ssl_accepting = True def readable(self): if isinstance(self.socket, ssl.SSLSocket): while self.socket.pending() > 0: self.handle_read_event() return True def _do_ssl_handshake(self): try: self.socket.do_handshake() except ssl.SSLError, err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return elif err.args[0] == ssl.SSL_ERROR_EOF: return self.handle_close() raise except socket.error, err: if err.args[0] == errno.ECONNABORTED: return self.handle_close() else: self._ssl_accepting = False def handle_read(self): if self._ssl_accepting: self._do_ssl_handshake() else: data = self.recv(1024) if data and data.strip() != 'over': self.send(data.lower()) def handle_close(self): self.close() if test_support.verbose: sys.stdout.write(" server: closed connection %s\n" % self.socket) def handle_error(self): raise def __init__(self, certfile): self.certfile = certfile asyncore.dispatcher.__init__(self) self.create_socket(socket.AF_INET, socket.SOCK_STREAM) self.port = test_support.bind_port(self.socket) self.listen(5) def handle_accept(self): sock_obj, addr = self.accept() if test_support.verbose: sys.stdout.write(" server: new connection from %s:%s\n" %addr) self.ConnectionHandler(sock_obj, self.certfile) def handle_error(self): raise def __init__(self, certfile): self.flag = None self.active = False self.server = self.EchoServer(certfile) self.port = self.server.port threading.Thread.__init__(self) self.daemon = True def __str__(self): return "<%s %s>" % (self.__class__.__name__, self.server) def start(self, flag=None): self.flag = flag threading.Thread.start(self) def run(self): self.active = True if self.flag: self.flag.set() while self.active: asyncore.loop(0.05) def stop(self): self.active = False self.server.close() class SocketServerHTTPSServer(threading.Thread): class HTTPSServer(HTTPServer): def __init__(self, server_address, RequestHandlerClass, certfile): HTTPServer.__init__(self, server_address, RequestHandlerClass) # we assume the certfile contains both private key and certificate self.certfile = certfile self.allow_reuse_address = True def __str__(self): return ('<%s %s:%s>' % (self.__class__.__name__, self.server_name, self.server_port)) def get_request(self): # override this to wrap socket with SSL sock, addr = self.socket.accept() sslconn = ssl.wrap_socket(sock, server_side=True, certfile=self.certfile) return sslconn, addr class RootedHTTPRequestHandler(SimpleHTTPRequestHandler): # need to override translate_path to get a known root, # instead of using os.curdir, since the test could be # run from anywhere server_version = "TestHTTPS/1.0" root = None def translate_path(self, path): """Translate a /-separated PATH to the local filename syntax. Components that mean special things to the local file system (e.g. drive or directory names) are ignored. (XXX They should probably be diagnosed.) 
""" # abandon query parameters path = urlparse.urlparse(path)[2] path = os.path.normpath(urllib.unquote(path)) words = path.split('/') words = filter(None, words) path = self.root for word in words: drive, word = os.path.splitdrive(word) head, word = os.path.split(word) if word in self.root: continue path = os.path.join(path, word) return path def log_message(self, format, *args): # we override this to suppress logging unless "verbose" if test_support.verbose: sys.stdout.write(" server (%s:%d %s):\n [%s] %s\n" % (self.server.server_address, self.server.server_port, self.request.cipher(), self.log_date_time_string(), format%args)) def __init__(self, certfile): self.flag = None self.RootedHTTPRequestHandler.root = os.path.split(CERTFILE)[0] self.server = self.HTTPSServer( (HOST, 0), self.RootedHTTPRequestHandler, certfile) self.port = self.server.server_port threading.Thread.__init__(self) self.daemon = True def __str__(self): return "<%s %s>" % (self.__class__.__name__, self.server) def start(self, flag=None): self.flag = flag threading.Thread.start(self) def run(self): if self.flag: self.flag.set() self.server.serve_forever(0.05) def stop(self): self.server.shutdown() def bad_cert_test(certfile): """ Launch a server with CERT_REQUIRED, and check that trying to connect to it with the given client certificate fails. """ server = ThreadedEchoServer(CERTFILE, certreqs=ssl.CERT_REQUIRED, cacerts=CERTFILE, chatty=False) flag = threading.Event() server.start(flag) # wait for it to start flag.wait() # try to connect try: try: s = ssl.wrap_socket(socket.socket(), certfile=certfile, ssl_version=ssl.PROTOCOL_TLSv1) s.connect((HOST, server.port)) except ssl.SSLError, x: if test_support.verbose: sys.stdout.write("\nSSLError is %s\n" % x[1]) except socket.error, x: if test_support.verbose: sys.stdout.write("\nsocket.error is %s\n" % x[1]) else: raise AssertionError("Use of invalid cert should have failed!") finally: server.stop() server.join() def server_params_test(certfile, protocol, certreqs, cacertsfile, client_certfile, client_protocol=None, indata="FOO\n", ciphers=None, chatty=True, connectionchatty=False, wrap_accepting_socket=False): """ Launch a server, connect a client to it and try various reads and writes. 
""" server = ThreadedEchoServer(certfile, certreqs=certreqs, ssl_version=protocol, cacerts=cacertsfile, ciphers=ciphers, chatty=chatty, connectionchatty=connectionchatty, wrap_accepting_socket=wrap_accepting_socket) flag = threading.Event() server.start(flag) # wait for it to start flag.wait() # try to connect if client_protocol is None: client_protocol = protocol try: s = ssl.wrap_socket(socket.socket(), certfile=client_certfile, ca_certs=cacertsfile, ciphers=ciphers, cert_reqs=certreqs, ssl_version=client_protocol) s.connect((HOST, server.port)) for arg in [indata, bytearray(indata), memoryview(indata)]: if connectionchatty: if test_support.verbose: sys.stdout.write( " client: sending %s...\n" % (repr(arg))) s.write(arg) outdata = s.read() if connectionchatty: if test_support.verbose: sys.stdout.write(" client: read %s\n" % repr(outdata)) if outdata != indata.lower(): raise AssertionError( "bad data <<%s>> (%d) received; expected <<%s>> (%d)\n" % (outdata[:min(len(outdata),20)], len(outdata), indata[:min(len(indata),20)].lower(), len(indata))) s.write("over\n") if connectionchatty: if test_support.verbose: sys.stdout.write(" client: closing connection.\n") s.close() finally: server.stop() server.join() def try_protocol_combo(server_protocol, client_protocol, expect_success, certsreqs=None): if certsreqs is None: certsreqs = ssl.CERT_NONE certtype = { ssl.CERT_NONE: "CERT_NONE", ssl.CERT_OPTIONAL: "CERT_OPTIONAL", ssl.CERT_REQUIRED: "CERT_REQUIRED", }[certsreqs] if test_support.verbose: formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n" sys.stdout.write(formatstr % (ssl.get_protocol_name(client_protocol), ssl.get_protocol_name(server_protocol), certtype)) try: # NOTE: we must enable "ALL" ciphers, otherwise an SSLv23 client # will send an SSLv3 hello (rather than SSLv2) starting from # OpenSSL 1.0.0 (see issue #8322). server_params_test(CERTFILE, server_protocol, certsreqs, CERTFILE, CERTFILE, client_protocol, ciphers="ALL", chatty=False) # Protocol mismatch can result in either an SSLError, or a # "Connection reset by peer" error. except ssl.SSLError: if expect_success: raise except socket.error as e: if expect_success or e.errno != errno.ECONNRESET: raise else: if not expect_success: raise AssertionError( "Client protocol %s succeeded with server protocol %s!" % (ssl.get_protocol_name(client_protocol), ssl.get_protocol_name(server_protocol))) class ThreadedTests(unittest.TestCase): def test_rude_shutdown(self): """A brutal shutdown of an SSL server should raise an IOError in the client when attempting handshake. """ listener_ready = threading.Event() listener_gone = threading.Event() s = socket.socket() port = test_support.bind_port(s, HOST) # `listener` runs in a thread. It sits in an accept() until # the main thread connects. Then it rudely closes the socket, # and sets Event `listener_gone` to let the main thread know # the socket is gone. 
def listener(): s.listen(5) listener_ready.set() s.accept() s.close() listener_gone.set() def connector(): listener_ready.wait() c = socket.socket() c.connect((HOST, port)) listener_gone.wait() try: ssl_sock = ssl.wrap_socket(c) except IOError: pass else: self.fail('connecting to closed SSL socket should have failed') t = threading.Thread(target=listener) t.start() try: connector() finally: t.join() def test_echo(self): """Basic test of an SSL client connecting to a server""" if test_support.verbose: sys.stdout.write("\n") server_params_test(CERTFILE, ssl.PROTOCOL_TLSv1, ssl.CERT_NONE, CERTFILE, CERTFILE, ssl.PROTOCOL_TLSv1, chatty=True, connectionchatty=True) def test_getpeercert(self): if test_support.verbose: sys.stdout.write("\n") s2 = socket.socket() server = ThreadedEchoServer(CERTFILE, certreqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_SSLv23, cacerts=CERTFILE, chatty=False) flag = threading.Event() server.start(flag) # wait for it to start flag.wait() # try to connect try: s = ssl.wrap_socket(socket.socket(), certfile=CERTFILE, ca_certs=CERTFILE, cert_reqs=ssl.CERT_REQUIRED, ssl_version=ssl.PROTOCOL_SSLv23) s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") cipher = s.cipher() if test_support.verbose: sys.stdout.write(pprint.pformat(cert) + '\n') sys.stdout.write("Connection cipher is " + str(cipher) + '.\n') if 'subject' not in cert: self.fail("No subject field in certificate: %s." % pprint.pformat(cert)) if ((('organizationName', 'Python Software Foundation'),) not in cert['subject']): self.fail( "Missing or invalid 'organizationName' field in certificate subject; " "should be 'Python Software Foundation'.") s.close() finally: server.stop() server.join() def test_empty_cert(self): """Connecting with an empty cert file""" bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "nullcert.pem")) def test_malformed_cert(self): """Connecting with a badly formatted certificate (syntax error)""" bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "badcert.pem")) def test_nonexisting_cert(self): """Connecting with a non-existing cert file""" bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "wrongcert.pem")) def test_malformed_key(self): """Connecting with a badly formatted key (syntax error)""" bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "badkey.pem")) def test_protocol_sslv2(self): """Connecting to an SSLv2 server with various client options""" if test_support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) def test_protocol_sslv23(self): """Connecting to an SSLv23 server with various client options""" if test_support.verbose: sys.stdout.write("\n") try: try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True) except (ssl.SSLError, socket.error), x: # this fails on some older versions of OpenSSL (0.9.7l, for instance) if test_support.verbose: sys.stdout.write( " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n" % str(x)) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True) try_protocol_combo(ssl.PROTOCOL_SSLv23, 
ssl.PROTOCOL_SSLv23, True) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) def test_protocol_sslv3(self): """Connecting to an SSLv3 server with various client options""" if test_support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) def test_protocol_tlsv1(self): """Connecting to a TLSv1 server with various client options""" if test_support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False) def test_starttls(self): """Switching from clear text to encrypted and back again.""" msgs = ("msg 1", "MSG 2", "STARTTLS", "MSG 3", "msg 4", "ENDTLS", "msg 5", "msg 6") server = ThreadedEchoServer(CERTFILE, ssl_version=ssl.PROTOCOL_TLSv1, starttls_server=True, chatty=True, connectionchatty=True) flag = threading.Event() server.start(flag) # wait for it to start flag.wait() # try to connect wrapped = False try: s = socket.socket() s.setblocking(1) s.connect((HOST, server.port)) if test_support.verbose: sys.stdout.write("\n") for indata in msgs: if test_support.verbose: sys.stdout.write( " client: sending %s...\n" % repr(indata)) if wrapped: conn.write(indata) outdata = conn.read() else: s.send(indata) outdata = s.recv(1024) if (indata == "STARTTLS" and outdata.strip().lower().startswith("ok")): # STARTTLS ok, switch to secure mode if test_support.verbose: sys.stdout.write( " client: read %s from server, starting TLS...\n" % repr(outdata)) conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1) wrapped = True elif (indata == "ENDTLS" and outdata.strip().lower().startswith("ok")): # ENDTLS ok, switch back to clear text if test_support.verbose: sys.stdout.write( " client: read %s from server, ending TLS...\n" % repr(outdata)) s = conn.unwrap() wrapped = False else: if test_support.verbose: sys.stdout.write( " client: read %s from server\n" % repr(outdata)) if test_support.verbose: sys.stdout.write(" client: closing connection.\n") if wrapped: conn.write("over\n") else: s.send("over\n") s.close() finally: server.stop() server.join() def test_socketserver(self): """Using a SocketServer to create and manage SSL connections.""" server = SocketServerHTTPSServer(CERTFILE) flag = threading.Event() server.start(flag) # wait for it to start flag.wait() # try to connect try: if 
test_support.verbose: sys.stdout.write('\n') with open(CERTFILE, 'rb') as f: d1 = f.read() d2 = '' # now fetch the same data from the HTTPS server url = 'https://127.0.0.1:%d/%s' % ( server.port, os.path.split(CERTFILE)[1]) with test_support.check_py3k_warnings(): f = urllib.urlopen(url) dlen = f.info().getheader("content-length") if dlen and (int(dlen) > 0): d2 = f.read(int(dlen)) if test_support.verbose: sys.stdout.write( " client: read %d bytes from remote server '%s'\n" % (len(d2), server)) f.close() self.assertEqual(d1, d2) finally: server.stop() server.join() def test_wrapped_accept(self): """Check the accept() method on SSL sockets.""" if test_support.verbose: sys.stdout.write("\n") server_params_test(CERTFILE, ssl.PROTOCOL_SSLv23, ssl.CERT_REQUIRED, CERTFILE, CERTFILE, ssl.PROTOCOL_SSLv23, chatty=True, connectionchatty=True, wrap_accepting_socket=True) def test_asyncore_server(self): """Check the example asyncore integration.""" indata = "TEST MESSAGE of mixed case\n" if test_support.verbose: sys.stdout.write("\n") server = AsyncoreEchoServer(CERTFILE) flag = threading.Event() server.start(flag) # wait for it to start flag.wait() # try to connect try: s = ssl.wrap_socket(socket.socket()) s.connect(('127.0.0.1', server.port)) if test_support.verbose: sys.stdout.write( " client: sending %s...\n" % (repr(indata))) s.write(indata) outdata = s.read() if test_support.verbose: sys.stdout.write(" client: read %s\n" % repr(outdata)) if outdata != indata.lower(): self.fail( "bad data <<%s>> (%d) received; expected <<%s>> (%d)\n" % (outdata[:min(len(outdata),20)], len(outdata), indata[:min(len(indata),20)].lower(), len(indata))) s.write("over\n") if test_support.verbose: sys.stdout.write(" client: closing connection.\n") s.close() finally: server.stop() # wait for server thread to end server.join() def test_recv_send(self): """Test recv(), send() and friends.""" if test_support.verbose: sys.stdout.write("\n") server = ThreadedEchoServer(CERTFILE, certreqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1, cacerts=CERTFILE, chatty=True, connectionchatty=False) flag = threading.Event() server.start(flag) # wait for it to start flag.wait() # try to connect s = ssl.wrap_socket(socket.socket(), server_side=False, certfile=CERTFILE, ca_certs=CERTFILE, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1) s.connect((HOST, server.port)) try: # helper methods for standardising recv* method signatures def _recv_into(): b = bytearray("\0"*100) count = s.recv_into(b) return b[:count] def _recvfrom_into(): b = bytearray("\0"*100) count, addr = s.recvfrom_into(b) return b[:count] # (name, method, whether to expect success, *args) send_methods = [ ('send', s.send, True, []), ('sendto', s.sendto, False, ["some.address"]), ('sendall', s.sendall, True, []), ] recv_methods = [ ('recv', s.recv, True, []), ('recvfrom', s.recvfrom, False, ["some.address"]), ('recv_into', _recv_into, True, []), ('recvfrom_into', _recvfrom_into, False, []), ] data_prefix = u"PREFIX_" for meth_name, send_meth, expect_success, args in send_methods: indata = data_prefix + meth_name try: send_meth(indata.encode('ASCII', 'strict'), *args) outdata = s.read() outdata = outdata.decode('ASCII', 'strict') if outdata != indata.lower(): self.fail( "While sending with <<%s>> bad data " "<<%r>> (%d) received; " "expected <<%r>> (%d)\n" % ( meth_name, outdata[:20], len(outdata), indata[:20], len(indata) ) ) except ValueError as e: if expect_success: self.fail( "Failed to send with method <<%s>>; " "expected to succeed.\n" % (meth_name,) ) if not 
str(e).startswith(meth_name): self.fail( "Method <<%s>> failed with unexpected " "exception message: %s\n" % ( meth_name, e ) ) for meth_name, recv_meth, expect_success, args in recv_methods: indata = data_prefix + meth_name try: s.send(indata.encode('ASCII', 'strict')) outdata = recv_meth(*args) outdata = outdata.decode('ASCII', 'strict') if outdata != indata.lower(): self.fail( "While receiving with <<%s>> bad data " "<<%r>> (%d) received; " "expected <<%r>> (%d)\n" % ( meth_name, outdata[:20], len(outdata), indata[:20], len(indata) ) ) except ValueError as e: if expect_success: self.fail( "Failed to receive with method <<%s>>; " "expected to succeed.\n" % (meth_name,) ) if not str(e).startswith(meth_name): self.fail( "Method <<%s>> failed with unexpected " "exception message: %s\n" % ( meth_name, e ) ) # consume data s.read() s.write("over\n".encode("ASCII", "strict")) s.close() finally: server.stop() server.join() def test_handshake_timeout(self): # Issue #5103: SSL handshake must respect the socket timeout server = socket.socket(socket.AF_INET) host = "127.0.0.1" port = test_support.bind_port(server) started = threading.Event() finish = False def serve(): server.listen(5) started.set() conns = [] while not finish: r, w, e = select.select([server], [], [], 0.1) if server in r: # Let the socket hang around rather than having # it closed by garbage collection. conns.append(server.accept()[0]) t = threading.Thread(target=serve) t.start() started.wait() try: try: c = socket.socket(socket.AF_INET) c.settimeout(0.2) c.connect((host, port)) # Will attempt handshake and time out self.assertRaisesRegexp(ssl.SSLError, "timed out", ssl.wrap_socket, c) finally: c.close() try: c = socket.socket(socket.AF_INET) c.settimeout(0.2) c = ssl.wrap_socket(c) # Will attempt handshake and time out self.assertRaisesRegexp(ssl.SSLError, "timed out", c.connect, (host, port)) finally: c.close() finally: finish = True t.join() server.close() def test_main(verbose=False): if skip_expected: raise unittest.SkipTest("No SSL support") global CERTFILE, SVN_PYTHON_ORG_ROOT_CERT CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert.pem") SVN_PYTHON_ORG_ROOT_CERT = os.path.join( os.path.dirname(__file__) or os.curdir, "https_svn_python_org_root.pem") if (not os.path.exists(CERTFILE) or not os.path.exists(SVN_PYTHON_ORG_ROOT_CERT)): raise test_support.TestFailed("Can't read certificate files!") tests = [BasicTests] if test_support.is_resource_enabled('network'): tests.append(NetworkedTests) if _have_threads: thread_info = test_support.threading_setup() if thread_info and test_support.is_resource_enabled('network'): tests.append(ThreadedTests) try: test_support.run_unittest(*tests) finally: if _have_threads: test_support.threading_cleanup(*thread_info) if __name__ == "__main__": test_main()
{ "content_hash": "6bb9877ec60df99c7e4d0273e00c249c", "timestamp": "", "source": "github", "line_count": 1266, "max_line_length": 107, "avg_line_length": 42.17219589257504, "alnum_prop": 0.47688705750140475, "repo_name": "MalloyPower/parsing-python", "id": "813968ead6ff36758254638b4ac5250c91412eff", "size": "53430", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "front-end/testsuite-python-lib/Python-2.7/Lib/test/test_ssl.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1963" }, { "name": "Lex", "bytes": "238458" }, { "name": "Makefile", "bytes": "4513" }, { "name": "OCaml", "bytes": "412695" }, { "name": "Python", "bytes": "17319" }, { "name": "Rascal", "bytes": "523063" }, { "name": "Yacc", "bytes": "429659" } ], "symlink_target": "" }
import os import socket import uuid import eventlet import netaddr from oslo.config import cfg from quantum.agent.common import config from quantum.agent.linux import dhcp from quantum.agent.linux import external_process from quantum.agent.linux import interface from quantum.agent.linux import ip_lib from quantum.agent import rpc as agent_rpc from quantum.common import constants from quantum.common import exceptions from quantum.common import topics from quantum import context from quantum import manager from quantum.openstack.common import importutils from quantum.openstack.common import jsonutils from quantum.openstack.common import lockutils from quantum.openstack.common import log as logging from quantum.openstack.common import loopingcall from quantum.openstack.common.rpc import proxy from quantum.openstack.common import service from quantum.openstack.common import uuidutils from quantum import service as quantum_service LOG = logging.getLogger(__name__) NS_PREFIX = 'qdhcp-' METADATA_DEFAULT_PREFIX = 16 METADATA_DEFAULT_IP = '169.254.169.254/%d' % METADATA_DEFAULT_PREFIX METADATA_PORT = 80 class DhcpAgent(manager.Manager): OPTS = [ cfg.IntOpt('resync_interval', default=5, help=_("Interval to resync.")), cfg.StrOpt('dhcp_driver', default='quantum.agent.linux.dhcp.Dnsmasq', help=_("The driver used to manage the DHCP server.")), cfg.BoolOpt('use_namespaces', default=True, help=_("Allow overlapping IP.")), cfg.BoolOpt('enable_isolated_metadata', default=False, help=_("Support Metadata requests on isolated networks.")), cfg.BoolOpt('enable_metadata_network', default=False, help=_("Allows for serving metadata requests from a " "dedicate network. Requires " "enable isolated_metadata = True ")), ] def __init__(self, host=None): super(DhcpAgent, self).__init__(host=host) self.needs_resync = False self.conf = cfg.CONF self.cache = NetworkCache() self.root_helper = config.get_root_helper(self.conf) self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver) ctx = context.get_admin_context_without_session() self.plugin_rpc = DhcpPluginApi(topics.PLUGIN, ctx) self.device_manager = DeviceManager(self.conf, self.plugin_rpc) self.lease_relay = DhcpLeaseRelay(self.update_lease) self._populate_networks_cache() def _populate_networks_cache(self): """Populate the networks cache when the DHCP-agent starts.""" try: existing_networks = self.dhcp_driver_cls.existing_dhcp_networks( self.conf, self.root_helper ) for net_id in existing_networks: net = DictModel({"id": net_id, "subnets": [], "ports": []}) self.cache.put(net) except NotImplementedError: # just go ahead with an empty networks cache LOG.debug( _("The '%s' DHCP-driver does not support retrieving of a " "list of existing networks"), self.conf.dhcp_driver ) def after_start(self): self.run() LOG.info(_("DHCP agent started")) def run(self): """Activate the DHCP agent.""" self.sync_state() self.periodic_resync() self.lease_relay.start() def _ns_name(self, network): if self.conf.use_namespaces: return NS_PREFIX + network.id def call_driver(self, action, network): """Invoke an action on a DHCP driver instance.""" try: # the Driver expects something that is duck typed similar to # the base models. 
driver = self.dhcp_driver_cls(self.conf, network, self.root_helper, self.device_manager, self._ns_name(network)) getattr(driver, action)() return True except Exception: self.needs_resync = True LOG.exception(_('Unable to %s dhcp.'), action) def update_lease(self, network_id, ip_address, time_remaining): try: self.plugin_rpc.update_lease_expiration(network_id, ip_address, time_remaining) except Exception: self.needs_resync = True LOG.exception(_('Unable to update lease')) def sync_state(self): """Sync the local DHCP state with Quantum.""" LOG.info(_('Synchronizing state')) known_networks = set(self.cache.get_network_ids()) try: active_networks = set(self.plugin_rpc.get_active_networks()) for deleted_id in known_networks - active_networks: self.disable_dhcp_helper(deleted_id) for network_id in active_networks: self.refresh_dhcp_helper(network_id) except Exception: self.needs_resync = True LOG.exception(_('Unable to sync network state.')) def _periodic_resync_helper(self): """Resync the dhcp state at the configured interval.""" while True: eventlet.sleep(self.conf.resync_interval) if self.needs_resync: self.needs_resync = False self.sync_state() def periodic_resync(self): """Spawn a thread to periodically resync the dhcp state.""" eventlet.spawn(self._periodic_resync_helper) def enable_dhcp_helper(self, network_id): """Enable DHCP for a network that meets enabling criteria.""" try: network = self.plugin_rpc.get_network_info(network_id) except Exception: self.needs_resync = True LOG.exception(_('Network %s RPC info call failed.'), network_id) return if not network.admin_state_up: return for subnet in network.subnets: if subnet.enable_dhcp: if self.call_driver('enable', network): if self.conf.use_namespaces: self.enable_isolated_metadata_proxy(network) self.cache.put(network) break def disable_dhcp_helper(self, network_id): """Disable DHCP for a network known to the agent.""" network = self.cache.get_network_by_id(network_id) if network: if self.conf.use_namespaces: self.disable_isolated_metadata_proxy(network) if self.call_driver('disable', network): self.cache.remove(network) def refresh_dhcp_helper(self, network_id): """Refresh or disable DHCP for a network depending on the current state of the network. """ old_network = self.cache.get_network_by_id(network_id) if not old_network: # DHCP current not running for network. 
return self.enable_dhcp_helper(network_id) try: network = self.plugin_rpc.get_network_info(network_id) except Exception: self.needs_resync = True LOG.exception(_('Network %s RPC info call failed.'), network_id) return old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp) new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp) if new_cidrs and old_cidrs == new_cidrs: self.call_driver('reload_allocations', network) self.cache.put(network) elif new_cidrs: if self.call_driver('restart', network): self.cache.put(network) else: self.disable_dhcp_helper(network.id) @lockutils.synchronized('agent', 'dhcp-') def network_create_end(self, context, payload): """Handle the network.create.end notification event.""" network_id = payload['network']['id'] self.enable_dhcp_helper(network_id) @lockutils.synchronized('agent', 'dhcp-') def network_update_end(self, context, payload): """Handle the network.update.end notification event.""" network_id = payload['network']['id'] if payload['network']['admin_state_up']: self.enable_dhcp_helper(network_id) else: self.disable_dhcp_helper(network_id) @lockutils.synchronized('agent', 'dhcp-') def network_delete_end(self, context, payload): """Handle the network.delete.end notification event.""" self.disable_dhcp_helper(payload['network_id']) @lockutils.synchronized('agent', 'dhcp-') def subnet_update_end(self, context, payload): """Handle the subnet.update.end notification event.""" network_id = payload['subnet']['network_id'] self.refresh_dhcp_helper(network_id) # Use the update handler for the subnet create event. subnet_create_end = subnet_update_end @lockutils.synchronized('agent', 'dhcp-') def subnet_delete_end(self, context, payload): """Handle the subnet.delete.end notification event.""" subnet_id = payload['subnet_id'] network = self.cache.get_network_by_subnet_id(subnet_id) if network: self.refresh_dhcp_helper(network.id) @lockutils.synchronized('agent', 'dhcp-') def port_update_end(self, context, payload): """Handle the port.update.end notification event.""" port = DictModel(payload['port']) network = self.cache.get_network_by_id(port.network_id) if network: self.cache.put_port(port) self.call_driver('reload_allocations', network) # Use the update handler for the port create event. port_create_end = port_update_end @lockutils.synchronized('agent', 'dhcp-') def port_delete_end(self, context, payload): """Handle the port.delete.end notification event.""" port = self.cache.get_port_by_id(payload['port_id']) if port: network = self.cache.get_network_by_id(port.network_id) self.cache.remove_port(port) self.call_driver('reload_allocations', network) def enable_isolated_metadata_proxy(self, network): # The proxy might work for either a single network # or all the networks connected via a router # to the one passed as a parameter quantum_lookup_param = '--network_id=%s' % network.id meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_IP) has_metadata_subnet = any(netaddr.IPNetwork(s.cidr) in meta_cidr for s in network.subnets) if (self.conf.enable_metadata_network and has_metadata_subnet): router_ports = [port for port in network.ports if (port.device_owner == constants.DEVICE_OWNER_ROUTER_INTF)] if router_ports: # Multiple router ports should not be allowed if len(router_ports) > 1: LOG.warning(_("%(port_num)d router ports found on the " "metadata access network. 
Only the port " "%(port_id)s, for router %(router_id)s " "will be considered"), {'port_num': len(router_ports), 'port_id': router_ports[0].id, 'router_id': router_ports[0].device_id}) quantum_lookup_param = ('--router_id=%s' % router_ports[0].device_id) def callback(pid_file): proxy_cmd = ['quantum-ns-metadata-proxy', '--pid_file=%s' % pid_file, quantum_lookup_param, '--state_path=%s' % self.conf.state_path, '--metadata_port=%d' % METADATA_PORT] proxy_cmd.extend(config.get_log_args( cfg.CONF, 'quantum-ns-metadata-proxy-%s.log' % network.id)) return proxy_cmd pm = external_process.ProcessManager( self.conf, network.id, self.conf.root_helper, self._ns_name(network)) pm.enable(callback) def disable_isolated_metadata_proxy(self, network): pm = external_process.ProcessManager( self.conf, network.id, self.conf.root_helper, self._ns_name(network)) pm.disable() class DhcpPluginApi(proxy.RpcProxy): """Agent side of the dhcp rpc API. API version history: 1.0 - Initial version. """ BASE_RPC_API_VERSION = '1.0' def __init__(self, topic, context): super(DhcpPluginApi, self).__init__( topic=topic, default_version=self.BASE_RPC_API_VERSION) self.context = context self.host = cfg.CONF.host def get_active_networks(self): """Make a remote process call to retrieve the active networks.""" return self.call(self.context, self.make_msg('get_active_networks', host=self.host), topic=self.topic) def get_network_info(self, network_id): """Make a remote process call to retrieve network info.""" return DictModel(self.call(self.context, self.make_msg('get_network_info', network_id=network_id, host=self.host), topic=self.topic)) def get_dhcp_port(self, network_id, device_id): """Make a remote process call to create the dhcp port.""" return DictModel(self.call(self.context, self.make_msg('get_dhcp_port', network_id=network_id, device_id=device_id, host=self.host), topic=self.topic)) def release_dhcp_port(self, network_id, device_id): """Make a remote process call to release the dhcp port.""" return self.call(self.context, self.make_msg('release_dhcp_port', network_id=network_id, device_id=device_id, host=self.host), topic=self.topic) def release_port_fixed_ip(self, network_id, device_id, subnet_id): """Make a remote process call to release a fixed_ip on the port.""" return self.call(self.context, self.make_msg('release_port_fixed_ip', network_id=network_id, subnet_id=subnet_id, device_id=device_id, host=self.host), topic=self.topic) def update_lease_expiration(self, network_id, ip_address, lease_remaining): """Make a remote process call to update the ip lease expiration.""" self.cast(self.context, self.make_msg('update_lease_expiration', network_id=network_id, ip_address=ip_address, lease_remaining=lease_remaining, host=self.host), topic=self.topic) class NetworkCache(object): """Agent cache of the current network state.""" def __init__(self): self.cache = {} self.subnet_lookup = {} self.port_lookup = {} def get_network_ids(self): return self.cache.keys() def get_network_by_id(self, network_id): return self.cache.get(network_id) def get_network_by_subnet_id(self, subnet_id): return self.cache.get(self.subnet_lookup.get(subnet_id)) def get_network_by_port_id(self, port_id): return self.cache.get(self.port_lookup.get(port_id)) def put(self, network): if network.id in self.cache: self.remove(self.cache[network.id]) self.cache[network.id] = network for subnet in network.subnets: self.subnet_lookup[subnet.id] = network.id for port in network.ports: self.port_lookup[port.id] = network.id def remove(self, network): del 
self.cache[network.id] for subnet in network.subnets: del self.subnet_lookup[subnet.id] for port in network.ports: del self.port_lookup[port.id] def put_port(self, port): network = self.get_network_by_id(port.network_id) for index in range(len(network.ports)): if network.ports[index].id == port.id: network.ports[index] = port break else: network.ports.append(port) self.port_lookup[port.id] = network.id def remove_port(self, port): network = self.get_network_by_port_id(port.id) for index in range(len(network.ports)): if network.ports[index] == port: del network.ports[index] del self.port_lookup[port.id] break def get_port_by_id(self, port_id): network = self.get_network_by_port_id(port_id) if network: for port in network.ports: if port.id == port_id: return port def get_state(self): net_ids = self.get_network_ids() num_nets = len(net_ids) num_subnets = 0 num_ports = 0 for net_id in net_ids: network = self.get_network_by_id(net_id) num_subnets += len(network.subnets) num_ports += len(network.ports) return {'networks': num_nets, 'subnets': num_subnets, 'ports': num_ports} class DeviceManager(object): OPTS = [ cfg.StrOpt('interface_driver', help=_("The driver used to manage the virtual interface.")) ] def __init__(self, conf, plugin): self.conf = conf self.root_helper = config.get_root_helper(conf) self.plugin = plugin if not conf.interface_driver: raise SystemExit(_('You must specify an interface driver')) try: self.driver = importutils.import_object(conf.interface_driver, conf) except Exception: msg = _("Error importing interface driver " "'%s'") % conf.interface_driver raise SystemExit(msg) def get_interface_name(self, network, port=None): """Return interface(device) name for use by the DHCP process.""" if not port: device_id = self.get_device_id(network) port = self.plugin.get_dhcp_port(network.id, device_id) return self.driver.get_device_name(port) def get_device_id(self, network): """Return a unique DHCP device ID for this host on the network.""" # There could be more than one dhcp server per network, so create # a device id that combines host and network ids host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname()) return 'dhcp%s-%s' % (host_uuid, network.id) def setup(self, network, reuse_existing=False): """Create and initialize a device for network's DHCP on this host.""" device_id = self.get_device_id(network) port = self.plugin.get_dhcp_port(network.id, device_id) interface_name = self.get_interface_name(network, port) if self.conf.use_namespaces: namespace = NS_PREFIX + network.id else: namespace = None if ip_lib.device_exists(interface_name, self.root_helper, namespace): if not reuse_existing: raise exceptions.PreexistingDeviceFailure( dev_name=interface_name) LOG.debug(_('Reusing existing device: %s.'), interface_name) else: self.driver.plug(network.id, port.id, interface_name, port.mac_address, namespace=namespace) ip_cidrs = [] for fixed_ip in port.fixed_ips: subnet = fixed_ip.subnet net = netaddr.IPNetwork(subnet.cidr) ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen) ip_cidrs.append(ip_cidr) if (self.conf.enable_isolated_metadata and self.conf.use_namespaces): ip_cidrs.append(METADATA_DEFAULT_IP) self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace) # ensure that the dhcp interface is first in the list if namespace is None: device = ip_lib.IPDevice(interface_name, self.root_helper) device.route.pullup_route(interface_name) if self.conf.enable_metadata_network: meta_cidr = netaddr.IPNetwork(METADATA_DEFAULT_IP) metadata_subnets = [s for s in 
network.subnets if netaddr.IPNetwork(s.cidr) in meta_cidr] if metadata_subnets: # Add a gateway so that packets can be routed back to VMs device = ip_lib.IPDevice(interface_name, self.root_helper, namespace) # Only 1 subnet on metadata access network gateway_ip = metadata_subnets[0].gateway_ip device.route.add_gateway(gateway_ip) return interface_name def destroy(self, network, device_name): """Destroy the device used for the network's DHCP on this host.""" if self.conf.use_namespaces: namespace = NS_PREFIX + network.id else: namespace = None self.driver.unplug(device_name, namespace=namespace) self.plugin.release_dhcp_port(network.id, self.get_device_id(network)) class DictModel(object): """Convert dict into an object that provides attribute access to values.""" def __init__(self, d): for key, value in d.iteritems(): if isinstance(value, list): value = [DictModel(item) if isinstance(item, dict) else item for item in value] elif isinstance(value, dict): value = DictModel(value) setattr(self, key, value) class DhcpLeaseRelay(object): """UNIX domain socket server for processing lease updates. Network namespace isolation prevents the DHCP process from notifying Quantum directly. This class works around the limitation by using the domain socket to pass the information. This class handles message. receiving and then calls the callback method. """ OPTS = [ cfg.StrOpt('dhcp_lease_relay_socket', default='$state_path/dhcp/lease_relay', help=_('Location to DHCP lease relay UNIX domain socket')) ] def __init__(self, lease_update_callback): self.callback = lease_update_callback dirname = os.path.dirname(cfg.CONF.dhcp_lease_relay_socket) if os.path.isdir(dirname): try: os.unlink(cfg.CONF.dhcp_lease_relay_socket) except OSError: if os.path.exists(cfg.CONF.dhcp_lease_relay_socket): raise else: os.makedirs(dirname, 0755) def _handler(self, client_sock, client_addr): """Handle incoming lease relay stream connection. This method will only read the first 1024 bytes and then close the connection. The limit exists to limit the impact of misbehaving clients. """ try: msg = client_sock.recv(1024) data = jsonutils.loads(msg) client_sock.close() network_id = data['network_id'] if not uuidutils.is_uuid_like(network_id): raise ValueError(_("Network ID %s is not a valid UUID") % network_id) ip_address = str(netaddr.IPAddress(data['ip_address'])) lease_remaining = int(data['lease_remaining']) self.callback(network_id, ip_address, lease_remaining) except ValueError as e: LOG.warn(_('Unable to parse lease relay msg to dict.')) LOG.warn(_('Exception value: %s'), e) LOG.warn(_('Message representation: %s'), repr(msg)) except Exception as e: LOG.exception(_('Unable update lease. 
Exception')) def start(self): """Spawn a green thread to run the lease relay unix socket server.""" listener = eventlet.listen(cfg.CONF.dhcp_lease_relay_socket, family=socket.AF_UNIX) eventlet.spawn(eventlet.serve, listener, self._handler) class DhcpAgentWithStateReport(DhcpAgent): def __init__(self, host=None): super(DhcpAgentWithStateReport, self).__init__(host=host) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) self.agent_state = { 'binary': 'quantum-dhcp-agent', 'host': host, 'topic': topics.DHCP_AGENT, 'configurations': { 'dhcp_driver': cfg.CONF.dhcp_driver, 'use_namespaces': cfg.CONF.use_namespaces, 'dhcp_lease_time': cfg.CONF.dhcp_lease_time}, 'start_flag': True, 'agent_type': constants.AGENT_TYPE_DHCP} report_interval = cfg.CONF.AGENT.report_interval if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) self.heartbeat.start(interval=report_interval) def _report_state(self): try: self.agent_state.get('configurations').update( self.cache.get_state()) ctx = context.get_admin_context_without_session() self.state_rpc.report_state(ctx, self.agent_state) except AttributeError: # This means the server does not support report_state LOG.warn(_("Quantum server does not support state report." " State report for this agent will be disabled.")) self.heartbeat.stop() self.run() return except Exception: LOG.exception(_("Failed reporting state!")) return if self.agent_state.pop('start_flag', None): self.run() def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" self.needs_resync = True LOG.info(_("agent_updated by server side %s!"), payload) def after_start(self): LOG.info(_("DHCP agent started")) def register_options(): cfg.CONF.register_opts(DhcpAgent.OPTS) config.register_agent_state_opts_helper(cfg.CONF) config.register_root_helper(cfg.CONF) cfg.CONF.register_opts(DeviceManager.OPTS) cfg.CONF.register_opts(DhcpLeaseRelay.OPTS) cfg.CONF.register_opts(dhcp.OPTS) cfg.CONF.register_opts(interface.OPTS) def main(): eventlet.monkey_patch() register_options() cfg.CONF(project='quantum') config.setup_logging(cfg.CONF) server = quantum_service.Service.create( binary='quantum-dhcp-agent', topic=topics.DHCP_AGENT, report_interval=cfg.CONF.AGENT.report_interval, manager='quantum.agent.dhcp_agent.DhcpAgentWithStateReport') service.launch(server).wait()
{ "content_hash": "5f680ca83778ca2b193d6474e54a94d1", "timestamp": "", "source": "github", "line_count": 724, "max_line_length": 79, "avg_line_length": 38.994475138121544, "alnum_prop": 0.5669807310852932, "repo_name": "yamt/neutron", "id": "7c64d8d82549ffc7acc23546cdc4554ba49c510b", "size": "28913", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "quantum/agent/dhcp_agent.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "37307" }, { "name": "JavaScript", "bytes": "67928" }, { "name": "Perl", "bytes": "235" }, { "name": "Python", "bytes": "4078056" }, { "name": "Shell", "bytes": "10023" }, { "name": "XSLT", "bytes": "50907" } ], "symlink_target": "" }
import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from tensorflow_serving.servables.tensorflow import session_bundle_config_pb2 as tensorflow__serving_dot_servables_dot_tensorflow_dot_session__bundle__config__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='tensorflow_serving/servables/tensorflow/saved_model_bundle_source_adapter.proto', package='tensorflow.serving', syntax='proto3', serialized_options=None, serialized_pb=_b('\nOtensorflow_serving/servables/tensorflow/saved_model_bundle_source_adapter.proto\x12\x12tensorflow.serving\x1a\x43tensorflow_serving/servables/tensorflow/session_bundle_config.proto\"f\n#SavedModelBundleSourceAdapterConfig\x12?\n\rlegacy_config\x18\xe8\x07 \x01(\x0b\x32\'.tensorflow.serving.SessionBundleConfigb\x06proto3') , dependencies=[tensorflow__serving_dot_servables_dot_tensorflow_dot_session__bundle__config__pb2.DESCRIPTOR,]) _SAVEDMODELBUNDLESOURCEADAPTERCONFIG = _descriptor.Descriptor( name='SavedModelBundleSourceAdapterConfig', full_name='tensorflow.serving.SavedModelBundleSourceAdapterConfig', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='legacy_config', full_name='tensorflow.serving.SavedModelBundleSourceAdapterConfig.legacy_config', index=0, number=1000, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=172, serialized_end=274, ) _SAVEDMODELBUNDLESOURCEADAPTERCONFIG.fields_by_name['legacy_config'].message_type = tensorflow__serving_dot_servables_dot_tensorflow_dot_session__bundle__config__pb2._SESSIONBUNDLECONFIG DESCRIPTOR.message_types_by_name['SavedModelBundleSourceAdapterConfig'] = _SAVEDMODELBUNDLESOURCEADAPTERCONFIG _sym_db.RegisterFileDescriptor(DESCRIPTOR) SavedModelBundleSourceAdapterConfig = _reflection.GeneratedProtocolMessageType('SavedModelBundleSourceAdapterConfig', (_message.Message,), dict( DESCRIPTOR = _SAVEDMODELBUNDLESOURCEADAPTERCONFIG, __module__ = 'tensorflow_serving.servables.tensorflow.saved_model_bundle_source_adapter_pb2' # @@protoc_insertion_point(class_scope:tensorflow.serving.SavedModelBundleSourceAdapterConfig) )) _sym_db.RegisterMessage(SavedModelBundleSourceAdapterConfig) # @@protoc_insertion_point(module_scope)
{ "content_hash": "54763e3e0e770d16dd60f0f8cd806e96", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 346, "avg_line_length": 42.72463768115942, "alnum_prop": 0.7883310719131614, "repo_name": "diplomacy/research", "id": "09933ef317880a88c22124b449188108ddb615cc", "size": "3098", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "diplomacy_research/proto/tensorflow_serving/servables/tensorflow/saved_model_bundle_source_adapter_pb2.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "592" }, { "name": "C++", "bytes": "5188" }, { "name": "Dockerfile", "bytes": "31749" }, { "name": "Groovy", "bytes": "15568" }, { "name": "Python", "bytes": "2557493" }, { "name": "Shell", "bytes": "26305" } ], "symlink_target": "" }
from setuptools import setup


def get_version(filename):
    """
    Parse the value of the __version__ var from a Python source file without
    running/importing the file.
    """
    import re
    version_pattern = r"^ *__version__ *= *['\"](\d+\.\d+\.\d+)['\"] *$"
    match = re.search(version_pattern, open(filename).read(), re.MULTILINE)
    assert match, ("No version found in file: {!r} matching pattern: {!r}"
                   .format(filename, version_pattern))
    return match.group(1)


setup(
    name="sublcmd",
    version=get_version("sublcmd.py"),
    py_modules=["sublcmd"],
    author="Hal Blackburn",
    author_email="hwtb2@cam.ac.uk",
    install_requires=[],
    entry_points={
        "console_scripts": [
            "subl = sublcmd:main",
        ]
    }
)
{ "content_hash": "55607629a70c762d14014a7157524ccf", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 75, "avg_line_length": 25.483870967741936, "alnum_prop": 0.5810126582278481, "repo_name": "h4l/sublcmd", "id": "2e51160cd3e345805e5c78d1228b5d9cc856f502", "size": "790", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "2078" } ], "symlink_target": "" }
""" Greeting service provider :author: Thomas Calmant :copyright: Copyright 2016, Thomas Calmant :license: Apache License 2.0 .. Copyright 2016 Thomas Calmant Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # Module version __version_info__ = (0, 5, 9) __version__ = ".".join(str(x) for x in __version_info__) # Documentation strings format __docformat__ = "restructuredtext en" # ------------------------------------------------------------------------------ # Pelix remote services constants from pelix.constants import BundleActivator import pelix.remote # ------------------------------------------------------------------------------ # Service specification SERVICE_SPECIFICATION = "sample.grettings" # ------------------------------------------------------------------------------ class HelloWorldImpl(object): """ Implementation of the greeting service """ def sayHello(self, name): """ Prints a greeting message @param name Some name """ print("Python>> Hello, {0} !".format(name)) # ------------------------------------------------------------------------------ @BundleActivator class Activator(object): """ The bundle activator """ def __init__(self): """ Sets up members """ self.__registration = None def start(self, context): """ Bundle started @param context The bundle context """ # Prepare export properties props = {pelix.remote.PROP_EXPORTED_INTERFACES: [SERVICE_SPECIFICATION]} # Register the service with the Java specification self.__registration = context.register_service(SERVICE_SPECIFICATION, HelloWorldImpl(), props) def stop(self, context): """ Bundle stopped @param context The bundle context """ # Unregister the service self.__registration.unregister() self.__registration = None
{ "content_hash": "364afb1810f39d74cda250e1fd6bab7b", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 80, "avg_line_length": 27.473118279569892, "alnum_prop": 0.5549902152641879, "repo_name": "ahmadshahwan/ipopo", "id": "bc5742f8073c1e21025bc84cbd8c7568348a9d6f", "size": "2605", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "samples/remote/provider.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "1557057" }, { "name": "Shell", "bytes": "2803" } ], "symlink_target": "" }
import collections from constants import ServerType from models import Server from models import JumpHost from models import SystemOption from models import CustomCommandProfile from utils import concatenate_dirs from utils import is_empty from utils import make_url from constants import get_temp_directory from constants import get_log_directory from constants import get_migration_directory from common import get_user_by_id from constants import DefaultHostAuthenticationChoice class Context(object): def __init__(self): self._success = False @property def success(self): return self._success @success.setter def success(self, value): self._success = value class ConnectionContext(Context): def __init__(self, db_session, host): Context.__init__(self) self.host = host self.db_session = db_session @property def hostname(self): return self.host.hostname def load_data(self, key): return self.host.context[0].data.get(key) def save_data(self, key, value): self.host.context[0].data[key] = value def get_data_modified_time(self): return self.host.context[0].modified_time @property def host_urls(self): return self.make_urls() def make_urls(self, preferred_host_username=None, preferred_host_password=None): urls = [] if len(self.host.connection_param) > 0: connection = self.host.connection_param[0] jump_host_url = '' # Checks if there is a jump server if connection.jump_host_id is not None: try: jump_host = self.db_session.query(JumpHost).filter(JumpHost.id == connection.jump_host_id).first() if jump_host is not None: jump_host_url = make_url( connection_type=jump_host.connection_type, host_username=jump_host.username, host_password=jump_host.password, host_or_ip=jump_host.host_or_ip, port_number=jump_host.port_number) except: pass host_username = connection.username host_password = connection.password if not is_empty(preferred_host_username) and not is_empty(preferred_host_password): host_username = preferred_host_username host_password = preferred_host_password else: system_option = SystemOption.get(self.db_session) if system_option.enable_default_host_authentication: if not is_empty(system_option.default_host_username) and not is_empty(system_option.default_host_password): if system_option.default_host_authentication_choice == DefaultHostAuthenticationChoice.ALL_HOSTS or \ (system_option.default_host_authentication_choice == DefaultHostAuthenticationChoice.HOSTS_WITH_NO_SPECIFIED_USERNAME_AND_PASSWORD and is_empty(host_username) and is_empty(host_password)): host_username = system_option.default_host_username host_password = system_option.default_host_password for host_or_ip in connection.host_or_ip.split(','): for port_number in connection.port_number.split(','): host_urls = [] if not is_empty(jump_host_url): host_urls.append(jump_host_url) host_urls.append(make_url( connection_type=connection.connection_type, host_username=host_username, host_password=host_password, host_or_ip=host_or_ip, port_number=port_number, enable_password=connection.enable_password)) urls.append(host_urls) return urls class InventoryContext(ConnectionContext): def __init__(self, db_session, host, inventory_job): ConnectionContext.__init__(self, db_session, host) self.inventory_job = inventory_job @property def log_directory(self): return get_log_directory() + self.inventory_job.session_log @property def requested_action(self): return 'Get-Inventory' def load_job_data(self, key): return self.inventory_job.load_data(key) def save_job_data(self, key, value): self.inventory_job.save_data(key, value) def post_status(self, message): 
if self.db_session is not None and self.inventory_job is not None: try: self.inventory_job.set_status_message(message) self.db_session.commit() except Exception: self.db_session.rollback() class InstallContext(ConnectionContext): def __init__(self, db_session, host, install_job): ConnectionContext.__init__(self, db_session, host) self.install_job = install_job self.custom_commands = [] custom_command_profile_ids = self.install_job.custom_command_profile_ids if custom_command_profile_ids: for id in custom_command_profile_ids.split(','): profile = self.db_session.query(CustomCommandProfile).filter(CustomCommandProfile.id == id).first() if profile: for command in profile.command_list.split(','): if command not in self.custom_commands: self.custom_commands.append(command) def save_job_info(self, value): key = 'job_info' job_info = self.install_job.load_data(key) if not job_info: job_info = [] job_info.append(value) self.save_job_data(key, job_info) def load_job_data(self, key): return self.install_job.load_data(key) def save_job_data(self, key, value): self.install_job.save_data(key, value) @property def software_packages(self): return self.install_job.packages.split(',') @property def requested_action(self): return self.install_job.install_action @property def log_directory(self): return get_log_directory() + self.install_job.session_log @property def migration_directory(self): return get_migration_directory() @property def custom_commands(self): return self._custom_commands @custom_commands.setter def custom_commands(self, value): self._custom_commands = value def _generate_operation_id_key(self, tar_files): if isinstance(tar_files, collections.Iterable): return "_".join(sorted(tar_files)) + "_operation_id" return None def get_operation_id(self, tar_files): key = self._generate_operation_id_key(tar_files) if key: return self.load_data(key) return None def set_operation_id(self, tar_files, value): key = self._generate_operation_id_key(tar_files) if key: try: print "saving key {} as {}".format(key, value) self.save_data(key, int(value)) except Exception: self.save_data(key, -1) return @property def host_urls(self): system_option = SystemOption.get(self.db_session) if system_option.enable_user_credential_for_host: user = get_user_by_id(self.db_session, self.install_job.user_id) if user is not None: return self.make_urls(user.username, user.host_password) return self.make_urls() @property def get_server(self): """ Return the user selected server object where the packages can be found. """ server_id = self.install_job.server_id server = self.db_session.query(Server).filter(Server.id == server_id).first() return server @property def get_host(self): """ Return the host object. """ return self.host @property def server_repository_url(self): """ Return the server repository URL (TFTP/FTP) where the packages can be found. 
tftp://223.255.254.254;VRF/auto/tftp-gud/sit ftp://username:password@10.55.7.21;VRF/remote/directory """ server_id = self.install_job.server_id server = self.db_session.query(Server).filter(Server.id == server_id).first() if server is not None: server_type = server.server_type if server_type == ServerType.TFTP_SERVER: tftp_string = 'tftp://' url = '{}{}'.format(tftp_string, server.server_url.replace(tftp_string, '')) if not is_empty(server.vrf): try: pos = url.index('/', len(tftp_string)) except ValueError: pos = len(url) url = url[:pos] + ';' + server.vrf + url[pos:] server_sub_directory = self.install_job.server_directory if not is_empty(server_sub_directory): url += '/' + server_sub_directory return url elif server_type == ServerType.FTP_SERVER or server_type == ServerType.SFTP_SERVER: protocol = 'ftp' if server_type == ServerType.FTP_SERVER else 'sftp' url = protocol + "://{}:{}@{}".format(server.username, server.password, server.server_url) if server_type == ServerType.FTP_SERVER and not is_empty(server.vrf): url = url + ";{}".format(server.vrf) remote_directory = concatenate_dirs(server.server_directory, self.install_job.server_directory) if not is_empty(remote_directory): url = url + "/{}".format(remote_directory) return url elif server_type == ServerType.SCP_SERVER: # scp root:password@10.77.132.122:/home_directory destination_on_host return "scp {}:{}@{}:{} {}".format(server.username, server.password, server.server_url, server.server_directory, server.destination_on_host) elif server_type == ServerType.LOCAL_SERVER: return server.server_url return None def post_status(self, message): if self.db_session is not None and self.install_job is not None: try: self.install_job.set_status_message(message) self.db_session.commit() except Exception: self.db_session.rollback() class TestConnectionContext(Context): def __init__(self, hostname, urls): Context.__init__(self) self.hostname = hostname self.urls = urls @property def log_directory(self): return get_temp_directory() def post_status(self, message): pass @property def host_urls(self): return self.urls
{ "content_hash": "07d149597f7d92a07737e98b49e0eccb", "timestamp": "", "source": "github", "line_count": 325, "max_line_length": 127, "avg_line_length": 36.12, "alnum_prop": 0.5555839509327881, "repo_name": "csm-aut/csm", "id": "d0777dddf94e15211c3736d275a8c8de01b95bf7", "size": "13253", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "csmserver/context.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "84140" }, { "name": "HTML", "bytes": "685025" }, { "name": "JavaScript", "bytes": "114664" }, { "name": "Python", "bytes": "1044259" }, { "name": "Shell", "bytes": "3584" } ], "symlink_target": "" }
from django.contrib.auth.models import User from django.test import TestCase from social.app.models.author import Author from social.app.models.node import Node class NodeTestCase(TestCase): def setUp(self): Node.objects.create(name="Test", host="http://www.socdis.com/", service_url="http://api.socdis.com/") def test_to_str_method(self): node = Node.objects.get(name="Test") self.assertEqual(str(node), "Test (http://www.socdis.com/; http://api.socdis.com/)") class AuthorTestCase(TestCase): def setUp(self): self.node = Node.objects.create(name="Test", host="http://www.socdis.com/", service_url="http://api.socdis.com/", local=True) user = User.objects.create_user("test1", "test@test.com", "pass1") self.author = Author.objects.get(user__id=user.id) self.author.displayName = "Bobbert" self.author.user.last_name = "McBob" self.author.user.first_name = "Bob" def test_author_str(self): self.assertEquals(str(self.author), "Bobbert") def test_author_does_not_follow(self): user = User.objects.create_user("test2", "test@test.com", "pass1") author = Author.objects.get(user__id=user.id) self.assertFalse(self.author.follows(author)) def test_author_does_follow(self): user = User.objects.create_user("test2", "test@test.com", "pass1") author = Author.objects.get(user__id=user.id) self.author.followed_authors.add(author) self.assertTrue(self.author.follows(author))
{ "content_hash": "68eeaf2f9251482183128a7fb8322b9b", "timestamp": "", "source": "github", "line_count": 42, "max_line_length": 109, "avg_line_length": 37.666666666666664, "alnum_prop": 0.6466498103666245, "repo_name": "TeamAADGT/CMPUT404-project-socialdistribution", "id": "5e3faedc2173bdc9f6f2a893412d1327383109d7", "size": "1582", "binary": false, "copies": "1", "ref": "refs/heads/dev", "path": "social/app/tests/test_models.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "2706" }, { "name": "HTML", "bytes": "42308" }, { "name": "JavaScript", "bytes": "15525" }, { "name": "Python", "bytes": "241725" } ], "symlink_target": "" }
class SilentException(Exception):
    def __init__(self):
        Exception.__init__(self)
{ "content_hash": "6671d9332dce76a32a4031a8e881a235", "timestamp": "", "source": "github", "line_count": 3, "max_line_length": 33, "avg_line_length": 30.333333333333332, "alnum_prop": 0.6263736263736264, "repo_name": "earlye/nephele", "id": "09c87843e431cf54b3d5ff4fc8f14a40eaf2a8ab", "size": "91", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "nephele/SilentException.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "790" }, { "name": "Python", "bytes": "65001" } ], "symlink_target": "" }
import requests_mock

from tests.fixture_data import DATUM_ALIAS_DATA
from valohai_cli.commands.alias.list import list


def test_list(runner, logged_in_and_linked):
    with requests_mock.mock() as m:
        m.get(
            'https://app.valohai.com/api/v0/datum-aliases/',
            json={
                'results': [
                    DATUM_ALIAS_DATA,
                    DATUM_ALIAS_DATA,
                    DATUM_ALIAS_DATA,
                ],
            },
        )
        output = runner.invoke(list, catch_exceptions=False).output
    assert output.count('datum://this-is-alias-for-latest-png') == 3  # Three times the url
{ "content_hash": "419c38ebcc6654a32e721ccda94e6e4e", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 95, "avg_line_length": 29.545454545454547, "alnum_prop": 0.5507692307692308, "repo_name": "valohai/valohai-cli", "id": "0a6344c01fa61792cd1f675100b388ce4a09bb8f", "size": "650", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/commands/alias/test_list.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "411" }, { "name": "Python", "bytes": "279031" } ], "symlink_target": "" }
"""The 1-Wire component.""" import logging from pyownet import protocol from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import device_registry as dr from .const import DOMAIN, PLATFORMS from .onewirehub import CannotConnect, OneWireHub _LOGGER = logging.getLogger(__name__) async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up a 1-Wire proxy for a config entry.""" hass.data.setdefault(DOMAIN, {}) onewire_hub = OneWireHub(hass) try: await onewire_hub.initialize(entry) except ( CannotConnect, # Failed to connect to the server protocol.OwnetError, # Connected to server, but failed to list the devices ) as exc: raise ConfigEntryNotReady() from exc hass.data[DOMAIN][entry.entry_id] = onewire_hub await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS) entry.async_on_unload(entry.add_update_listener(options_update_listener)) return True async def async_remove_config_entry_device( hass: HomeAssistant, config_entry: ConfigEntry, device_entry: dr.DeviceEntry ) -> bool: """Remove a config entry from a device.""" onewire_hub: OneWireHub = hass.data[DOMAIN][config_entry.entry_id] return not device_entry.identifiers.intersection( (DOMAIN, device.id) for device in onewire_hub.devices or [] ) async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool: """Unload a config entry.""" unload_ok = await hass.config_entries.async_unload_platforms( config_entry, PLATFORMS ) if unload_ok: hass.data[DOMAIN].pop(config_entry.entry_id) return unload_ok async def options_update_listener(hass: HomeAssistant, entry: ConfigEntry) -> None: """Handle options update.""" _LOGGER.debug("Configuration options updated, reloading OneWire integration") await hass.config_entries.async_reload(entry.entry_id)
{ "content_hash": "1d524f597958763c4eb91c953fa99dec", "timestamp": "", "source": "github", "line_count": 62, "max_line_length": 85, "avg_line_length": 33.38709677419355, "alnum_prop": 0.7246376811594203, "repo_name": "nkgilley/home-assistant", "id": "e3454a5eb5cb2a6ad67c2411ab22ed07819b5922", "size": "2070", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "homeassistant/components/onewire/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Dockerfile", "bytes": "2963" }, { "name": "PLSQL", "bytes": "840" }, { "name": "Python", "bytes": "51597279" }, { "name": "Shell", "bytes": "6252" } ], "symlink_target": "" }
"""Seq2seq layer operations for use in neural networks. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import six from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variable_scope from tensorflow.python.util import nest __all__ = ["Decoder", "dynamic_decode"] _transpose_batch_time = rnn._transpose_batch_time # pylint: disable=protected-access @six.add_metaclass(abc.ABCMeta) class Decoder(object): """An RNN Decoder abstract interface object.""" @property def batch_size(self): """The batch size of the inputs returned by `sample`.""" raise NotImplementedError @property def output_size(self): """A (possibly nested tuple of...) integer[s] or `TensorShape` object[s].""" raise NotImplementedError @property def output_dtype(self): """A (possibly nested tuple of...) dtype[s].""" raise NotImplementedError @abc.abstractmethod def initialize(self, name=None): """Called before any decoding iterations. Args: name: Name scope for any created operations. Returns: `(finished, first_inputs, initial_state)`. """ raise NotImplementedError @abc.abstractmethod def step(self, time, inputs, state, name=None): """Called per step of decoding (but only once for dynamic decoding). Args: time: Scalar `int32` tensor. inputs: Input (possibly nested tuple of) tensor[s] for this time step. state: State (possibly nested tuple of) tensor[s] from previous time step. name: Name scope for any created operations. Returns: `(outputs, next_state, next_inputs, finished)`. """ raise NotImplementedError def _create_zero_outputs(size, dtype, batch_size): """Create a zero outputs Tensor structure.""" def _t(s): return (s if isinstance(s, ops.Tensor) else constant_op.constant( tensor_shape.TensorShape(s).as_list(), dtype=dtypes.int32, name="zero_suffix_shape")) def _create(s, d): return array_ops.zeros( array_ops.concat( ([batch_size], _t(s)), axis=0), dtype=d) return nest.map_structure(_create, size, dtype) def dynamic_decode(decoder, output_time_major=False, impute_finished=False, maximum_iterations=None, parallel_iterations=32, swap_memory=False, scope=None): """Perform dynamic decoding with `decoder`. Args: decoder: A `Decoder` instance. output_time_major: Python boolean. Default: `False` (batch major). If `True`, outputs are returned as time major tensors (this mode is faster). Otherwise, outputs are returned as batch major tensors (this adds extra time to the computation). impute_finished: Python boolean. If `True`, then states for batch entries which are marked as finished get copied through and the corresponding outputs get zeroed out. This causes some slowdown at each time step, but ensures that the final state and outputs have the correct values and that backprop ignores time steps that were marked as finished. maximum_iterations: `int32` scalar, maximum allowed number of decoding steps. Default is `None` (decode until the decoder is fully done). parallel_iterations: Argument passed to `tf.while_loop`. swap_memory: Argument passed to `tf.while_loop`. scope: Optional variable scope to use. 
Returns: `(final_outputs, final_state)`. Raises: TypeError: if `decoder` is not an instance of `Decoder`. ValueError: if maximum_iterations is provided but is not a scalar. """ if not isinstance(decoder, Decoder): raise TypeError("Expected decoder to be type Decoder, but saw: %s" % type(decoder)) with variable_scope.variable_scope(scope or "decoder") as varscope: # Properly cache variable values inside the while_loop if varscope.caching_device is None: varscope.set_caching_device(lambda op: op.device) if maximum_iterations is not None: maximum_iterations = ops.convert_to_tensor( maximum_iterations, dtype=dtypes.int32, name="maximum_iterations") if maximum_iterations.get_shape().ndims != 0: raise ValueError("maximum_iterations must be a scalar") initial_finished, initial_inputs, initial_state = decoder.initialize() zero_outputs = _create_zero_outputs(decoder.output_size, decoder.output_dtype, decoder.batch_size) if maximum_iterations is not None: initial_finished = math_ops.logical_or( initial_finished, 0 >= maximum_iterations) initial_time = constant_op.constant(0, dtype=dtypes.int32) def _shape(batch_size, from_shape): if not isinstance(from_shape, tensor_shape.TensorShape): return tensor_shape.TensorShape(None) else: batch_size = tensor_util.constant_value( ops.convert_to_tensor( batch_size, name="batch_size")) return tensor_shape.TensorShape([batch_size]).concatenate(from_shape) def _create_ta(s, d): return tensor_array_ops.TensorArray( dtype=d, size=0, dynamic_size=True, element_shape=_shape(decoder.batch_size, s)) initial_outputs_ta = nest.map_structure(_create_ta, decoder.output_size, decoder.output_dtype) def condition(unused_time, unused_outputs_ta, unused_state, unused_inputs, finished): return math_ops.logical_not(math_ops.reduce_all(finished)) def body(time, outputs_ta, state, inputs, finished): """Internal while_loop body. Args: time: scalar int32 tensor. outputs_ta: structure of TensorArray. state: (structure of) state tensors and TensorArrays. inputs: (structure of) input tensors. finished: 1-D bool tensor. Returns: `(time + 1, outputs_ta, next_state, next_inputs, next_finished)`. """ (next_outputs, decoder_state, next_inputs, decoder_finished) = decoder.step(time, inputs, state) next_finished = math_ops.logical_or(decoder_finished, finished) if maximum_iterations is not None: next_finished = math_ops.logical_or( next_finished, time + 1 >= maximum_iterations) nest.assert_same_structure(state, decoder_state) nest.assert_same_structure(outputs_ta, next_outputs) nest.assert_same_structure(inputs, next_inputs) # Zero out output values past finish if impute_finished: emit = nest.map_structure( lambda out, zero: array_ops.where(finished, zero, out), next_outputs, zero_outputs) else: emit = next_outputs # Copy through states past finish def _maybe_copy_state(new, cur): # TensorArrays and scalar states get passed through. 
if isinstance(cur, tensor_array_ops.TensorArray): pass_through = True else: new.set_shape(cur.shape) pass_through = (new.shape.ndims == 0) return new if pass_through else array_ops.where(finished, cur, new) if impute_finished: next_state = nest.map_structure( _maybe_copy_state, decoder_state, state) else: next_state = decoder_state outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out), outputs_ta, emit) return (time + 1, outputs_ta, next_state, next_inputs, next_finished) res = control_flow_ops.while_loop( condition, body, loop_vars=[ initial_time, initial_outputs_ta, initial_state, initial_inputs, initial_finished ], parallel_iterations=parallel_iterations, swap_memory=swap_memory) final_outputs_ta = res[1] final_state = res[2] final_outputs = nest.map_structure(lambda ta: ta.stack(), final_outputs_ta) if not output_time_major: final_outputs = nest.map_structure(_transpose_batch_time, final_outputs) return final_outputs, final_state
{ "content_hash": "56971784d16d0e127cc2dbfb7cf6d96c", "timestamp": "", "source": "github", "line_count": 247, "max_line_length": 85, "avg_line_length": 35.22267206477733, "alnum_prop": 0.6562068965517242, "repo_name": "abhitopia/tensorflow", "id": "6338eb152e96031365e4065dd21c7b28a9abbd5c", "size": "9389", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "tensorflow/contrib/seq2seq/python/ops/decoder.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "7481" }, { "name": "C", "bytes": "177254" }, { "name": "C++", "bytes": "22804170" }, { "name": "CMake", "bytes": "140337" }, { "name": "CSS", "bytes": "774" }, { "name": "Go", "bytes": "794578" }, { "name": "HTML", "bytes": "593171" }, { "name": "Java", "bytes": "286562" }, { "name": "JavaScript", "bytes": "13906" }, { "name": "Jupyter Notebook", "bytes": "1833654" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "37240" }, { "name": "Objective-C", "bytes": "7037" }, { "name": "Objective-C++", "bytes": "64166" }, { "name": "Protocol Buffer", "bytes": "209604" }, { "name": "Python", "bytes": "20006785" }, { "name": "Shell", "bytes": "331908" }, { "name": "TypeScript", "bytes": "789019" } ], "symlink_target": "" }
"""Compiled parallel-for loop.""" # pylint: disable=missing-docstring from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import bitwise_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import gen_parsing_ops from tensorflow.python.ops import gen_sparse_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.platform import flags from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest flags.DEFINE_bool( "op_conversion_fallback_to_while_loop", False, "If true, falls back to using a while loop for ops for " "which a converter is not defined.") def _stack(t, length): """stacks `t` `length` times.""" ones = array_ops.ones_like(array_ops.shape(t)) multiples = array_ops.concat([length, ones], 0) t = array_ops.tile(array_ops.expand_dims(t, 0), multiples) return wrap(t, True) # The following stateful ops can be safely called once, and with the same # signature as the unconverted version, if their inputs are loop invariant. # TODO(agarwal): implement a strategy for converting Variable reads/writes. The # plan is to map each read/write in the loop_fn to a corresponding merged # read/write in the converted graph. Writes need to be mergeable (e.g. # AssignAdd) to be used in `pfor`. Given a certain read/write order in the # loop_fn, doing a one-to-one conversion will simulate executing such # instructions in lock-step across all iterations. passthrough_stateful_ops = set([ "VariableV2", "VarHandleOp", "ReadVariableOp", "StackV2", "TensorArrayWriteV3", "TensorArrayReadV3", "TensorArraySizeV3", ]) def _is_stateful_pfor_op(op): if isinstance(op, WhileOp): return op.is_stateful if op.type == "Const": # Const didn't have an op_def. return False if op.type in passthrough_stateful_ops: return False assert hasattr(op, "op_def") and op.op_def is not None, op return op.op_def.is_stateful # pylint: disable=protected-access class WhileOp(object): """Object for storing state for converting the outputs of a while_loop.""" def __init__(self, exit_node, pfor_ops): """Initializer. Args: exit_node: A tensor output from the while_loop. pfor_ops: list of ops inside the current pfor loop. """ self._pfor_ops = set(pfor_ops) self._pfor_op_ids = set([x._id for x in pfor_ops]) assert isinstance(exit_node, ops.Tensor) self._while_context = exit_node.op._get_control_flow_context() assert isinstance(self._while_context, control_flow_ops.WhileContext) self._context_name = self._while_context.name self._condition = self._while_context.pivot.op.inputs[0] # Parts of an external while_loop could be created inside a pfor loop. # However for the purpose here, we declare such loops to be external. 
Also # note that we check if the condition was created inside or outside to # determine if the while_loop was first created inside or outside. # TODO(agarwal): check that the Enter and Exit of this loop are unstacked. self._is_inside_loop = self.op_is_inside_loop(self._condition.op) if self._is_inside_loop: for e in self._while_context.loop_exits: assert self.op_is_inside_loop(e.op) # Note the code below tries to reverse engineer an existing while_loop graph # by assuming the following pattern of nodes. # # NextIteration <---- Body <--- Enter # | ^ # V ___| Y # Enter -> Merge -> Switch___ # ^ | N # | V # LoopCond Exit # Node that elements in the list below correspond one-to-one with each # other. i.e. these lists are the same size, and the i_th entry corresponds # to different Operations/Tensors of a single cycle as illustrated above. # List of Switch ops (ops.Operation) that feed into an Exit Node. self._exit_switches = [] # List of inputs (ops.Tensor) to NextIteration. self._body_outputs = [] # List of list of control inputs of the NextIteration nodes. self._next_iter_control_inputs = [] # List of Merge ops (ops.Operation). self._enter_merges = [] # List of output (ops.Tensor) of Exit nodes. self._outputs = [] # List of Enter Tensors. # There are two types of Enter nodes: # - The Enter nodes that are used in the `loop_vars` argument to # `while_loop` (see # https://www.tensorflow.org/api_docs/python/tf/while_loop). We collect # these Enter nodes immediately below by tracing backwards from the Exit # nodes via Exit <- Switch <- Merge <- Enter. You can see this chain in the # diagram above. This allows us to have a 1:1 correspondence between the # self._outputs and the first elements in self._enters. # - The Enter nodes that are used only by the body. They don't appear in the # `loop_vars` and are not returned from the `while_loop`. In Python code, # they are usually captured by the body lambda. We collect them below by # iterating over all the ops in the graph. They are appended to the end of # self._enters or self._direct_enters, and don't correspond to any outputs # in self._outputs. Note that we keep the resource/variant Enter nodes in # self._direct_enters and the constructed while_loop's body uses them # directly as opposed to passing them as loop variables. This is done # because the while_body cannot partition the resource/variant Tensors, so # it has to leave them unchanged. self._enters = [] self._direct_enters = [] for e in self._while_context.loop_exits: self._outputs.append(e.op.outputs[0]) switch = e.op.inputs[0].op assert switch.type == "Switch", switch self._exit_switches.append(switch) merge = switch.inputs[0].op assert merge.type == "Merge", merge self._enter_merges.append(merge) enter = merge.inputs[0].op assert enter.type == "Enter", enter self._enters.append(enter.outputs[0]) next_iter = merge.inputs[1].op assert next_iter.type == "NextIteration", next_iter self._body_outputs.append(next_iter.inputs[0]) self._next_iter_control_inputs.append(next_iter.control_inputs) # Collect all the Enter nodes that are not part of `loop_vars`, the second # category described above. # Also track whether the loop body has any stateful ops. self._is_stateful = False for op in ops.get_default_graph().get_operations(): # TODO(agarwal): make sure this works with nested case. 
control_flow_context = op._get_control_flow_context() if control_flow_context is None: continue if control_flow_context.name == self._context_name: self._is_stateful |= _is_stateful_pfor_op(op) if op.type == "Enter": output = op.outputs[0] if output not in self._enters: if output.dtype in (dtypes.resource, dtypes.variant): if output not in self._direct_enters: self._direct_enters.append(output) else: self._enters.append(output) def __str__(self): """String representation.""" return "while_loop(%s)" % self.name @property def inputs(self): """Input to all the Enter nodes.""" return [x.op.inputs[0] for x in self._enters + self._direct_enters] @property def control_inputs(self): """Control input to all the Enter nodes.""" control_inputs = [] for x in self._enters + self._direct_enters: control_inputs.extend(x.op.control_inputs) return control_inputs @property def outputs(self): """Outputs of all the Exit nodes.""" return self._outputs @property def name(self): """Context name for the while loop.""" return self._context_name @property def is_inside_loop(self): """Returns true if the while_loop was created inside the pfor.""" return self._is_inside_loop def op_is_inside_loop(self, op): """True if op was created inside the pfor loop body.""" assert isinstance(op, ops.Operation) # Note that we use self._pfor_op_ids for the check and not self._pfor_ops # since it appears there tensorflow API could return different python # objects representing the same Operation node. return op._id in self._pfor_op_ids @property def is_stateful(self): return self._is_stateful @property def pfor_converter(self): """Return a converter for the while loop.""" return self def _init_pfor(self, parent_pfor, indices, cond_stacked, inputs, inputs_stacked): """Create a PFor object for converting parts of the while_loop. Args: parent_pfor: PFor object being used for converting the while_loop. indices: int32 Tensor of ids for the iterations that are still active (i.e. did not exit the while_loop). cond_stacked: True if the while_loop condition is stacked. inputs: list of input Tensors corresponding 1-to-1 with self._enters. Note that these Tensors are a subset of the loop variables for the generated while_loop. inputs_stacked: List of booleans corresponding 1-to-1 with `inputs`, indicating if the value is stacked or not. Returns: A PFor instance. The instance is initialized by adding conversion mappings of nodes that will be external to the conversion that the returned instance will be used for. e.g. Enter nodes as well as Merge and Switch outputs are mapped to converted values. """ num_outputs = len(self._outputs) assert len(inputs) == len(self._enters) assert len(inputs_stacked) == len(self._enters) loop_var = parent_pfor.loop_var loop_len = array_ops.size(indices) pfor = PFor( loop_var, loop_len, pfor_ops=self._pfor_ops, all_indices=indices, all_indices_partitioned=cond_stacked) # Map all inputs of Enter nodes in self._direct_enters to their converted # values. for enter in self._direct_enters: enter_input = enter.op.inputs[0] converted_enter, stacked, is_sparse_stacked = parent_pfor._convert_helper( enter_input) # Since these are resources / variants, they should be unstacked. assert not stacked and not is_sparse_stacked, (enter, converted_enter) pfor._add_conversion(enter, wrap(converted_enter, False)) # Map all Enter nodes to the inputs. for enter, inp, stacked in zip(self._enters, inputs, inputs_stacked): pfor._add_conversion(enter, wrap(inp, stacked)) # Map outputs of Switch and Merge. 
for i in range(num_outputs): wrapped_inp = wrap(inputs[i], inputs_stacked[i]) merge = self._enter_merges[i] pfor._add_conversion(merge.outputs[0], wrapped_inp) # Note that the second output of Merge is typically not used, except possibly # as a control dependency. To avoid trying to output the correct value, we # employ a hack here. We output a dummy invalid value with an incorrect # dtype. This will allow control dependency to work but if using it as an # input, it should typically lead to errors during graph construction due # to dtype mismatch. # TODO(agarwal): Check in the original graph to see if there are any # consumers of this Tensor that use it as an input. pfor._add_conversion(merge.outputs[1], wrap(constant_op.constant(-1.0), False)) switch = self._exit_switches[i] # Don't need to worry about switch.output[0] which will feed to Exit node. pfor._add_conversion(switch.outputs[1], wrapped_inp) return pfor def _convert_enter(self, parent_pfor, enter): """Converts an Enter node.""" inp, stacked, _ = parent_pfor._convert_helper(enter.op.inputs[0]) control_inputs = [ parent_pfor._convert_helper(x).t for x in enter.op.control_inputs ] if control_inputs: with ops.control_dependencies(control_inputs): inp = array_ops.identity(inp) return inp, stacked def _maybe_stacked(self, cache, inp): """Heuristic to figure out if converting inp leads to a stacked value. Args: cache: map from Tensor to boolean indicating stacked/unstacked. inp: input Tensor. Returns: True if `inp` could get stacked. If the function returns False, the converted value should be guaranteed to be unstacked. If returning True, it may or may not be stacked. """ if inp in cache: return cache[inp] if not self.op_is_inside_loop(inp.op): return False op = inp.op output = False if op.type in [ "Shape", "Rank", "ShapeN", "ZerosLike", "TensorArrayV3", "TensorArraySizeV3", ]: output = False elif _is_stateful_pfor_op(op): # This may be fairly aggressive. output = True elif op.type == "Exit": # This may be fairly aggressive. output = True else: for t in op.inputs: if self._maybe_stacked(cache, t): output = True break cache[inp] = output return output def _create_init_values(self, pfor_input): """Create arguments passed to converted while_loop.""" with ops.name_scope("while_init"): loop_len_vector = pfor_input.pfor.loop_len_vector loop_len = loop_len_vector[0] num_outputs = len(self._outputs) inputs = [] maybe_stacked_cache = {} # Convert all the Enters. Need to do this before checking for stacking # below. for i, enter in enumerate(self._enters): inp, stacked = self._convert_enter(pfor_input.pfor, enter) inputs.append(inp) maybe_stacked_cache[enter] = stacked # Since this enter node is part of the `loop_vars`, it corresponds to an # output and its preceding switch. We mark this switch's output the same # stackness, to act as the base case for the logic below. Below, we will # be going through the body figuring out which inputs might need to be # stacked and which inputs can safely remain unstacked. if i < num_outputs: maybe_stacked_cache[self._exit_switches[i].outputs[1]] = stacked # Shape invariants for init_values corresponding to self._enters. input_shape_invariants = [] # TensorArrays for outputs of converted while loop output_tas = [] # Shape invariants for output TensorArrays. ta_shape_invariants = [] # List of booleans indicating stackness of inputs, i.e. tensors # corresponding to self._enters.
inputs_stacked = [] for i, inp in enumerate(inputs): enter = self._enters[i] inp_stacked = self._maybe_stacked(maybe_stacked_cache, enter) # Note that even when an input is unstacked, the body could make it # stacked. we use a heuristic below to figure out if body may be making # it stacked. if i < num_outputs: body_output = self._body_outputs[i] if enter.op in self._pfor_ops: body_output_stacked = self._maybe_stacked(maybe_stacked_cache, body_output) else: # If constructed outside of pfor loop, then the output would not be # stacked. body_output_stacked = False if body_output_stacked and not inp_stacked: inp = _stack(inp, loop_len_vector).t inputs[i] = inp inp_stacked = True # TODO(agarwal): other attributes for the TensorArray ? output_tas.append(tensor_array_ops.TensorArray(inp.dtype, loop_len)) ta_shape_invariants.append(tensor_shape.TensorShape(None)) inputs_stacked.append(inp_stacked) input_shape_invariants.append(tensor_shape.TensorShape(None)) # See documentation for __call__ for the structure of init_values. init_values = [True, pfor_input.pfor.all_indices] + inputs + output_tas # TODO(agarwal): try stricter shape invariants shape_invariants = ( [tensor_shape.TensorShape(None), tensor_shape.TensorShape(None) ] + input_shape_invariants + ta_shape_invariants) return init_values, inputs_stacked, shape_invariants def _process_cond_unstacked(self, conditions, indices, inputs, output_tas): """Handles case when condition is unstacked. Note that all iterations end together. So we don't need to partition the inputs. When all iterations are done, we write the inputs to the TensorArrays. Note that we only write to index 0 of output_tas. Since all iterations end together, they can all be output together. """ not_all_done = array_ops.reshape(conditions, []) new_output_tas = [] # pylint: disable=cell-var-from-loop for i, out_ta in enumerate(output_tas): inp = inputs[i] new_output_tas.append( control_flow_ops.cond(not_all_done, lambda: out_ta, lambda: out_ta.write(0, inp))) # pylint: enable=cell-var-from-loop return not_all_done, indices, inputs, new_output_tas def _process_cond_stacked(self, conditions, indices, inputs, inputs_stacked, output_tas): num_outputs = len(self._outputs) # Compute if all iterations are done. not_all_done = math_ops.reduce_any(conditions) conditions_int = math_ops.cast(conditions, dtypes.int32) # Partition the indices. done_indices, new_indices = data_flow_ops.dynamic_partition( indices, conditions_int, 2) new_inputs = [] new_output_tas = [] for i, (inp, stacked) in enumerate(zip(inputs, inputs_stacked)): # Partition the inputs. if stacked: done_inp, new_inp = data_flow_ops.dynamic_partition( inp, conditions_int, 2) else: # TODO(agarwal): avoid this stacking. See TODO earlier in # _process_cond_unstacked. done_inp = _stack(inp, [array_ops.size(done_indices)]).t new_inp = inp new_inputs.append(new_inp) # For iterations that are done, write them to TensorArrays. if i < num_outputs: out_ta = output_tas[i] # Note that done_indices can be empty. done_inp should also be empty in # that case. new_output_tas.append(out_ta.scatter(done_indices, done_inp)) return not_all_done, new_indices, new_inputs, new_output_tas def _process_body(self, pfor_input, inputs_stacked, new_indices, cond_stacked, new_inputs, not_all_done): """Convert the body function.""" def true_fn(control_inputs, body_pfor, body_output, stacked): """Converts the body function for all but last iteration. This essentially converts body_output. 
Additionally, it needs to handle any control dependencies on the NextIteration node. So it creates another Identity node with the converted dependencies. """ converted_control_inp = [] for x in control_inputs: for t in x.outputs: converted_control_inp.append(body_pfor._convert_helper(t).t) if stacked: # Note convert always does the stacking. output = body_pfor.convert(body_output) else: output, convert_stacked, _ = body_pfor._convert_helper(body_output) assert convert_stacked == stacked, body_output with ops.control_dependencies(converted_control_inp): return array_ops.identity(output) body_pfor = self._init_pfor(pfor_input.pfor, new_indices, cond_stacked, new_inputs, inputs_stacked) new_outputs = [] for i, (body_output, stacked) in enumerate( zip(self._body_outputs, inputs_stacked)): control_inp = self._next_iter_control_inputs[i] out_dtype = body_output.dtype # Note that we want to run the body only if not all pfor iterations are # done. If all are done, we return empty tensors since these values will # not be used. Notice that the value returned by the loop is based on # TensorArrays and not directly on these returned values. # pylint: disable=cell-var-from-loop new_output = control_flow_ops.cond( not_all_done, lambda: true_fn(control_inp, body_pfor, body_output, stacked), lambda: constant_op.constant([], dtype=out_dtype)) # pylint: enable=cell-var-from-loop new_outputs.append(new_output) return new_outputs def __call__(self, pfor_input): """Converter for the while_loop. The conversion of a while_loop is another while_loop. The arguments to this converted while_loop are as follows: not_all_done: Boolean scalar Tensor indicating if all the pfor iterations are done. indices: int32 1-D Tensor storing the id of the iterations that are not done. args: Remaining arguments. These can be divided into 3 categories: - First set of arguments are the tensors that correspond to the initial elements of self._enters. The elements that appear in original while loop's `loop_vars`. - The second set of arguments are the tensors that correspond to the remaining elements of self._enters. These are the tensors that directly enter the original while loop body. - Finally, the last set of arguments are TensorArrays. These TensorArrays correspond to the outputs of the original while_loop, i.e. to the elements in self._outputs. Each TensorArray has `PFor.loop_len` elements, i.e. the number of pfor iterations. At the end, the i'th element of each TensorArray will contain the output computed by the i'th iteration of pfor. Note that elements can be written into these tensors arrays in any order, depending on when the corresponding pfor iteration is done. If the original while_loop had `k` tensors in its `loop_vars` and its body directly captured `m` tensors, the `args` will contain `2 * k + m` values. In each iteration, the while_loop body recomputes the condition for all active pfor iterations to see which of them are now done. It then partitions all the inputs and passes them along to the converted body. Values for all the iterations that are done are written to TensorArrays indexed by the pfor iteration number. When all iterations are done, the TensorArrays are stacked to get the final value. Args: pfor_input: A PForInput object corresponding to the output of any Exit node from this while loop. Returns: List of converted outputs. """ # Create init_values that will be passed to the while_loop. 
init_values, inputs_stacked, shape_invariants = self._create_init_values( pfor_input) # Note that we use a list as a hack since we need the nested function body # to set the value of cond_is_stacked. python2.x doesn't support nonlocal # variables. cond_is_stacked = [None] def cond(not_all_done, *_): return not_all_done def body(not_all_done, indices, *args): # See documentatin for __call__ for the structure of *args. num_enters = len(self._enters) inputs = args[:num_enters] output_tas = args[num_enters:] # TODO(agarwal): see which outputs have consumers and only populate the # TensorArrays corresponding to those. Or do those paths get trimmed out # from inside the while_loop body? assert len(inputs) >= len(output_tas) assert len(inputs) == len(inputs_stacked) # Convert condition with ops.name_scope("while_cond"): # Note that we set cond_stacked to True here. At this point we don't # know if it could be loop invariant, hence the conservative value is # to assume stacked. cond_pfor = self._init_pfor(pfor_input.pfor, indices, cond_stacked=True, inputs=inputs, inputs_stacked=inputs_stacked) conditions, cond_stacked, _ = cond_pfor._convert_helper(self._condition) cond_is_stacked[0] = cond_stacked # Recompute the new condition, write outputs of done iterations, and # partition the inputs if needed. if not cond_stacked: (not_all_done, new_indices, new_inputs, new_output_tas) = self._process_cond_unstacked( conditions, indices, inputs, output_tas) else: (not_all_done, new_indices, new_inputs, new_output_tas) = self._process_cond_stacked( conditions, indices, inputs, inputs_stacked, output_tas) # Convert body with ops.name_scope("while_body"): # Compute the outputs from the body. new_outputs = self._process_body(pfor_input, inputs_stacked, new_indices, cond_stacked, new_inputs, not_all_done) # Note that the first num_outputs new values of inputs are computed using # the body. Rest of them were direct Enters into the condition/body and # the partitioning done earlier is sufficient to give the new value. num_outputs = len(self._outputs) new_args = ([not_all_done, new_indices] + new_outputs + list( new_inputs[num_outputs:]) + new_output_tas) return tuple(new_args) while_outputs = control_flow_ops.while_loop( cond, body, init_values, shape_invariants=shape_invariants) output_tas = while_outputs[-len(self._outputs):] outputs = [] assert cond_is_stacked[0] is not None for inp_stacked, ta in zip(inputs_stacked, output_tas): if cond_is_stacked[0]: outputs.append(wrap(ta.stack(), True)) else: # Note that if while_loop condition is unstacked, all iterations exit at # the same time and we wrote those outputs in index 0 of the tensor # array. outputs.append(wrap(ta.read(0), inp_stacked)) return outputs class _PforInput(object): """Input object passed to registered pfor converters.""" def __init__(self, pfor, op, inputs): """Creates a _PforInput object. Args: pfor: PFor converter object. op: the Operation object that is being converted. inputs: list of WrappedTensor objects representing converted values of the inputs of `op`. """ self.pfor = pfor self._op = op self._inputs = inputs def stack_inputs(self, stack_indices=None): """Stacks unstacked inputs at `stack_indices`. Args: stack_indices: indices of inputs at which stacking is done. If None, stacking is done at all indices. 
""" if stack_indices is None: stack_indices = range(len(self._inputs)) length = self.pfor.loop_len_vector for i in stack_indices: inp = self._inputs[i] if not inp.is_stacked: self._inputs[i] = _stack(inp.t, length) def expanddim_inputs_for_broadcast(self): """Reshapes stacked inputs to prepare them for broadcast. Since stacked inputs have an extra leading dimension, automatic broadcasting rules could incorrectly try to expand dimensions before that leading dimension. To avoid that, we reshape these stacked inputs to the maximum rank they will need to be broadcasted to. """ if not self._inputs: return # Find max rank def _get_rank(x): rank = array_ops.rank(x.t) if not x.is_stacked: rank += 1 return rank ranks = [_get_rank(x) for x in self._inputs] max_rank = ranks[0] for rank in ranks[1:]: max_rank = math_ops.maximum(rank, max_rank) for i, inp in enumerate(self._inputs): if inp.is_stacked: shape = array_ops.shape(inp.t) rank_diff = array_ops.reshape(max_rank - ranks[i], [1]) ones = array_ops.tile([1], rank_diff) new_shape = array_ops.concat([shape[:1], ones, shape[1:]], axis=0) self._inputs[i] = wrap(array_ops.reshape(inp.t, new_shape), True) @property def inputs(self): return self._inputs @property def num_inputs(self): return len(self._inputs) def input(self, index): assert len(self._inputs) > index, (index, self._inputs) return self._inputs[index] def stacked_input(self, index): t, is_stacked, _ = self.input(index) if not is_stacked: op_type = self.op_type op_def = getattr(self._op, "op_def", None) if op_def is None: input_name = "at index %d" % index else: input_name = "\"%s\"" % op_def.input_arg[index].name raise ValueError("Input %s of op \"%s\" expected to be not loop invariant" ".\nError while converting op %s" "with converted inputs\n%s" % (input_name, op_type, self._op, self.inputs)) return t def unstacked_input(self, index): t, is_stacked, _ = self.input(index) if is_stacked: op_type = self.op_type op_def = getattr(self._op, "op_def", None) if op_def is None: input_name = "at index %d" % index else: input_name = "\"%s\"" % op_def.input_arg[index].name raise ValueError("Input %s of op \"%s\" expected to be loop invariant" ".\nError while converting op %s" "with converted inputs\n%s" % (input_name, op_type, self._op, self.inputs)) return t @property def op(self): return self._op @property def op_type(self): return self._op.type def get_attr(self, attr): return self._op.get_attr(attr) @property def outputs(self): return self._op.outputs def output(self, index): assert index < len(self._op.outputs) return self._op.outputs[index] _pfor_converter_registry = {} class RegisterPFor(object): """Utility to register converters for pfor. Usage: @RegisterPFor(foo_op_type) def _foo_converter(pfor_input): ... The above will register conversion function `_foo_converter` for handling conversion of `foo_op_type`. During conversion, the registered functin will be called with a single argument of type `PForInput` which will contain state needed for the conversion. This registered function should output a list of WrappedTensor object with the same length as the number of outputs of op being converted. If the op had zero outputs, then it should return a ops.Operation object. 
""" def __init__(self, op_type): """Creates an object to register a converter for op with type `op_type`.""" self.op_type = op_type def __call__(self, converter): name = self.op_type assert name not in _pfor_converter_registry, "Re-registering %s " % name _pfor_converter_registry[name] = converter return converter class RegisterPForWithArgs(RegisterPFor): """Utility to register converters for pfor. Usage: @RegisteRPFor(foo_op_type, foo=value, ....) def _foo_converter(pfor_input, foo=None, ....): ... See RegisterPFor for details on the conversion function. `RegisterPForWithArgs` allows binding extra arguments to the conversion function at registration time. """ def __init__(self, op_type, *args, **kw_args): super(RegisterPForWithArgs, self).__init__(op_type) self._args = args self._kw_args = kw_args def __call__(self, converter): def _f(pfor_input): return converter(pfor_input, self.op_type, *self._args, **self._kw_args) super(RegisterPForWithArgs, self).__call__(_f) return converter def _create_op(op_type, inputs, op_dtypes, attrs=None): """Utility to create an op.""" return ops.get_default_graph().create_op( op_type, inputs, op_dtypes, attrs=attrs, compute_device=True) WrappedTensor = collections.namedtuple("WrappedTensor", ["t", "is_stacked", "is_sparse_stacked"]) """Wrapper around the result of a Tensor conversion. The additional fields are useful for keeping track of the conversion state as data flows through the ops in the loop body. For every op whose output is a Tensor, its converter should return either a WrappedTensor or a list of WrappedTensors. Args: t: The converted tensor is_stacked: True if the tensor is stacked, i.e. represents the results of all the iterations of the loop, where each row i of the tensor corresponds to that op's output on iteration i of the loop. False if the tensor is not stacked, i.e. represents the result of the op on of a single iteration of the loop, where the result does not vary between iterations. is_sparse_stacked: True if the tensor corresponds to a component tensor (indices, values, or dense_shape) of a sparse tensor, and has been logically stacked via a sparse conversion. """ def wrap(tensor, is_stacked=True, is_sparse_stacked=False): """Helper to create a WrappedTensor object.""" assert isinstance(is_stacked, bool) assert isinstance(is_sparse_stacked, bool) assert isinstance(tensor, ops.Tensor) assert not is_sparse_stacked or is_stacked, ("If the wrapped tensor is " "stacked via a sparse " "conversion, it must also be " "stacked.") return WrappedTensor(tensor, is_stacked, is_sparse_stacked) def _fallback_converter(pfor_input): logging.warn("Using a while_loop for converting %s", pfor_input.op_type) output_dtypes = [x.dtype for x in pfor_input.outputs] iters = pfor_input.pfor.loop_len_vector[0] def while_body(i, *ta_list): """Body of while loop.""" inputs = [ x[i, ...] if stacked else x for x, stacked, _ in pfor_input.inputs ] op_outputs = _create_op( pfor_input.op_type, inputs, output_dtypes, attrs=pfor_input.op.node_def.attr).outputs outputs = [] for out, ta in zip(op_outputs, ta_list): assert isinstance(out, ops.Tensor) outputs.append(ta.write(i, array_ops.expand_dims(out, 0))) return tuple([i + 1] + outputs) ta_list = control_flow_ops.while_loop( lambda i, *ta: i < iters, while_body, [0] + [ tensor_array_ops.TensorArray(dtype, iters) for dtype in output_dtypes ])[1:] return tuple([wrap(ta.concat(), True) for ta in ta_list]) class PFor(object): """Implementation of rewrite of parallel-for loops. 
This class takes a DAG or a set of DAGs representing the body of a parallel-for loop, and adds new operations to the graph that implements functionality equivalent to running that loop body for a specified number of iterations. This new set of nodes may or may not use a tensorflow loop construct. The process of conversion does not delete or change any existing operations. It only adds operations that efficiently implement the equivalent functionality. We refer to the added ops as "converted ops". The conversion process uses a simple greedy heuristic. It walks the loop body and tries to express the functionality of running each node in a loop with a new set of nodes. When converting an op several cases are possible: - The op is not inside the loop body. Hence it can be used as is. - The op does not depend on the iteration number and is stateless. In this case, it can be used as is. - The op is not stateful, and depends on iteration number only through control dependencies. In this case, we can create a single op with same inputs and attributes, but with "converted" control dependencies. - The op is not stateful, and all its inputs are loop invariant. In this case, similar to above, we can create a single op with same inputs and attributes, but with "converted" control dependencies. - The op is stateful or at least one of the inputs is not loop invariant. In this case, we run the registered converter for that op to create a set of converted ops. All nodes in the set will have converted control dependencies corresponding to control dependencies of the original op. If the op returned multiple outputs, "converted outputs" could be produced by different ops in this set. """ def __init__(self, loop_var, loop_len, pfor_ops, all_indices=None, all_indices_partitioned=False): """Creates an object to rewrite a parallel-for loop. Args: loop_var: ops.Tensor output of a Placeholder operation. The value should be an int32 scalar representing the loop iteration number. loop_len: A scalar or scalar Tensor representing the number of iterations the loop is run for. pfor_ops: List of all ops inside the loop body. all_indices: If not None, an int32 vector with size `loop_len` representing the iteration ids that are still active. These values should be unique and sorted. However they may not be contiguous. This is typically the case when inside a control flow construct which has partitioned the indices of the iterations that are being converted. all_indices_partitioned: If True, this object is being constructed from a control flow construct where not all the pfor iterations are guaranteed to be active. 
""" assert isinstance(loop_var, ops.Tensor) assert loop_var.op.type == "Placeholder" self._loop_var = loop_var loop_len_value = tensor_util.constant_value(loop_len) if loop_len_value is not None: loop_len = loop_len_value self._loop_len_vector = array_ops.reshape(loop_len, [1]) self._all_indices_partitioned = all_indices_partitioned if all_indices_partitioned: assert all_indices is not None self.all_indices = ( math_ops.range(loop_len) if all_indices is None else all_indices) self._conversion_map = {} self._conversion_map[loop_var] = wrap(self.all_indices, True) self._pfor_ops = set(pfor_ops) self._pfor_op_ids = set([x._id for x in pfor_ops]) def op_is_inside_loop(self, op): """True if op was created inside the pfor loop body.""" assert isinstance(op, ops.Operation) # Note that we use self._pfor_op_ids for the check and not self._pfor_ops # since it appears there tensorflow API could return different python # objects representing the same Operation node. return op._id in self._pfor_op_ids def _convert_sparse(self, y): """Returns the converted value corresponding to SparseTensor y. For SparseTensors, instead of stacking the component tensors separately, resulting in component tensors with shapes (N, m, rank), (N, m), and (N, rank) respectively for indices, values, and dense_shape (where N is the loop length and m is the number of sparse tensor values per loop iter), we want to logically stack the SparseTensors, to create a SparseTensor whose components are size (N * m, rank + 1), (N * m, ), and (rank + 1,) respectively. Here, we try to get the conversion of each component tensor. If the tensors are stacked via a sparse conversion, return the resulting SparseTensor composed of the converted components. Otherwise, the component tensors are either unstacked or stacked naively. In the latter case, we unstack the component tensors to reform loop_len SparseTensor elements, then correctly batch them. The unstacked tensors must have the same rank. Each dimension of each SparseTensor will expand to be the largest among all SparseTensor elements for that dimension. For example, if there are N SparseTensors of rank 3 being stacked, with N dense shapes, where the i_th shape is (x_i, y_i, z_i), the new dense shape will be (N, max_i(x_i), max_i(y_i), max_i(z_i)). Args: y: A tf.SparseTensor. Returns: A tf.SparseTensor that is the converted value corresponding to y. """ outputs = [ self._convert_helper(t) for t in (y.indices, y.values, y.dense_shape) ] assert all(isinstance(o, WrappedTensor) for o in outputs) if all(w.is_sparse_stacked for w in outputs): return sparse_tensor.SparseTensor(*[w.t for w in outputs]) assert not any(w.is_sparse_stacked for w in outputs), ( "Error converting SparseTensor. All components should be logically " "stacked, or none.") # If component tensors were not sparsely stacked, they are either unstacked # or stacked without knowledge that they are components of sparse tensors. # In this case, we have to restack them. return self._restack_sparse_tensor_logically( *[self._unwrap_or_tile(w) for w in outputs]) def _restack_sparse_tensor_logically(self, indices, values, shape): sparse_tensor_rank = indices.get_shape()[-1].value if sparse_tensor_rank is not None: sparse_tensor_rank += 1 def map_fn(args): res = gen_sparse_ops.serialize_sparse( args[0], args[1], args[2], out_type=dtypes.variant) return res # Applies a map function to the component tensors to serialize each # sparse tensor element and batch them all, then deserializes the batch. 
# TODO(rachelim): Try to do this without map_fn -- add the right offsets # to shape and indices tensors instead. result = functional_ops.map_fn( map_fn, [indices, values, shape], dtype=dtypes.variant) return sparse_ops.deserialize_sparse( result, dtype=values.dtype, rank=sparse_tensor_rank) def _unwrap_or_tile(self, wrapped_tensor): """Given a wrapped tensor, unwrap if stacked. Otherwise, tiles it.""" output, is_stacked = wrapped_tensor.t, wrapped_tensor.is_stacked if is_stacked: return output else: return _stack(output, self._loop_len_vector).t def convert(self, y): """Returns the converted value corresponding to y. Args: y: A ops.Tensor or a ops.Operation object. If latter, y should not have any outputs. Returns: If y does not need to be converted, it returns y as is. Else it returns the "converted value" corresponding to y. """ if y is None: return None if isinstance(y, sparse_tensor.SparseTensor): return self._convert_sparse(y) output = self._convert_helper(y) if isinstance(output, WrappedTensor): assert isinstance(y, ops.Tensor) return self._unwrap_or_tile(output) else: assert isinstance(y, ops.Operation) assert not y.outputs assert isinstance(output, ops.Operation) return output def _was_converted(self, t): """True if t is not a conversion of itself.""" converted_t = self._conversion_map[t] return converted_t.t is not t def _add_conversion(self, old_output, new_output): self._conversion_map[old_output] = new_output def _convert_helper(self, op_or_tensor): stack = [op_or_tensor] while stack: y = stack[0] if y in self._conversion_map: assert isinstance(self._conversion_map[y], (WrappedTensor, ops.Operation)) stack.pop(0) continue if isinstance(y, ops.Operation): assert not y.outputs, ( "We only support converting Operation objects with no outputs. " "Got %s", y) y_op = y else: assert isinstance(y, ops.Tensor), y y_op = y.op is_while_loop = y_op.type == "Exit" if is_while_loop: while_op = WhileOp(y, pfor_ops=self._pfor_ops) is_inside_loop = while_op.is_inside_loop # If all nodes in the while_loop graph were created inside the pfor, we # treat the whole loop subgraph as a single op (y_op) and try to convert # it. For while_loops that are created completely or partially outside, # we treat them as external and should be able to simply return the Exit # node output as is without needing any conversion. Note that for # while_loops that are partially constructed inside, we assume they will # be loop invariant. If that is not the case, it will create runtime # errors since the converted graph would depend on the self._loop_var # placeholder. if is_inside_loop: y_op = while_op else: is_inside_loop = self.op_is_inside_loop(y_op) # If this op was not created inside the loop body, we will return as is. # 1. Convert inputs and control inputs. 
def _add_to_stack(x): if x not in self._conversion_map: stack.insert(0, x) return True else: return False if is_inside_loop: added_to_stack = False for inp in y_op.inputs: added_to_stack |= _add_to_stack(inp) for cinp in y_op.control_inputs: if cinp.outputs: for t in cinp.outputs: added_to_stack |= _add_to_stack(t) else: added_to_stack |= _add_to_stack(cinp) if added_to_stack: continue converted_inputs = [self._conversion_map[inp] for inp in y_op.inputs] some_input_converted = any( [self._was_converted(x) for x in y_op.inputs]) some_input_stacked = any([x.is_stacked for x in converted_inputs]) converted_control_ops = set() some_control_input_converted = False for cinp in y_op.control_inputs: if cinp.outputs: for t in cinp.outputs: converted_t = self._conversion_map[t] if self._was_converted(t): some_control_input_converted = True converted_control_ops.add(converted_t.t.op) else: converted_cinp = self._conversion_map[cinp] assert isinstance(converted_cinp, ops.Operation) if converted_cinp != cinp: some_control_input_converted = True converted_control_ops.add(converted_cinp) converted_control_ops = list(converted_control_ops) is_stateful = _is_stateful_pfor_op(y_op) else: converted_inputs = [] converted_control_ops = [] logging.vlog(3, "converting op:%s\ninputs:%s\ncontrol_inputs:%s", y_op, converted_inputs, converted_control_ops) # 2. Convert y_op # If converting a while_loop, we let the while_loop convertor deal with # putting the control dependencies appropriately. control_dependencies = [] if is_while_loop else converted_control_ops with ops.control_dependencies(control_dependencies), ops.name_scope( y_op.name + "/pfor/"): # None of the inputs and control inputs were converted. if (not is_inside_loop or (not is_stateful and not some_input_converted and not some_control_input_converted)): if y == y_op: assert not isinstance(y_op, WhileOp) new_outputs = y_op else: new_outputs = [wrap(x, False) for x in y_op.outputs] elif not (is_stateful or is_while_loop or some_input_stacked): # All inputs are unstacked or uncoverted but some control inputs are # converted. # TODO(rachelim): Handle the case where some inputs are sparsely # stacked (i.e. any([x.is_sparse_stacked for x in converted_inputs])) new_op = _create_op(y_op.type, [x.t for x in converted_inputs], [x.dtype for x in y_op.outputs], y_op.node_def.attr) if y == y_op: new_outputs = new_op else: new_outputs = [wrap(x, False) for x in new_op.outputs] else: # Either some inputs are not loop invariant or op is stateful. if hasattr(y_op, "pfor_converter"): converter = y_op.pfor_converter else: converter = _pfor_converter_registry.get(y_op.type, None) if converter is None: if flags.FLAGS.op_conversion_fallback_to_while_loop: converter = _fallback_converter else: raise ValueError( "No converter defined for %s\n%s\ninputs: %s. " "\nEither add a converter or set " "--op_conversion_fallback_to_while_loop=True, " "which may run slower" % (y_op.type, y_op, converted_inputs)) # TODO(rachelim): Handle the case where some inputs are sparsely # stacked. We should only call the converter if it supports handling # those inputs. 
new_outputs = converter(_PforInput(self, y_op, converted_inputs)) if isinstance(new_outputs, WrappedTensor): new_outputs = [new_outputs] assert isinstance(new_outputs, (list, tuple, ops.Operation)), new_outputs logging.vlog(2, "converted %s %s", y_op, new_outputs) # Insert into self._conversion_map if y == y_op: assert isinstance(new_outputs, ops.Operation) self._add_conversion(y_op, new_outputs) else: for old_output, new_output in zip(y_op.outputs, new_outputs): assert isinstance(new_output, WrappedTensor), (new_output, y, y_op) self._add_conversion(old_output, new_output) stack.pop(0) return self._conversion_map[op_or_tensor] @property def loop_len_vector(self): """Returns a single element vector whose value is number of iterations.""" return self._loop_len_vector @property def loop_var(self): """Returns placeholder loop variable.""" return self._loop_var @property def pfor_ops(self): return self._pfor_ops @property def all_indices_partitioned(self): """all_indices_partitioned property. Returns: True if we are inside a control flow construct and not all pfor iterations may be active. """ return self._all_indices_partitioned # nn_ops def _flatten_first_two_dims(x): """Merges first two dimensions.""" old_shape = array_ops.shape(x) new_shape = array_ops.concat([[-1], old_shape[2:]], axis=0) return array_ops.reshape(x, new_shape) def _unflatten_first_dim(x, first_dim): """Splits first dimension into [first_dim, -1].""" old_shape = array_ops.shape(x) new_shape = array_ops.concat([first_dim, [-1], old_shape[1:]], axis=0) return array_ops.reshape(x, new_shape) def _inputs_with_flattening(pfor_input, input_indices): """Stacks and flattens first dim of inputs at indices `input_indices`.""" if input_indices is None: input_indices = [] pfor_input.stack_inputs(stack_indices=input_indices) inputs = [] for i in range(pfor_input.num_inputs): if i in input_indices: inp = pfor_input.stacked_input(i) inp = _flatten_first_two_dims(inp) else: inp = pfor_input.unstacked_input(i) inputs.append(inp) return inputs @RegisterPForWithArgs("Conv2D", dims=[0]) @RegisterPForWithArgs("AvgPool", dims=[0]) @RegisterPForWithArgs("MaxPool", dims=[0]) @RegisterPForWithArgs("MaxPoolGrad", dims=[0, 1, 2]) @RegisterPForWithArgs("SoftmaxCrossEntropyWithLogits", dims=[0, 1]) def _convert_flatten_batch(pfor_input, op_type, dims): del op_type inputs = _inputs_with_flattening(pfor_input, dims) outputs = _create_op( pfor_input.op_type, inputs, [x.dtype for x in pfor_input.outputs], attrs=pfor_input.op.node_def.attr).outputs n = pfor_input.pfor.loop_len_vector outputs = [_unflatten_first_dim(x, n) for x in outputs] return [wrap(x, True) for x in outputs] _channel_flatten_input_cache = {} def _channel_flatten_input(x, data_format): """Merge the stack dimension with the channel dimension. If S is pfor's stacking dimension, then, - for SNCHW, we transpose to NSCHW. If N dimension has size 1, the transpose should be cheap. - for SNHWC, we transpose to NHWCS. We then merge the S and C dimension. Args: x: ops.Tensor to transform. data_format: "NCHW" or "NHWC". Returns: A 3-element tuple with the transformed value, along with the shape for reshape and order for transpose required to transform back. 
""" graph = ops.get_default_graph() cache_key = (graph, x, data_format) if cache_key not in _channel_flatten_input_cache: x_shape = array_ops.shape(x) if data_format == b"NCHW": order = [1, 0, 2, 3, 4] shape = array_ops.concat([x_shape[1:2], [-1], x_shape[3:]], axis=0) reverse_order = order else: order = [1, 2, 3, 0, 4] shape = array_ops.concat([x_shape[1:4], [-1]], axis=0) reverse_order = [3, 0, 1, 2, 4] # Move S dimension next to C dimension. x = array_ops.transpose(x, order) reverse_shape = array_ops.shape(x) # Reshape to merge the S and C dimension. x = array_ops.reshape(x, shape) outputs = x, reverse_order, reverse_shape _channel_flatten_input_cache[cache_key] = outputs else: outputs = _channel_flatten_input_cache[cache_key] return outputs # Note that with training=True, running FusedBatchNorm on individual examples # is very different from running FusedBatchNorm on a batch of those examples. # This is because, for the latter case, the operation can be considered as first # computing the mean and variance over all the examples and then using these # to scale all those examples. This creates a data dependency between these # different "iterations" since the inputs to the scaling step depends on the # statistics coming from all these inputs. # As with other kernels, the conversion here effectively runs the kernel # independently for each iteration, and returns outputs by stacking outputs from # each of those iterations. @RegisterPFor("FusedBatchNorm") def _convert_fused_batch_norm(pfor_input): is_training = pfor_input.get_attr("is_training") # When BatchNorm is used with training=False, mean and variance are provided # externally and used as is by the op. Thus, we can merge the S and N # dimensions as we do for regular operations. # When BatchNorm is used with training=True, mean and variance are computed # for each channel across the batch dimension (first one). If we merge S and N # dimensions, mean and variances will be computed over a larger set. So, we # merge the S and C dimensions instead. if not is_training: # We return zeros for batch_mean and batch_variance output. Note that CPU # and GPU seem to have different behavior for those two outputs. CPU outputs # zero because these values are not used during inference. GPU outputs # something, probably real means and variances. inputs = _inputs_with_flattening(pfor_input, [0]) outputs = _create_op( pfor_input.op_type, inputs, [x.dtype for x in pfor_input.outputs], attrs=pfor_input.op.node_def.attr).outputs y = outputs[0] n = pfor_input.pfor.loop_len_vector y = _unflatten_first_dim(y, n) mean = pfor_input.unstacked_input(3) zeros = array_ops.zeros_like(mean) return [wrap(y, True), wrap(zeros, False), wrap(zeros, False)] pfor_input.stack_inputs() data_format = pfor_input.get_attr("data_format") # We merge the first dimension with the "C" dimension, run FusedBatchNorm, and # then transpose back. x = pfor_input.stacked_input(0) x, reverse_order, reverse_shape = _channel_flatten_input(x, data_format) # Note that we stack all the other inputs as well so that they are the same # size as the new size of the channel dimension. 
inputs = [x] + [ array_ops.reshape(pfor_input.stacked_input(i), [-1]) for i in range(1, pfor_input.num_inputs) ] outputs = _create_op( pfor_input.op_type, inputs, [x.dtype for x in pfor_input.outputs], attrs=pfor_input.op.node_def.attr).outputs y = outputs[0] y = array_ops.reshape(y, reverse_shape) y = array_ops.transpose(y, reverse_order) n = pfor_input.pfor.loop_len_vector outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]] outputs = [y] + outputs return [wrap(x, True) for x in outputs] @RegisterPFor("FusedBatchNormGrad") def _convert_fused_batch_norm_grad(pfor_input): pfor_input.stack_inputs() data_format = pfor_input.get_attr("data_format") y_backprop = pfor_input.stacked_input(0) y_backprop, _, _ = _channel_flatten_input(y_backprop, data_format) x = pfor_input.stacked_input(1) x, x_reverse_order, x_reverse_shape = _channel_flatten_input(x, data_format) inputs = [y_backprop, x] + [ array_ops.reshape(pfor_input.stacked_input(i), [-1]) for i in range(2, pfor_input.num_inputs) ] outputs = _create_op( pfor_input.op_type, inputs, [x.dtype for x in pfor_input.outputs], attrs=pfor_input.op.node_def.attr).outputs x_backprop = outputs[0] x_backprop = array_ops.reshape(x_backprop, x_reverse_shape) x_backprop = array_ops.transpose(x_backprop, x_reverse_order) n = pfor_input.pfor.loop_len_vector outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]] outputs = [x_backprop] + outputs return [wrap(output, True) for output in outputs] @RegisterPForWithArgs("Conv2DBackpropInput", flatten_dims=[2], shape_dim=0) @RegisterPForWithArgs("AvgPoolGrad", flatten_dims=[1], shape_dim=0) def _convert_flatten_batch_shape_input(pfor_input, op_type, flatten_dims, shape_dim): del op_type inputs = _inputs_with_flattening(pfor_input, flatten_dims) n = pfor_input.pfor.loop_len_vector # Adjust the `input_sizes` input. ones = array_ops.ones( [array_ops.shape(inputs[shape_dim])[0] - 1], dtype=n.dtype) inputs[shape_dim] *= array_ops.concat([n, ones], axis=0) outputs = _create_op( pfor_input.op_type, inputs, [x.dtype for x in pfor_input.outputs], attrs=pfor_input.op.node_def.attr).outputs outputs = [_unflatten_first_dim(x, n) for x in outputs] return [wrap(x, True) for x in outputs] @RegisterPFor("Conv2DBackpropFilter") def _convert_conv2d_backprop_filter(pfor_input): pfor_input.stack_inputs(stack_indices=[2]) inputs, inputs_stacked, _ = pfor_input.input(0) filter_sizes = pfor_input.unstacked_input(1) grads = pfor_input.stacked_input(2) strides = pfor_input.get_attr("strides") padding = pfor_input.get_attr("padding") use_cudnn_on_gpu = pfor_input.get_attr("use_cudnn_on_gpu") data_format = pfor_input.get_attr("data_format") dilations = pfor_input.get_attr("dilations") if inputs_stacked: # TODO(agarwal): Implement this efficiently. logging.warn("Conv2DBackpropFilter uses a while_loop. Fix that!") def while_body(i, ta): inp_i = inputs[i, ...] grad_i = grads[i, ...] output = nn_ops.conv2d_backprop_filter( inp_i, filter_sizes, grad_i, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format, dilations=dilations) return i + 1, ta.write(i, array_ops.expand_dims(output, 0)) n = array_ops.reshape(pfor_input.pfor.loop_len_vector, []) _, ta = control_flow_ops.while_loop( lambda i, ta: i < n, while_body, (0, tensor_array_ops.TensorArray(inputs.dtype, n))) output = ta.concat() return wrap(output, True) else: # We merge the stack dimension with the channel dimension of the gradients # and pretend we had a larger filter (see change to filter_sizes below). 
# Once the filter backprop is computed, we reshape and transpose back # appropriately. grads, _, _ = _channel_flatten_input(grads, data_format) n = pfor_input.pfor.loop_len_vector old_filter_sizes = filter_sizes filter_sizes *= array_ops.concat([[1, 1, 1], n], axis=0) output = nn_ops.conv2d_backprop_filter( inputs, filter_sizes, grads, strides=strides, padding=padding, use_cudnn_on_gpu=use_cudnn_on_gpu, data_format=data_format, dilations=dilations) new_filter_shape = array_ops.concat([old_filter_sizes[:3], n, [-1]], axis=0) output = array_ops.reshape(output, new_filter_shape) output = array_ops.transpose(output, [3, 0, 1, 2, 4]) return wrap(output, True) # array_ops @RegisterPForWithArgs("Identity", array_ops.identity) @RegisterPForWithArgs("StopGradient", array_ops.stop_gradient) def _convert_identity(pfor_input, op_type, op_func): del op_type return wrap(op_func(*[x.t for x in pfor_input.inputs]), True) @RegisterPFor("Reshape") def _convert_reshape(pfor_input): t = pfor_input.stacked_input(0) shape = pfor_input.unstacked_input(1) new_dim = array_ops.shape(t)[:1] new_shape = array_ops.concat([new_dim, shape], axis=0) return wrap(array_ops.reshape(t, new_shape), True) @RegisterPFor("ExpandDims") def _convert_expanddims(pfor_input): t = pfor_input.stacked_input(0) dim = pfor_input.unstacked_input(1) dim += math_ops.cast(dim >= 0, dtypes.int32) return wrap(array_ops.expand_dims(t, axis=dim), True) @RegisterPFor("Slice") def _convert_slice(pfor_input): t = pfor_input.stacked_input(0) begin = pfor_input.unstacked_input(1) size = pfor_input.unstacked_input(2) begin = array_ops.concat([[0], begin], axis=0) size = array_ops.concat([[-1], size], axis=0) return wrap(array_ops.slice(t, begin, size), True) @RegisterPFor("Tile") def _convert_tile(pfor_input): t = pfor_input.stacked_input(0) multiples = pfor_input.unstacked_input(1) multiples = array_ops.concat([[1], multiples], 0) return wrap(array_ops.tile(t, multiples), True) @RegisterPFor("Pack") def _convert_pack(pfor_input): pfor_input.stack_inputs() axis = pfor_input.get_attr("axis") if axis >= 0: axis += 1 return wrap( array_ops.stack([x.t for x in pfor_input.inputs], axis=axis), True) @RegisterPFor("Unpack") def _convert_unpack(pfor_input): value = pfor_input.stacked_input(0) axis = pfor_input.get_attr("axis") if axis >= 0: axis += 1 num = pfor_input.get_attr("num") return [wrap(x, True) for x in array_ops.unstack(value, axis=axis, num=num)] @RegisterPFor("Pad") def _convert_pad(pfor_input): t = pfor_input.stacked_input(0) paddings = pfor_input.unstacked_input(1) paddings = array_ops.concat([[[0, 0]], paddings], 0) return wrap(array_ops.pad(t, paddings, mode="CONSTANT"), True) @RegisterPFor("Split") def _convert_split(pfor_input): split_dim = pfor_input.unstacked_input(0) t = pfor_input.stacked_input(1) num_split = pfor_input.get_attr("num_split") split_dim += math_ops.cast(split_dim >= 0, dtypes.int32) return [wrap(x, True) for x in array_ops.split(t, num_split, axis=split_dim)] @RegisterPFor("Transpose") def _convert_transpose(pfor_input): t = pfor_input.stacked_input(0) perm = pfor_input.unstacked_input(1) new_perm = array_ops.concat([[0], perm + 1], axis=0) return wrap(array_ops.transpose(t, new_perm), True) @RegisterPFor("ZerosLike") def _convert_zeroslike(pfor_input): t = pfor_input.stacked_input(0) shape = array_ops.shape(t)[1:] return wrap(array_ops.zeros(shape, dtype=t.dtype), False) @RegisterPFor("Gather") @RegisterPFor("GatherV2") def _convert_gather(pfor_input): param, param_stacked, _ = pfor_input.input(0) indices, 
indices_stacked, _ = pfor_input.input(1) op_type = pfor_input.op_type if op_type == "Gather": validate_indices = pfor_input.get_attr("validate_indices") axis = 0 else: validate_indices = None axis = pfor_input.unstacked_input(2) axis_value = tensor_util.constant_value(axis) if axis_value is not None: axis = axis_value if indices_stacked and not param_stacked: if indices == pfor_input.pfor.all_indices and axis == 0: param_shape0 = param.shape[0].value indices_shape0 = indices.shape[0].value if param_shape0 is not None and indices_shape0 == param_shape0: # Note that with loops and conditionals, indices may not be contiguous. # However they will be sorted and unique. So if the shape matches, then # it must be picking up all the rows of param. return wrap(param, True) # TODO(agarwal): use array_ops.slice here. output = array_ops.gather( param, indices, validate_indices=validate_indices, axis=axis) if axis != 0: axis = control_flow_ops.cond( axis < 0, lambda: axis + array_ops.rank(param), lambda: axis) order = array_ops.concat( [[axis], math_ops.range(axis), math_ops.range(axis + 1, array_ops.rank(output))], axis=0) output = control_flow_ops.cond( math_ops.equal(axis, 0), lambda: output, lambda: array_ops.transpose(output, order)) return wrap(output, True) if param_stacked: loop_len_vector = pfor_input.pfor.loop_len_vector pfor_input.stack_inputs(stack_indices=[1]) indices = pfor_input.stacked_input(1) param_flat = _flatten_first_two_dims(param) # Recompute indices to handle stacked param. indices_offset = math_ops.range( loop_len_vector[0]) * array_ops.shape(param)[1] # Reshape indices_offset to allow broadcast addition ones = array_ops.ones([array_ops.rank(indices) - 1], dtype=dtypes.int32) new_shape = array_ops.concat([loop_len_vector, ones], axis=0) indices_offset = array_ops.reshape(indices_offset, new_shape) indices += indices_offset # TODO(agarwal): handle axis != 0. May need to transpose param or # array_ops.gather_nd. if isinstance(axis, ops.Tensor): axis_value = tensor_util.constant_value(axis) else: try: axis_value = int(axis) except TypeError: axis_value = None msg = ("Gather, where indices and param are both loop dependent, currently " "requires axis=0") if axis_value is not None and axis_value != 0: raise ValueError("Error while converting %s. %s. 
Got axis=%d" % (pfor_input.op, msg, axis)) with ops.control_dependencies( [check_ops.assert_equal(axis, 0, message=msg)]): output = array_ops.gather(param_flat, indices) return wrap(output, True) @RegisterPFor("ConcatV2") def _convert_concatv2(pfor_input): n = pfor_input.num_inputs pfor_input.stack_inputs(stack_indices=range(n - 1)) axis = pfor_input.unstacked_input(n - 1) axis += math_ops.cast(axis >= 0, axis.dtype) return wrap( array_ops.concat([x.t for x in pfor_input.inputs[:n - 1]], axis=axis), True) @RegisterPFor("StridedSlice") def _convert_strided_slice(pfor_input): inp = pfor_input.stacked_input(0) begin = pfor_input.unstacked_input(1) end = pfor_input.unstacked_input(2) strides = pfor_input.unstacked_input(3) begin_mask = pfor_input.get_attr("begin_mask") end_mask = pfor_input.get_attr("end_mask") ellipsis_mask = pfor_input.get_attr("ellipsis_mask") new_axis_mask = pfor_input.get_attr("new_axis_mask") shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask") begin = array_ops.concat([[0], begin], axis=0) end = array_ops.concat([[0], end], axis=0) strides = array_ops.concat([[1], strides], axis=0) begin_mask = begin_mask << 1 | 1 end_mask = end_mask << 1 | 1 ellipsis_mask <<= 1 new_axis_mask <<= 1 shrink_axis_mask <<= 1 return wrap( array_ops.strided_slice( inp, begin, end, strides, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask), True) @RegisterPFor("StridedSliceGrad") def _convert_strided_slice_grad(pfor_input): shape = pfor_input.unstacked_input(0) begin = pfor_input.unstacked_input(1) end = pfor_input.unstacked_input(2) strides = pfor_input.unstacked_input(3) dy = pfor_input.stacked_input(4) begin_mask = pfor_input.get_attr("begin_mask") end_mask = pfor_input.get_attr("end_mask") ellipsis_mask = pfor_input.get_attr("ellipsis_mask") new_axis_mask = pfor_input.get_attr("new_axis_mask") shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask") shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0) begin = array_ops.concat([[0], begin], axis=0) end = array_ops.concat([[0], end], axis=0) strides = array_ops.concat([[1], strides], axis=0) begin_mask = begin_mask << 1 | 1 end_mask = end_mask << 1 | 1 ellipsis_mask <<= 1 new_axis_mask <<= 1 shrink_axis_mask <<= 1 return wrap( array_ops.strided_slice_grad( shape, begin, end, strides, dy, begin_mask=begin_mask, end_mask=end_mask, ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask, shrink_axis_mask=shrink_axis_mask), True) # math_ops @RegisterPFor("MatMul") def _convert_matmul(pfor_input): # TODO(agarwal): Check if tiling is faster than two transposes. a, a_stacked, _ = pfor_input.input(0) b, b_stacked, _ = pfor_input.input(1) tr_a = pfor_input.get_attr("transpose_a") tr_b = pfor_input.get_attr("transpose_b") if a_stacked and b_stacked: output = wrap(math_ops.matmul(a, b, adjoint_a=tr_a, adjoint_b=tr_b), True) return output elif a_stacked: if tr_a: a = array_ops.transpose(a, [0, 2, 1]) if a.shape.is_fully_defined(): x, y, z = a.shape else: x, y, z = [ array_ops.reshape(i, []) for i in array_ops.split(array_ops.shape(a), 3) ] a = array_ops.reshape(a, [x * y, z]) prod = math_ops.matmul(a, b, transpose_b=tr_b) return wrap(array_ops.reshape(prod, [x, y, -1]), True) else: assert b_stacked if tr_b: perm = [2, 0, 1] b = array_ops.transpose(b, perm) else: # As an optimization, if one of the first two dimensions is 1, then we can # reshape instead of transpose. # TODO(agarwal): This check can be done inside Transpose kernel. 
b_shape = array_ops.shape(b) min_dim = math_ops.minimum(b_shape[0], b_shape[1]) perm = control_flow_ops.cond( math_ops.equal(min_dim, 1), lambda: [0, 1, 2], lambda: [1, 0, 2]) new_shape = array_ops.stack([b_shape[1], b_shape[0], b_shape[2]]) b = array_ops.transpose(b, perm) b = array_ops.reshape(b, new_shape) if b.shape.is_fully_defined(): x, y, z = b.shape else: x, y, z = [ array_ops.reshape(i, []) for i in array_ops.split(array_ops.shape(b), 3) ] b = array_ops.reshape(b, [x, y * z]) prod = math_ops.matmul(a, b, transpose_a=tr_a) prod = array_ops.reshape(prod, [-1, y, z]) prod = array_ops.transpose(prod, [1, 0, 2]) return wrap(prod, True) @RegisterPFor("BatchMatMul") def _convert_batch_mat_mul(pfor_input): # TODO(agarwal): There may be a more efficient way to do this instead of # stacking the inputs. pfor_input.stack_inputs() x = pfor_input.stacked_input(0) y = pfor_input.stacked_input(1) adj_x = pfor_input.get_attr("adj_x") adj_y = pfor_input.get_attr("adj_y") x = _flatten_first_two_dims(x) y = _flatten_first_two_dims(y) output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y) output = _unflatten_first_dim(output, pfor_input.pfor.loop_len_vector) return wrap(output, True) @RegisterPForWithArgs("Sum", math_ops.reduce_sum) @RegisterPForWithArgs("Prod", math_ops.reduce_prod) @RegisterPForWithArgs("Max", math_ops.reduce_max) @RegisterPForWithArgs("Min", math_ops.reduce_min) def _convert_reduction(pfor_input, _, op_func): t = pfor_input.stacked_input(0) indices = pfor_input.unstacked_input(1) # Shift positive indices by one to account for the extra dimension. indices += math_ops.cast(indices >= 0, dtypes.int32) keep_dims = pfor_input.get_attr("keep_dims") return wrap(op_func(t, indices, keepdims=keep_dims), True) @RegisterPForWithArgs("Cumsum", math_ops.cumsum) @RegisterPForWithArgs("Cumprod", math_ops.cumprod) def _convert_cumfoo(pfor_input, _, op_func): t = pfor_input.stacked_input(0) axis = pfor_input.unstacked_input(1) # Shift positive indices by one to account for the extra dimension. axis += math_ops.cast(axis >= 0, dtypes.int32) exclusive = pfor_input.get_attr("exclusive") reverse = pfor_input.get_attr("reverse") return wrap(op_func(t, axis, exclusive=exclusive, reverse=reverse), True) @RegisterPFor("BiasAdd") def _convert_biasadd(pfor_input): t = pfor_input.stacked_input(0) bias = pfor_input.unstacked_input(1) data_format = pfor_input.get_attr("data_format") if data_format != b"NCHW": return wrap(nn_ops.bias_add(t, bias, data_format=data_format), True) shape = array_ops.shape(t) flattened_shape = array_ops.concat([[-1], shape[2:]], axis=0) t = array_ops.reshape(t, flattened_shape) t = nn_ops.bias_add(t, bias, data_format=b"NCHW") t = array_ops.reshape(t, shape) return wrap(t, True) @RegisterPFor("UnsortedSegmentSum") def _convert_unsortedsegmentsum(pfor_input): data, data_stacked, _ = pfor_input.input(0) # TODO(agarwal): handle unstacked? segment_ids = pfor_input.stacked_input(1) # TODO(agarwal): handle stacked? 
num_segments = pfor_input.unstacked_input(2) if not data_stacked: data = _stack(data, pfor_input.pfor.loop_len_vector).t segment_shape = array_ops.shape(segment_ids) n = segment_shape[0] ones = array_ops.ones_like(segment_shape)[1:] segment_offset = num_segments * math_ops.range(n) segment_offset = array_ops.reshape(segment_offset, array_ops.concat([[n], ones], axis=0)) segment_ids += segment_offset num_segments *= n output = math_ops.unsorted_segment_sum(data, segment_ids, num_segments) new_output_shape = array_ops.concat( [[n, -1], array_ops.shape(output)[1:]], axis=0) output = array_ops.reshape(output, new_output_shape) return wrap(output, True) @RegisterPFor("Cast") def _convert_cast(pfor_input): inp = pfor_input.stacked_input(0) dtype = pfor_input.get_attr("DstT") return wrap(math_ops.cast(inp, dtype), True) @RegisterPForWithArgs("Abs", math_ops.abs) @RegisterPForWithArgs("Acosh", math_ops.acosh) @RegisterPForWithArgs("Acos", math_ops.acos) @RegisterPForWithArgs("Add", math_ops.add) @RegisterPForWithArgs("AddV2", math_ops.add_v2) @RegisterPForWithArgs("Angle", math_ops.angle) @RegisterPForWithArgs("Asinh", math_ops.asinh) @RegisterPForWithArgs("Asin", math_ops.asin) @RegisterPForWithArgs("Atan2", math_ops.atan2) @RegisterPForWithArgs("Atanh", math_ops.atanh) @RegisterPForWithArgs("Atan", math_ops.atan) @RegisterPForWithArgs("BesselI0e", math_ops.bessel_i0e) @RegisterPForWithArgs("BesselI1e", math_ops.bessel_i1e) @RegisterPForWithArgs("BitwiseAnd", bitwise_ops.bitwise_and) @RegisterPForWithArgs("BitwiseOr", bitwise_ops.bitwise_or) @RegisterPForWithArgs("BitwiseXor", bitwise_ops.bitwise_xor) @RegisterPForWithArgs("Ceil", math_ops.ceil) @RegisterPForWithArgs("ComplexAbs", math_ops.complex_abs) @RegisterPForWithArgs("Complex", math_ops.complex) @RegisterPForWithArgs("Conj", math_ops.conj) @RegisterPForWithArgs("Cosh", math_ops.cosh) @RegisterPForWithArgs("Cos", math_ops.cos) @RegisterPForWithArgs("Digamma", math_ops.digamma) @RegisterPForWithArgs("Div", math_ops.div) @RegisterPForWithArgs("DivNoNan", math_ops.div_no_nan) @RegisterPForWithArgs("Elu", nn_ops.elu) @RegisterPForWithArgs("Equal", math_ops.equal) @RegisterPForWithArgs("Erfc", math_ops.erfc) @RegisterPForWithArgs("Erf", math_ops.erf) @RegisterPForWithArgs("Expm1", math_ops.expm1) @RegisterPForWithArgs("Exp", math_ops.exp) @RegisterPForWithArgs("FloorDiv", math_ops.floor_div) @RegisterPForWithArgs("Floor", math_ops.floor) @RegisterPForWithArgs("FloorMod", math_ops.floor_mod) @RegisterPForWithArgs("GreaterEqual", math_ops.greater_equal) @RegisterPForWithArgs("Greater", math_ops.greater) @RegisterPForWithArgs("Igammac", math_ops.igammac) @RegisterPForWithArgs("IgammaGradA", math_ops.igamma_grad_a) @RegisterPForWithArgs("Igamma", math_ops.igamma) @RegisterPForWithArgs("Imag", math_ops.imag) @RegisterPForWithArgs("Invert", bitwise_ops.invert) @RegisterPForWithArgs("Inv", math_ops.inv) @RegisterPForWithArgs("IsFinite", math_ops.is_finite) @RegisterPForWithArgs("IsInf", math_ops.is_inf) @RegisterPForWithArgs("LeftShift", bitwise_ops.left_shift) @RegisterPForWithArgs("LessEqual", math_ops.less_equal) @RegisterPForWithArgs("Less", math_ops.less) @RegisterPForWithArgs("Lgamma", math_ops.lgamma) @RegisterPForWithArgs("Log1p", math_ops.log1p) @RegisterPForWithArgs("LogicalAnd", math_ops.logical_and) @RegisterPForWithArgs("LogicalNot", math_ops.logical_not) @RegisterPForWithArgs("LogicalOr", math_ops.logical_or) @RegisterPForWithArgs("LogicalXor", math_ops.logical_xor) @RegisterPForWithArgs("Log", math_ops.log) 
@RegisterPForWithArgs("Maximum", math_ops.maximum) @RegisterPForWithArgs("Minimum", math_ops.minimum) @RegisterPForWithArgs("Mod", math_ops.mod) @RegisterPForWithArgs("Mul", math_ops.multiply) @RegisterPForWithArgs("Neg", math_ops.negative) @RegisterPForWithArgs("NotEqual", math_ops.not_equal) @RegisterPForWithArgs("Polygamma", math_ops.polygamma) @RegisterPForWithArgs("Pow", math_ops.pow) @RegisterPForWithArgs("RealDiv", math_ops.divide) @RegisterPForWithArgs("Real", math_ops.real) @RegisterPForWithArgs("Reciprocal", math_ops.reciprocal) @RegisterPForWithArgs("Relu6", nn_ops.relu6) @RegisterPForWithArgs("Relu", nn_ops.relu) @RegisterPForWithArgs("RightShift", bitwise_ops.right_shift) @RegisterPForWithArgs("Rint", math_ops.rint) @RegisterPForWithArgs("Round", math_ops.round) @RegisterPForWithArgs("Rsqrt", math_ops.rsqrt) @RegisterPForWithArgs("Selu", nn_ops.selu) @RegisterPForWithArgs("Sigmoid", math_ops.sigmoid) @RegisterPForWithArgs("Sign", math_ops.sign) @RegisterPForWithArgs("Sinh", math_ops.sinh) @RegisterPForWithArgs("Sin", math_ops.sin) @RegisterPForWithArgs("Softplus", nn_ops.softplus) @RegisterPForWithArgs("Softsign", nn_ops.softsign) @RegisterPForWithArgs("Sqrt", math_ops.sqrt) @RegisterPForWithArgs("SquaredDifference", math_ops.squared_difference) @RegisterPForWithArgs("Square", math_ops.square) @RegisterPForWithArgs("Sub", math_ops.subtract) @RegisterPForWithArgs("Tanh", math_ops.tanh) @RegisterPForWithArgs("Tan", math_ops.tan) @RegisterPForWithArgs("TruncateDiv", math_ops.truncate_div) @RegisterPForWithArgs("TruncateMod", math_ops.truncate_mod) @RegisterPForWithArgs("Zeta", math_ops.zeta) def _convert_cwise(pfor_input, op_type, op_func): # Note that ops handled here do not have attributes except "T" and "Tout", and # hence don't need extra arguments passed to the cwise_op call below. for attr in pfor_input.op.node_def.attr.keys(): assert attr in [u"T", u"Tout"], (op_type, attr) pfor_input.expanddim_inputs_for_broadcast() return wrap(op_func(*[x.t for x in pfor_input.inputs]), True) @RegisterPFor("ApproximateEqual") def _convert_approximate_equal(pfor_input): pfor_input.expanddim_inputs_for_broadcast() x = pfor_input.input(0)[0] y = pfor_input.input(1)[0] tolerance = pfor_input.get_attr("tolerance") return wrap(math_ops.approximate_equal(x, y, tolerance=tolerance), True) @RegisterPFor("Shape") def _convert_shape(pfor_input): out_type = pfor_input.get_attr("out_type") return wrap( array_ops.shape(pfor_input.stacked_input(0), out_type=out_type)[1:], False) @RegisterPFor("ShapeN") def _convert_shape_n(pfor_input): out_type = pfor_input.get_attr("out_type") shapes = [ array_ops.shape(x, out_type=out_type)[1:] if stacked else array_ops.shape(x) for x, stacked, _ in pfor_input.inputs ] return [wrap(x, False) for x in shapes] @RegisterPFor("Size") def _convert_size(pfor_input): out_type = pfor_input.get_attr("out_type") n = math_ops.cast(pfor_input.pfor.loop_len_vector[0], out_type) return wrap( array_ops.size(pfor_input.stacked_input(0), out_type=out_type) // n, False) @RegisterPFor("Rank") def _convert_rank(pfor_input): return wrap(array_ops.rank(pfor_input.stacked_input(0)) - 1, False) @RegisterPFor("AddN") def _convert_addn(pfor_input): # AddN does not support broadcasting. 
pfor_input.stack_inputs() return wrap(math_ops.add_n([x.t for x in pfor_input.inputs]), True) @RegisterPFor("BiasAddGrad") def _convert_biasaddgrad(pfor_input): grad = pfor_input.stacked_input(0) fmt = pfor_input.get_attr("data_format") if fmt == b"NCHW": output = math_ops.reduce_sum(grad, axis=[1, 3, 4], keepdims=False) else: grad_shape = array_ops.shape(grad) last_dim_shape = grad_shape[-1] first_dim_shape = grad_shape[0] output = array_ops.reshape(grad, [first_dim_shape, -1, last_dim_shape]) output = math_ops.reduce_sum(output, axis=[1], keepdims=False) return wrap(output, True) # Some required ops are not exposed under the tf namespace. Hence relying on # _create_op to create them. @RegisterPForWithArgs("EluGrad") @RegisterPForWithArgs("Relu6Grad") @RegisterPForWithArgs("ReluGrad") @RegisterPForWithArgs("SeluGrad") @RegisterPForWithArgs("SigmoidGrad") @RegisterPForWithArgs("SoftplusGrad") @RegisterPForWithArgs("SoftsignGrad") @RegisterPForWithArgs("TanhGrad") @RegisterPForWithArgs("SqrtGrad") @RegisterPForWithArgs("RsqrtGrad") @RegisterPForWithArgs("ReciprocalGrad") def _convert_grads(pfor_input, op_type, *args, **kw_args): del args del kw_args # TODO(agarwal): Looks like these ops don't support broadcasting. Hence we # have to use tiling here. pfor_input.stack_inputs() outputs = _create_op( op_type, [x.t for x in pfor_input.inputs], [x.dtype for x in pfor_input.outputs], attrs=pfor_input.op.node_def.attr).outputs return [wrap(x, True) for x in outputs] @RegisterPFor("Select") def _convert_select(pfor_input): pfor_input.stack_inputs() cond = pfor_input.stacked_input(0) t = pfor_input.stacked_input(1) e = pfor_input.stacked_input(2) cond_rank = array_ops.rank(cond) cond, t, e = control_flow_ops.cond( cond_rank > 1, lambda: _inputs_with_flattening(pfor_input, [0, 1, 2]), lambda: [cond, t, e]) outputs = _create_op( pfor_input.op_type, [cond, t, e], [x.dtype for x in pfor_input.outputs], attrs=pfor_input.op.node_def.attr).outputs n = pfor_input.pfor.loop_len_vector out = control_flow_ops.cond(cond_rank > 1, lambda: _unflatten_first_dim(outputs[0], n), lambda: outputs[0]) return [wrap(out, True) for x in outputs] # random_ops @RegisterPForWithArgs("RandomUniform") @RegisterPForWithArgs("RandomUniformInt") @RegisterPForWithArgs("RandomStandardNormal") @RegisterPForWithArgs("TruncatedNormal") @RegisterPForWithArgs("RandomGamma") @RegisterPForWithArgs("RandomPoissonV2") def _convert_random(pfor_input, op_type, *args, **kw_args): del args del kw_args inputs = [pfor_input.unstacked_input(i) for i in range(pfor_input.num_inputs)] # inputs[0] is "shape" inputs[0] = array_ops.concat( [pfor_input.pfor.loop_len_vector, inputs[0]], axis=0) logging.warning( "Note that %s inside pfor op may not give same output as " "inside a sequential loop.", op_type) outputs = _create_op( op_type, inputs, [x.dtype for x in pfor_input.outputs], attrs=pfor_input.op.node_def.attr).outputs return [wrap(x, True) for x in outputs] # logging_ops @RegisterPFor("Assert") def _convert_assert(pfor_input): cond, cond_stacked, _ = pfor_input.input(0) if cond_stacked: cond = math_ops.reduce_all(cond) data_list = [x.t for x in pfor_input.inputs][1:] return _create_op("Assert", [cond] + data_list, [], attrs=pfor_input.op.node_def.attr) @RegisterPFor("Print") def _convert_print(pfor_input): # Note that we don't stack all the inputs. Hence unstacked values are printed # once here vs multiple times in a while_loop. 
  pfor_input.stack_inputs([0])
  outputs = _create_op(
      "Print", [x.t for x in pfor_input.inputs],
      [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  return [wrap(x, True) for x in outputs]


# data_flow_ops

# TensorArray conversion is tricky since we don't support arrays of
# TensorArrays. For converting them, we consider two distinct cases:
#
# 1. The array is constructed outside the pfor call, and read/written inside the
# loop.
# This is an easier case since we don't need to make an array of TensorArrays.
# A correctness requirement is that these parallel iterations shouldn't attempt
# to write to the same location. Hence at conversion time we disallow indices to
# be loop-invariant as that would guarantee a collision. Even if the indices are
# not loop-invariant, they could conflict and that shall trigger runtime errors.
#
# 2. The array is constructed and used entirely inside each pfor iteration.
# For simplicity, here we require that the indices used for write/scatter are
# "unstacked". Otherwise it becomes hard to merge the TensorArrays created in
# different pfor iterations. We consider two sub_cases:
#
# 2a Elements written to the array are "stacked"
# To simulate multiple TensorArrays, we may increase the dimension of each
# element of the array. i.e. the i_th row of the j_th entry of the converted
# TensorArray corresponds to the j_th entry of the TensorArray in the i_th
# pfor iteration.
#
# 2b Elements written to the array are "unstacked"
# In this case we don't increase the dimensions to avoid redundant tiling. Each
# iteration is trying to write the same value. So we convert that to a single
# write.
#
# Here are some tricks used to implement the above:
# - TensorArrayV3 constructor encodes the element shape as an attr. Instead of
# trying to trace whether future writes are stacked or unstacked in order to set
# this attr, we set it to correspond to unknown shape.
# - We use the "flow" output of the different ops to track whether the array
# elements are stacked or unstacked. If a stacked write/scatter is done, we make
# the flow stacked as well.
# - We use some heuristic traversal of the graph to track whether the
# TensorArray handle was created inside or outside the pfor loop.


@RegisterPFor("TensorArrayV3")
def _convert_tensor_array_v3(pfor_input):
  size = pfor_input.unstacked_input(0)
  dtype = pfor_input.get_attr("dtype")
  dynamic_size = pfor_input.get_attr("dynamic_size")
  clear_after_read = pfor_input.get_attr("clear_after_read")
  identical_element_shapes = pfor_input.get_attr("identical_element_shapes")
  tensor_array_name = pfor_input.get_attr("tensor_array_name")
  handle, flow = data_flow_ops.tensor_array_v3(
      size,
      dtype=dtype,
      # We don't set element shape since we don't know if writes are stacked or
      # not yet.
      element_shape=None,
      dynamic_size=dynamic_size,
      clear_after_read=clear_after_read,
      identical_element_shapes=identical_element_shapes,
      tensor_array_name=tensor_array_name)
  # Note we keep flow unstacked for now since we don't know if writes will be
  # stacked or not.
return wrap(handle, False), wrap(flow, False) @RegisterPFor("TensorArraySizeV3") def _convert_tensor_array_size_v3(pfor_input): handle = pfor_input.unstacked_input(0) flow, flow_stacked, _ = pfor_input.input(1) if flow_stacked: flow = _unstack_flow(flow) size = data_flow_ops.tensor_array_size_v3(handle, flow) return wrap(size, False) def _handle_inside_pfor(pfor_input, handle): """Returns True if handle was created inside the pfor loop.""" # We use some heuristic to find the original TensorArray creation op. # The logic should handle the common cases (except cond based subgraphs). # In theory the user could perform different operations on the handle (like # Reshape, stack multiple handles, etc) which could break this logic. # TODO(agarwal): handle Switch/Merge. while handle.op.type in ("Enter", "Identity"): handle = handle.op.inputs[0] if handle.op.type not in [ "TensorArrayV3", "TensorArrayGradV3", "TensorArrayGradWithShape"]: raise ValueError("Unable to find source for handle %s" % handle) else: return pfor_input.pfor.op_is_inside_loop(handle.op) def _unstack_flow(value): # TODO(agarwal): consider looking if this is a Tile op then get its input. # This may avoid running the Tile operations. return array_ops.gather(value, 0) @RegisterPFor("TensorArrayReadV3") def _convert_tensor_array_read_v3(pfor_input): handle = pfor_input.unstacked_input(0) index, index_stacked, _ = pfor_input.input(1) dtype = pfor_input.get_attr("dtype") flow, flow_stacked, _ = pfor_input.input(2) if flow_stacked: flow = _unstack_flow(flow) is_inside_pfor = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0]) if is_inside_pfor: # Note that if we are inside a control flow construct inside the pfor, and # only some of the iterations are doing the read (i.e. # `all_indices_partitioned` is True), then the read operation should only # return values for the currently active pfor iterations (`all_indices` # below). Hence, whenever the returned value is stacked (i.e. `flow` is # stacked), we may need to do an extra gather after reading the values. Also # note that if `is_inside` is false, then values in the tensor array are # unstacked. So the check is only needed in this branch. all_indices = pfor_input.pfor.all_indices all_indices_partitioned = pfor_input.pfor.all_indices_partitioned # Note: flow_stacked indicates if values in the TensorArray are stacked or # not. if index_stacked: if flow_stacked: raise ValueError( "It looks like TensorArrayReadV3 was called on a TensorArray whose" " values are not loop-invariant, and the read indices were also" " not loop invariant. This is currently unsupported.") value = data_flow_ops.tensor_array_gather_v3( handle, index, flow, dtype=dtype) return wrap(value, True) value = data_flow_ops.tensor_array_read_v3( handle, index, flow, dtype=dtype) if flow_stacked and all_indices_partitioned: value = array_ops.gather(value, all_indices) return wrap(value, flow_stacked) # Values in the TensorArray should be unstacked (since different iterations # couldn't write to the same location). So whether output is stacked or not # depends on index_stacked. 
if index_stacked: value = data_flow_ops.tensor_array_gather_v3( handle, index, flow, dtype=dtype) else: value = data_flow_ops.tensor_array_read_v3( handle, index, flow, dtype=dtype) return wrap(value, index_stacked) @RegisterPFor("TensorArrayWriteV3") def _convert_tensor_array_write_v3(pfor_input): handle = pfor_input.unstacked_input(0) index, index_stacked, _ = pfor_input.input(1) value, value_stacked, _ = pfor_input.input(2) flow, flow_stacked, _ = pfor_input.input(3) if value_stacked and pfor_input.pfor.all_indices_partitioned: # Looks like we are in a control flow in a pfor where not all iterations are # active now. We don't allow that since that could lead to different indices # having different shapes which will be hard to merge later. raise ValueError("Writing non loop invariant values to TensorArray from " "inside a while_loop/cond not supported.") if flow_stacked: flow = _unstack_flow(flow) is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0]) if is_inside: if index_stacked: raise ValueError("Need indices for %s to be loop invariant" % handle) if not flow_stacked and not value_stacked: flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow) return wrap(flow_out, False) else: if not value_stacked: value = _stack(value, pfor_input.pfor.loop_len_vector).t # TODO(agarwal): Note that if flow is unstacked and value is stacked, then # this may or may not be a safe situation. flow is unstacked both for a # freshly created TensorArray, as well as after unstacked values are # written to it. If it is the latter, then we cannot write a stacked value # now since that may cause runtime errors due to different shapes in the # array. At the moment we are not able to handle this gracefully and # distinguish between the two cases. That would require some heuristic # traversal of the graph to figure out whether all the writes are # unstacked or not. flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow) return _stack(flow_out, pfor_input.pfor.loop_len_vector) else: if not index_stacked: raise ValueError("Need indices for %s to be not loop invariant" % handle) # Note that even when index_stacked is true, actual values in index may # still not be unique. However that will cause runtime error when executing # the scatter operation below. if not value_stacked: value = _stack(value, pfor_input.pfor.loop_len_vector).t flow_out = data_flow_ops.tensor_array_scatter_v3(handle, index, value, flow) return _stack(flow_out, pfor_input.pfor.loop_len_vector) def _transpose_first_two_dims(value): # TODO(agarwal): optimize if one of the dims == 1. value_shape = array_ops.shape(value) v0 = value_shape[0] v1 = value_shape[1] value = array_ops.reshape(value, [v0, v1, -1]) value = array_ops.transpose(value, [1, 0, 2]) new_shape = array_ops.concat([[v1, v0], value_shape[2:]], axis=0) return array_ops.reshape(value, new_shape) @RegisterPFor("TensorArrayGatherV3") def _convert_tensor_array_gather_v3(pfor_input): handle = pfor_input.unstacked_input(0) indices, indices_stacked, _ = pfor_input.input(1) indices = array_ops.reshape(indices, [-1]) flow, flow_stacked, _ = pfor_input.input(2) if flow_stacked: flow = _unstack_flow(flow) dtype = pfor_input.get_attr("dtype") # TODO(agarwal): support element_shape attr? 
n = pfor_input.pfor.loop_len_vector value = data_flow_ops.tensor_array_gather_v3( handle, indices, flow, dtype=dtype) is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0]) if is_inside: # flow_stacked indicates if values in the TensorArray are stacked or not. if indices_stacked: if flow_stacked: raise ValueError( "It looks like TensorArrayGatherV3 was called on a TensorArray " "whose values are not loop-invariant, and the indices were also " "not loop invariant. This is currently unsupported.") else: value = _unflatten_first_dim(value, n) return wrap(value, True) else: if flow_stacked: # Since elements in this array are stacked and `value` was produced by # gather, its first two dims are "gathered elements" and "stack # dimension". Our semantics require these two to be flipped. value = _transpose_first_two_dims(value) return wrap(value, flow_stacked) else: # Values in the TensorArray should be unstacked (since different iterations # couldn't write to the same location). So whether output is stacked or not # depends on indices_stacked. if indices_stacked: value = _unflatten_first_dim(value, n) return wrap(value, indices_stacked) @RegisterPFor("TensorArrayScatterV3") def _convert_tensor_array_scatter_v3(pfor_input): handle = pfor_input.unstacked_input(0) indices, indices_stacked, _ = pfor_input.input(1) indices = array_ops.reshape(indices, [-1]) value, value_stacked, _ = pfor_input.input(2) flow, flow_stacked, _ = pfor_input.input(3) if flow_stacked: flow = _unstack_flow(flow) is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0]) if is_inside: if indices_stacked: raise ValueError("Need indices for %s to be loop invariant" % handle) # Note that flow_stacked indicates if existing values in the array are # stacked or not. if not flow_stacked and not value_stacked: flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value, flow) return wrap(flow_out, False) if not value_stacked: # TODO(agarwal): tile in the second dimension directly instead of # transposing below. value = _stack(value, pfor_input.pfor.loop_len_vector).t value = _transpose_first_two_dims(value) # TODO(agarwal): Note that if a previous write was unstacked, flow will be # unstacked, and a stacked value may be written here which may cause # runtime error due to different elements having different shape. We do # not try to prevent that. flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value, flow) return _stack(flow_out, pfor_input.pfor.loop_len_vector) if not indices_stacked: raise ValueError("Need indices for %s to be not loop invariant" % handle) if not value_stacked: value = _stack(value, pfor_input.pfor.loop_len_vector).t value = _flatten_first_two_dims(value) flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value, flow) return _stack(flow_out, pfor_input.pfor.loop_len_vector) @RegisterPFor("TensorArrayGradV3") def _convert_tensor_array_grad_v3(pfor_input): handle = pfor_input.unstacked_input(0) flow, flow_stacked, _ = pfor_input.input(1) if flow_stacked: flow = _unstack_flow(flow) source = pfor_input.get_attr("source") # TODO(agarwal): For now, we assume that gradients are stacked if the # TensorArrayGradV3 call is being done inside the pfor. Getting that wrong # will give runtime error due to incorrect shape being written to the # accumulator. It is difficult to know in advance if gradients written will be # stacked or not. Note that flow being stacked is not indicative of the # gradient being stacked or not. Revisit this later. 
shape_to_prepend = pfor_input.pfor.loop_len_vector grad_handle, flow_out = data_flow_ops.tensor_array_grad_with_shape( handle=handle, flow_in=flow, shape_to_prepend=shape_to_prepend, source=source) flow_out = _stack(flow_out, pfor_input.pfor.loop_len_vector).t return [wrap(grad_handle, False), wrap(flow_out, True)] # StackV2 conversion is tricky since we don't have arrays of StackV2. So similar # to TensorArrays, we convert them by changing the dimension of the elements # inside the stack. # # We consider two cases: # # 1. StackV2 is constructed and used entirely inside the pfor loop. # We keep a single Stack and perform the push/pop operations of all the # iterations in lock-step. We also assume that all the iterations perform these # operations. In case of dynamic control flow, if only some of the iterations # try to perform a push/pop, then the conversion may not work correctly and may # cause undefined behavior. # TODO(agarwal): test StackV2 with dynamic control flow. # # 2. StackV2 is constructed outside the pfor loop. # Performing stack push/pop in a parallel fashion is ill-defined. However given # that reading stacks created externally is a common operation when computing # jacobians, we provide some special semantics here as follows. # - disallow push operations to the stack # - pop operations are performed in lock step by all iterations, similar to the # case when the stack is created inside. A single value is popped during the # lock-step operation and broadcast to all the iterations. Values in the stack # are assumed to be loop-invariant. # # Some other implementation details: # We use an ugly logic to find whether values in Stack data structure are # loop invariant or not. When converting push/pop operations, we keep track of # whether the last conversion used a stacked value or not (see _stack_cache # below). As a result if an unstacked value is written first, subsequent stacked # writes are disallowed when they could have been allowed in theory. # Map from cache key based on StackV2 handle to a bool indicating whether values # are stacked or not. # TODO(agarwal): move _stack_cache inside pfor? _stack_cache = {} def _stack_cache_key(pfor_input): """Create cache key corresponding to a stack handle.""" op_type = pfor_input.op_type assert op_type in ["StackPushV2", "StackPopV2"], op_type orig_handle = pfor_input.op.inputs[0] while orig_handle.op.type in ["Identity", "Enter"]: orig_handle = orig_handle.op.inputs[0] assert orig_handle.op.type == "StackV2", orig_handle.op return ops.get_default_graph(), pfor_input.pfor, orig_handle def _stack_handle_inside_pfor(handle, pfor_input): while handle.op.type in ["Identity", "Enter"]: handle = handle.op.inputs[0] assert handle.op.type == "StackV2", ( "Unable to find StackV2 op. Got %s" % handle.op) return pfor_input.pfor.op_is_inside_loop(handle.op) @RegisterPFor("StackPushV2") def _convert_stack_push_v2(pfor_input): handle = pfor_input.unstacked_input(0) elem, elem_stacked, _ = pfor_input.input(1) swap_memory = pfor_input.get_attr("swap_memory") if not _stack_handle_inside_pfor(pfor_input.op.inputs[0], pfor_input): raise ValueError("StackPushV2 not allowed on stacks created outside pfor") stack_cache_key = _stack_cache_key(pfor_input) stacked = _stack_cache.get(stack_cache_key, None) if stacked is None: stacked = elem_stacked _stack_cache[stack_cache_key] = stacked else: # If we previously made it unstacked then we can't revert to being stacked. 
if not stacked and elem_stacked: raise ValueError( "It looks like the stack was previously determined to be loop" " invariant, but we are now trying to push a loop dependent value" " to it. This is currently unsupported.") if stacked and not elem_stacked: elem = _stack(elem, pfor_input.pfor.loop_len_vector).t out = data_flow_ops.stack_push_v2(handle, elem, swap_memory=swap_memory) return wrap(out, stacked) # Note that inputs to this convertor will be unstacked. However it should get # called since it is a stateful op. @RegisterPFor("StackPopV2") def _convert_stack_pop_v2(pfor_input): handle = pfor_input.unstacked_input(0) stack_cache_key = _stack_cache_key(pfor_input) stacked = _stack_cache.get(stack_cache_key, None) # If a StackPushV2 has not been converted yet, we default to unstacked since # the push could be outside of pfor, or the covertor may not be called if the # inputs are unconverted. if stacked is None: stacked = False _stack_cache[stack_cache_key] = False elem_type = pfor_input.get_attr("elem_type") out = data_flow_ops.stack_pop_v2(handle, elem_type) return wrap(out, stacked) # parsing_ops @RegisterPFor("DecodeCSV") def _convert_decode_csv(pfor_input): lines = pfor_input.stacked_input(0) record_defaults = [ pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs) ] field_delim = pfor_input.get_attr("field_delim") use_quote_delim = pfor_input.get_attr("use_quote_delim") select_cols = pfor_input.get_attr("select_cols") if not select_cols: select_cols = None return [ wrap(t, True) for t in parsing_ops.decode_csv( lines, record_defaults, field_delim=field_delim, use_quote_delim=use_quote_delim, select_cols=select_cols) ] @RegisterPFor("ParseSingleExample") def _convert_parse_single_example(pfor_input): serialized = pfor_input.stacked_input(0) dense_defaults = [ pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs) ] sparse_keys = pfor_input.get_attr("sparse_keys") dense_keys = pfor_input.get_attr("dense_keys") sparse_types = pfor_input.get_attr("sparse_types") dense_shapes = pfor_input.get_attr("dense_shapes") output = gen_parsing_ops.parse_example( serialized=serialized, names=[], dense_defaults=dense_defaults, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, dense_shapes=dense_shapes) return [wrap(t, True, True) for t in nest.flatten(output)]
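# --- Editor's illustrative sketch; not part of the original pfor.py. ---
# A minimal example of how the converters registered in this file get used. It
# assumes the `pfor` entry point from
# tensorflow/python/ops/parallel_for/control_flow_ops.py in this same package:
# `pfor` traces `loop_fn` once and swaps each traced op for the conversion
# registered above (e.g. "Gather" -> _convert_gather, "MatMul" ->
# _convert_matmul), so the loop body runs as one batched graph instead of a
# Python-level loop.
def _example_pfor_usage():
  import numpy as np
  from tensorflow.python.framework import constant_op
  from tensorflow.python.ops.parallel_for.control_flow_ops import pfor

  params = constant_op.constant(np.random.rand(8, 3).astype(np.float32))
  weights = constant_op.constant(np.random.rand(3, 5).astype(np.float32))

  def loop_fn(i):
    # Both ops below are intercepted by converters registered in this file.
    row = array_ops.expand_dims(array_ops.gather(params, i), 0)   # "Gather"
    return math_ops.matmul(row, weights)                          # "MatMul"

  # Vectorized over 8 iterations; the loop dimension is stacked first.
  return pfor(loop_fn, 8)  # shape [8, 1, 5]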
{ "content_hash": "ea2e2adedc4218ad4dfffc5619942f50", "timestamp": "", "source": "github", "line_count": 2622, "max_line_length": 80, "avg_line_length": 40.01792524790236, "alnum_prop": 0.672944046813499, "repo_name": "dancingdan/tensorflow", "id": "83cbe64ff21d6fa4380ddc9effb18b80feb5536b", "size": "105616", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tensorflow/python/ops/parallel_for/pfor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "3325" }, { "name": "Batchfile", "bytes": "10132" }, { "name": "C", "bytes": "339398" }, { "name": "C#", "bytes": "8446" }, { "name": "C++", "bytes": "49741628" }, { "name": "CMake", "bytes": "195409" }, { "name": "Dockerfile", "bytes": "36386" }, { "name": "Go", "bytes": "1254047" }, { "name": "HTML", "bytes": "4681865" }, { "name": "Java", "bytes": "867093" }, { "name": "Jupyter Notebook", "bytes": "2604735" }, { "name": "LLVM", "bytes": "6536" }, { "name": "Makefile", "bytes": "58612" }, { "name": "Objective-C", "bytes": "15650" }, { "name": "Objective-C++", "bytes": "99243" }, { "name": "PHP", "bytes": "1357" }, { "name": "Perl", "bytes": "7536" }, { "name": "PureBasic", "bytes": "25356" }, { "name": "Python", "bytes": "41593453" }, { "name": "Ruby", "bytes": "553" }, { "name": "Shell", "bytes": "476832" }, { "name": "Smarty", "bytes": "6976" } ], "symlink_target": "" }
import pkgutil
import inspect

from viper.common.out import print_warning
from viper.common.abstracts import Module

def load_modules():
    # Import modules package.
    import viper.modules as modules

    plugins = dict()

    # Walk recursively through all modules and packages.
    for loader, module_name, ispkg in pkgutil.walk_packages(modules.__path__, modules.__name__ + '.'):
        # If current item is a package, skip.
        if ispkg:
            continue

        # Try to import the module, otherwise skip.
        try:
            module = __import__(module_name, globals(), locals(), ['dummy'], -1)
        except ImportError as e:
            print_warning("Something wrong happened while importing the module {0}: {1}".format(module_name, e))
            continue

        # Walk through all members of currently imported modules.
        for member_name, member_object in inspect.getmembers(module):
            # Check if current member is a class.
            if inspect.isclass(member_object):
                # Yield the class if it's a subclass of Module.
                if issubclass(member_object, Module) and member_object is not Module:
                    plugins[member_object.cmd] = dict(obj=member_object,
                                                      description=member_object.description)

    return plugins

__modules__ = load_modules()
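# --- Editor's illustrative sketch; not part of the original plugins.py. ---
# A minimal plugin of the kind load_modules() above discovers: any class found
# under the viper.modules package that subclasses Module (and is not Module
# itself) is registered under its `cmd` attribute, with `description` used as
# help text -- those are the two attributes read above. In a real setup the
# class would live in its own file under viper/modules/; the `run` method name
# follows Viper's module convention and is an assumption here.
class _HelloWorldExample(Module):
    cmd = 'hello'
    description = 'Minimal example module'

    def run(self):
        print("hello from a Viper module")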
{ "content_hash": "d524b19f67ab84b52fd37a304db6cb94", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 112, "avg_line_length": 36.97222222222222, "alnum_prop": 0.6408715251690458, "repo_name": "S2R2/viper", "id": "8388ef5796c6f6e2e02f9a972b885fb3843ddfaf", "size": "1453", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "viper/core/plugins.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "1306" }, { "name": "JavaScript", "bytes": "9294" }, { "name": "Makefile", "bytes": "436" }, { "name": "Python", "bytes": "1552230" }, { "name": "Smarty", "bytes": "28213" } ], "symlink_target": "" }
import xlwt
from xlwt import Font
from xlwt import XFStyle

class XlsExporter(object):
    @staticmethod
    def export(match_list, file_path):
        wb = xlwt.Workbook()
        ws = wb.add_sheet('Ranking')

        # writing header
        header = ['Ranking', 'Name', 'Rating', 'Reviews', 'Price', 'Cuisines',
                  'Edenred Name', 'Match', 'Edenred Address']
        font = Font()
        font.name = 'Arial'
        font.bold = True
        style = XFStyle()
        style.font = font
        for index in range(len(header)):
            ws.write(0, index, header[index], style)

        # writing restaurants
        for index in range(len(match_list)):
            match = match_list[index]
            ws.write(index+1, 0, int(match.restaurant1.ranking))
            ws.write(index+1, 1, xlwt.Formula('HYPERLINK("%s"; "%s")' % (match.restaurant1.url, match.restaurant1.name)))
            ws.write(index+1, 2, float(match.restaurant1.rating.replace(',', '.')))
            ws.write(index+1, 3, int(match.restaurant1.review_count))
            ws.write(index+1, 4, match.restaurant1.price)
            ws.write(index+1, 5, ', '.join(match.restaurant1.cuisines))
            ws.write(index+1, 6, match.restaurant2.name)
            ws.write(index+1, 7, int(100.0 * match.score))

            address = match.restaurant2.address + ', ' + match.restaurant2.postcode
            map_link = 'https://www.google.com/maps?f=q&source=s_q&hl=es&q=%s' % address
            ws.write(index+1, 8, xlwt.Formula('HYPERLINK("%s"; "%s")' % (map_link, address)))

        wb.save(file_path)
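# --- Editor's illustrative sketch; not part of the original export.py. ---
# Minimal usage example showing the attributes export() expects on each match
# (names taken directly from the code above). The objects below are stand-ins
# built with SimpleNamespace, not the project's real restaurant/match classes.
if __name__ == '__main__':
    from types import SimpleNamespace

    tripadvisor = SimpleNamespace(
        ranking='1', name='Trattoria Uno', url='http://example.com/r/1',
        rating='4,5', review_count='120', price='$$',
        cuisines=['Italian', 'Pizza'])
    edenred = SimpleNamespace(
        name='Trattoria Uno SRL', address='Calle Mayor 1', postcode='28013')
    match = SimpleNamespace(restaurant1=tripadvisor, restaurant2=edenred, score=0.92)

    XlsExporter.export([match], 'ranking_example.xls')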
{ "content_hash": "bded8bb079666291e8e8dabae4f1bf8b", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 121, "avg_line_length": 39.09090909090909, "alnum_prop": 0.5354651162790698, "repo_name": "agustin-prats/edenred-ranking", "id": "98e69c648d876ac053f0da13b4a2d1dcf3c4846c", "size": "2854", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "edenred_ranking/export.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "54597" } ], "symlink_target": "" }
import sys,traceback if sys.version_info[0] != 3: print("This script is only python3 compatible!") exit(1) import numpy as np import h5py from argparse import ArgumentParser import os.path import json import matplotlib as mpl mpl.use("agg") import matplotlib.cm as cm import matplotlib.ticker as ticker import matplotlib.pyplot as plt import matplotlib.colors as colors from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.offsetbox import AnchoredOffsetbox, TextArea # Plot Settings ========================================================== def cm2inch(value): return value/2.54 def cm2pt(value): return cm2inch(value)*72 lineSettings = { "thin" : cm2pt(0.2/10), "extra-thin" : cm2pt(0.1/10), "semi-thick" : cm2pt(0.3/10), "thick" : cm2pt(0.4/10), "extra-thick" : cm2pt(0.6/10) } defaultMarkerSize = cm2pt(1.2/10) markerSize = defaultMarkerSize def defaultFormatAxes(*axesArgs): for axes in axesArgs: if not isinstance(axes,list): axes = [axes] for ax in axes: l = ax.get_legend() if l is not None: l.get_frame().set_linewidth(lineSettings["thin"]) for spine in ax.spines.values(): spine.set_linewidth(lineSettings["semi-thick"]) def defaultFormatColorbar(*cbars): for c in cbars: c.outline.set_linewidth(lineSettings["thin"]) # ======================================================================== defaultPlotSettings ={ "savePlots": True, "figurePrefix":"fig", "figureExt": ".png", "showTitles": True, "x0" : 0, "y0" : 0 } defaultOptions = { "imageFileName": "image001.h5" , "gridFileName": "outGridVelocity.h5", "scatterFileName": "outScatteredVelocity.h5", "tiePointsFolder" : "accivWork", "plotSettings" : defaultPlotSettings } def plotCmd(): parser = ArgumentParser() parser.add_argument("--folder", type=str, dest="folder", help="folder of the output data to be plotted") parser.add_argument("--imageFileName", type=str, dest="imageFileName", help="image file path") parser.add_argument("--gridFileName", type=str, dest="gridFileName") parser.add_argument("--scatterFileName", type=str, dest="scatterFileName") parser.add_argument("--tiePointsFolder", type=str, dest="tiePointsFolder") options = vars(parser.parse_args()) plot(options) def plot(options): # overwrite options with default values if not existing! 
def makeComplete(d,defaults): if isinstance(d,dict): for k in defaults.keys(): if k not in d: d[k] = defaults[k] else: if d[k] is None: d[k] = defaults[k] # key is in d makeComplete(d[k],defaults[k]) # goes on if d[k] is a dict makeComplete(options,defaultOptions) print("arguments:" , options) plotSettings = options["plotSettings"] """ Add an anchored text to the axis """ def addText(ax, t, loc=2, textp = dict(size=12), pad = 0.1, borderpad = 0.5 , frameon =True, fc="white", ec="black" ,lw=1): a = ax.annotate(t, (1,0), (0, -20), xycoords='axes fraction', textcoords='offset points', va='top', ha='right', **textp) return a plt.close("all") noScatterFile = False scatterFileName = os.path.join(options["folder"],options["scatterFileName"]) if(not os.path.exists(scatterFileName)): noScatterFile = True gridFileName = os.path.join(options["folder"],options["gridFileName"]) if(not os.path.exists(gridFileName)): raise ValueError("not found:", gridFileName) tiePointsFileName = os.path.join(options["folder"],options["tiePointsFolder"],"combinedCorrelationTiePoints.h5") if(not os.path.exists(tiePointsFileName)): print("not found:", tiePointsFileName) exit() imageFileName = options["imageFileName"] if(not os.path.exists(imageFileName)): raise ValueError("not found:", imageFileName) # Plot Options ===================================================== # figure titles figureSize= (cm2inch(16),cm2inch(12)) figures={} figureTitles=dict() # background axisBG = [1,1,1] # velocity norm range ( [xmin,ymin],[xmax,ymax]) velocityRange = np.array([[-7,-7],[7,7]]) velocityMaxNorm = np.around(np.linalg.norm(velocityRange[1] - velocityRange[0]),decimals=1) pixelShiftRange = [-10,10] # plot a velocity vector every "skip" pixels from the gridded velocity data skip = 6 # location uncertainty corrLocationUncertaintyMax = 0.002 corrVelocityUncertaintyMax = velocityMaxNorm tiePointFractionMax = 0.1 # number of points to be sampled from the scattered data maxPoints = 10000 # the locations of the major and minor axes to plot x0 = plotSettings["x0"] y0 = plotSettings["y0"] # the width around each axis to take points from when plotting axes dx = 10 dy = 10 # location of time and frame idx locFrame = 4 propFrameText = dict(color="black", size=9) maxImageStd = 3.0 # Image data is clamped to be within 3 std. dev. from the mean. # Decrease this number to increase image contrast. # decrease these numbers to increase the length of vectors, and visa versa scatterVectorScale = 0.5 gridVectorScale = 0.5 quiverOpts = {'headwidth':4, 'headlength':2} colormap = cm.Greys_r # =================================================================== h5File = h5py.File(imageFileName, 'r') time = h5File["time"][...] if not "frameIdx" in options: frameIdx = 0 else: frameIdx = options["frameIdx"] print("Time: ", time) bounds = h5File["bounds"][...] imageData = h5File["data"][...] print("Bounds:" , bounds) #print("Index (0,0) (top-left):", imageData[0,0]) #print("Index (1,0):", imageData[1,0]) imageMask = np.array(h5File["mask"][...],bool) imageFinalMask = np.ma.masked_equal(h5File["finalMask"][...],0) print("Final mask:", imageFinalMask.shape, imageFinalMask.dtype) h5File.close() if not noScatterFile: h5File = h5py.File(scatterFileName, 'r') x = h5File["x"][...] y = h5File["y"][...] vx = h5File["vx"][...] vy = h5File["vy"][...] pixelVx = h5File["dataX"][...] pixelVy = h5File["dataY"][...] h5File.close() h5File = h5py.File(tiePointsFileName, 'r') deltaTs = h5File["deltaTs"][...] 
maxDeltaT = np.amax(deltaTs) residualsFound = "correlationVelocityResiduals" in h5File if residualsFound: correlationVelocityResiduals = h5File["correlationVelocityResiduals"][...] correlationLocationResiduals = h5File["correlationLocationResiduals"][...] correlationTiePointsX1 = h5File["x1"][...] correlationTiePointsY1 = h5File["y1"][...] correlationTiePointsX2 = h5File["x2"][...] correlationTiePointsY2 = h5File["y2"][...] h5File.close() h5File = h5py.File(gridFileName, 'r') gridVx = h5File["vx"][...] gridVy = h5File["vy"][...] h5File.close() print("Gridded velocity shape: ", gridVx.shape) gx = np.linspace(bounds[0],bounds[1],gridVx.shape[1]) gy = np.linspace(bounds[2],bounds[3],gridVx.shape[0]) # File info: =========================================================== gridSize = gridVx.shape print( "Grid Data size: " + str(gridSize) ) if not noScatterFile: print( "Scattered data size: " + str(vx.shape) ) print( "Scattered pixelVx size: " + str(pixelVx.shape) ) # ====================================================================== # Setup Data============================================================ [gridX, gridY] = np.meshgrid(gx,gy) vMagGrid = np.sqrt(gridVx**2 + gridVy**2) if not noScatterFile: dLon = gx[1]-gx[0] dLat = gy[1]-gy[0] pixelVx = pixelVx/dLon*maxDeltaT pixelVy = pixelVy/dLat*maxDeltaT vMag = np.sqrt(vx**2+vy**2) #imageMean = np.mean(imageData[imageMask]) #imageStd = np.std(imageData[imageMask]) #imageData *= imageMask #imageData = np.maximum(imageMean-maxImageStd*imageStd, #np.minimum(imageMean+maxImageStd*imageStd,imageData)) if not noScatterFile: if(x.size > maxPoints): indices = np.array(np.random.rand(maxPoints)*x.size,int) else: indices = np.array(np.linspace(0,x.size-1,x.size),int) maskXAxis = np.abs(y-y0) < dy maskYAxis = np.abs(x-x0) < dx xAxisIndices = indices[maskXAxis[indices]] yAxisIndices = indices[maskYAxis[indices]] xAxisGridIndex = np.argmin(np.abs(gy-y0)) yAxisGridIndex = np.argmin(np.abs(gx-x0)) # Mask the correlation tie points, to plot the uncertainty # histogram only for the tie points (x1,y1 in image 1 to x2,y2 in image 2) # in the mask ============================================================ if residualsFound: # acciv grid is (y,x) indexed with left-bottom corner as origin dim = np.array([gridSize[1],gridSize[0]],dtype=int) minPoint = np.array([bounds[0],bounds[2]],dtype=float) maxPoint = np.array([bounds[0+1],bounds[2+1]],dtype=float) dxInv = 1.0/((maxPoint - minPoint) / dim) points1 = np.vstack([correlationTiePointsX1,correlationTiePointsY1]).T # [[x,y],[x,y]] points2 = np.vstack([correlationTiePointsX2,correlationTiePointsY2]).T def getValidIndices(dim,minP,dxInv,points): indices = ((points - minP) * dxInv).astype(int) validIndices = np.logical_not( ((indices[:,0] < 0) | (indices[:,0] >= dim[0])) | ((indices[:,1] < 0) | (indices[:,1] >= dim[1])) ) return indices, validIndices indices1, validMask1 = getValidIndices(dim,minPoint,dxInv,points1) indices2, validMask2 = getValidIndices(dim,minPoint,dxInv,points2) totalMask = validMask1 & validMask2 # both tie points need to be in bounds indices1 = indices1[totalMask] indices2 = indices2[totalMask] mask = imageFinalMask.mask # take care (y,x) indexes # mask can result in to a single bool if isinstance(mask,np.bool_): mask = np.ones(imageFinalMask.shape,dtype=bool)*mask correlationTiePointMask = np.logical_not( mask[indices1[:,1],indices1[:,0]] | mask[indices2[:,1],indices2[:,0]]) #either first point masked or the second points results in neglecting the tiepoint print("Correlation tie points in 
mask: %f" % (np.sum(correlationTiePointMask) / correlationVelocityResiduals.size *100 ) + "%") # mask correlation vel/loc residuals for further processing correlationVelocityResiduals = correlationVelocityResiduals[correlationTiePointMask] correlationLocationResiduals = correlationLocationResiduals[correlationTiePointMask] # ======================================================================== # Plot Data ============================================================ figCount = 1 if not noScatterFile: figureTitles[figCount] = 'a sample of %i scattered velocity vectors'%(indices.size) fig = plt.figure(figCount,figsize=figureSize) figures[figCount] = fig ax = fig.add_subplot(111, aspect='equal') ax.imshow(imageData, origin='lower', extent=(bounds[0],bounds[1],bounds[2],bounds[3]), cmap=colormap) ax.quiver(x[indices], y[indices], vx[indices], vy[indices], color='g', pivot='tail', scale_units='xy', scale=scatterVectorScale, **quiverOpts) ax.quiver(x[xAxisIndices], y[xAxisIndices], vx[xAxisIndices], vy[xAxisIndices], color='r', pivot='mid', scale_units='xy', scale=scatterVectorScale, **quiverOpts) ax.quiver(x[yAxisIndices], y[yAxisIndices], vx[yAxisIndices], vy[yAxisIndices], color='b', pivot='mid', scale_units='xy', scale=scatterVectorScale, **quiverOpts) if plotSettings["showTitles"]: ax.set_title(figureTitles[figCount]) ax.set_xlabel("$x$ [m]") ax.set_ylabel("$y$ [m]") addText(ax,"t: %.04f s, frame: %i" % (time,frameIdx), loc=locFrame, textp = propFrameText) fig.tight_layout(pad=0.1) defaultFormatAxes(ax) ##fig = plt.figure(12, figsize=[width,height]) ###fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25) ##ax = fig.add_subplot(111, aspect='equal') ##plt.imshow(imageData, extent=(bounds[0],bounds[1],bounds[3],bounds[2]), cmap=colormap) ##ax.set_ylim(ax.get_ylim()[::-1]) ##plt.axis('tight') figCount+=1 figureTitles[figCount] = 'gridded velocity vector (skip = %i)'% (skip) fig = plt.figure(figCount,figsize=figureSize) figures[figCount] = fig ax = fig.add_subplot(111, aspect='equal') ax.imshow(imageData, origin='lower', extent=(bounds[0],bounds[1],bounds[2],bounds[3]), cmap=colormap) ax.quiver(gridX[::skip,::skip], gridY[::skip,::skip], gridVx[::skip,::skip], gridVy[::skip,::skip], color='g', pivot='tail', scale_units='xy', scale=gridVectorScale) if plotSettings["showTitles"]: ax.set_title(figureTitles[figCount]) ax.set_xlabel("$x$ [m]") ax.set_ylabel("$y$ [m]") addText(ax,"t: %.04f s, frame: %i" % (time,frameIdx), loc=locFrame, textp = propFrameText) fig.tight_layout(pad=0.1) defaultFormatAxes(ax) figCount+=1 #figureTitles[figCount] = '$v_x$' #fig = plt.figure(figCount,figsize=figureSize) #figures[figCount] = fig #ax = fig.add_subplot(111, aspect='equal') #im = ax.imshow(gridVx, origin='lower', extent=(bounds[0],bounds[1],bounds[2],bounds[3]), cmap=plt.get_cmap('jet')) #divider = make_axes_locatable(ax) #cax1 = divider.append_axes("right", size="2%", pad=0.2) #fig.colorbar(im, cax = cax1) #im.set_clim(0,velocityMaxNorm) #ax.set_aspect('equal') #im.set_clim(velocityRange[0][0],velocityRange[1][0]) #if plotSettings["showTitles"]: #ax.set_title(figureTitles[figCount]) #ax.set_xlabel("$x$ $[m]$") #ax.set_ylabel("$y$ $[m]$") #addText(ax,"t: %.04f s, frame: %i" % (time,frameIdx), loc=locFrame, textp = propFrameText) #fig.tight_layout(pad=0.1) #defaultFormatAxes(ax) #cax1.set_ylabel(r'$[m/s]$') figCount+=1 #figureTitles[figCount] = '$v_y$' #fig = plt.figure(figCount,figsize=figureSize) #figures[figCount] = fig #ax = fig.add_subplot(111, aspect='equal') #im = 
ax.imshow(gridVy, origin='lower', extent=(bounds[0],bounds[1],bounds[2],bounds[3]), cmap=plt.get_cmap('jet')) #divider = make_axes_locatable(ax) #cax1 = divider.append_axes("right", size="2%", pad=0.2) #fig.colorbar(im, cax = cax1) #im.set_clim(0,velocityMaxNorm) #ax.set_aspect('equal') #im.set_clim(velocityRange[0][1],velocityRange[1][1]) #if plotSettings["showTitles"]: #ax.set_title(figureTitles[figCount]) #ax.set_xlabel("$x$ $[m]$") #ax.set_ylabel("$y$ $[m]$") #addText(ax,"t: %.04f s, frame: %i" % (time,frameIdx), loc=locFrame, textp = propFrameText) #fig.tight_layout(pad=0.1) #defaultFormatAxes(ax) #cax1.set_ylabel(r'$[m/s]$') figCount+=1 figureTitles[figCount] = r'$||\mathbf{v}||$' fig = plt.figure(figCount,figsize=figureSize) figures[figCount] = fig ax = fig.add_subplot(111, aspect='equal') im = ax.imshow(vMagGrid, origin='lower', extent=(bounds[0],bounds[1],bounds[2],bounds[3]), cmap=plt.get_cmap('jet') , interpolation='none') divider = make_axes_locatable(ax) cax1 = divider.append_axes("right", size="3%", pad=0.2) c=fig.colorbar(im, cax = cax1) im.set_clim(0,velocityMaxNorm) ax.set_aspect('equal') if plotSettings["showTitles"]: ax.set_title(figureTitles[figCount]) ax.set_xlabel("$x$ [m]") ax.set_ylabel("$y$ [m]") cax1.set_ylabel(r'[m/s]') addText(ax,"t: %.04f s, frame: %i" % (time,frameIdx), loc=locFrame, textp = propFrameText) fig.tight_layout(pad=0.1) defaultFormatAxes(ax) defaultFormatColorbar(c) if not noScatterFile: weights = np.ones(vx.shape)/vx.size figCount+=1 figureTitles[figCount] = 'velocity histograms' fig = plt.figure(figCount,figsize=figureSize) figures[figCount] = fig #fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25) ax = fig.add_subplot(111) ax.hist(vMag,100,weights=weights,histtype='step',color='k') ax.hist(vx,100,weights=weights,histtype='step',color=[0.4]*3) ax.hist(vy,100,weights=weights,histtype='step',color=[0.6]*3) ax.set_xlabel('velocity') ax.set_xlim(pixelShiftRange) ax.set_ylabel('tie point fraction') if plotSettings["showTitles"]: ax.set_title(figureTitles[figCount]) ax.legend([r'$||\mathbf{v}||$','$v_x$','$v_y$']) fig.tight_layout(pad=0.1) defaultFormatAxes(ax) figCount+=1 figureTitles[figCount] = 'pixel offset histograms (search range)' fig = plt.figure(figCount,figsize=figureSize) figures[figCount] = fig ax = fig.add_subplot(111) ax.hist(np.sqrt(pixelVx**2+pixelVy**2),100,weights=weights,histtype='step',color='k') ax.hist(pixelVx,100,weights=weights,histtype='step',color=[0.4]*3) ax.hist(pixelVy,100,weights=weights,histtype='step',color=[0.6]*3) ax.set_xlabel(r'pixel vel. 
$\cdot$maxDeltaT [px])') ax.set_xlim(pixelShiftRange) ax.set_ylabel('tie point fraction') if plotSettings["showTitles"]: ax.set_title(figureTitles[figCount]) ax.legend([r'$||\mathbf{v}||$','$v_x$','$v_y$']) addText(ax,"t: %.04f s, frame: %i" % (time,frameIdx), loc=locFrame, textp = propFrameText) fig.tight_layout(pad=0.1) defaultFormatAxes(ax) #figCount+=1 #fig = plt.figure(figCount,figsize=figureSize) #ax = fig.add_subplot(111, aspect='equal') #plt.plot(x[maskXAxis], vy[maskXAxis], '.k',gx,gridVy[xAxisGridIndex,:],'r') #plt.set_title('$v_y$ along $x$ axis within $dy = %.1f$ of $y = %.1f$'%(dy,y0)) #plt.set_xlabel('$x$ $[m]$') #plt.set_ylabel('$v_y$ $[m/s]$') #plt.axis('tight') figCount+=1 figureTitles[figCount] = r'$||\mathbf{v}||$ along $y$ axis within $dx = %.2f$ of $x = %.2f$ '%(dx,x0) fig = plt.figure(figCount,figsize=figureSize) figures[figCount] = fig ax = fig.add_subplot(111) if not noScatterFile: ax.plot(vMag[maskYAxis], y[maskYAxis], '.k',ms=markerSize) ax.plot(vMagGrid[:,yAxisGridIndex] * imageFinalMask[:,yAxisGridIndex],gy, color=[0.5]*3, lw=lineSettings["thick"]) if plotSettings["showTitles"]: ax.set_title(figureTitles[figCount]) ax.set_xlim([0,velocityMaxNorm]) ax.set_ylim(bounds[2:4]) ax.set_xlabel('$||\mathbf{v}||$ [m/s]') ax.set_ylabel('$y$ [m]') addText(ax,"t: %.04f s, frame: %i" % (time,frameIdx), loc=locFrame, textp = propFrameText) fig.tight_layout(pad=0.1) defaultFormatAxes(ax) figCount+=1 figureTitles[figCount] = r'$||\mathbf{v}||$ along $x$ axis within $dy = %.2f$ of $y = %.2f$' % (dy,y0) fig = plt.figure(figCount,figsize=figureSize) figures[figCount] = fig ax = fig.add_subplot(111) if not noScatterFile: ax.plot(x[maskXAxis], vMag[maskXAxis], '.k', ms=markerSize) ax.plot(gx,vMagGrid[xAxisGridIndex,:] * imageFinalMask[xAxisGridIndex,:] ,color=[0.5]*3 ,lw=lineSettings["thick"]) if plotSettings["showTitles"]: ax.set_title(figureTitles[figCount]) ax.set_xlim(bounds[0:2]) ax.set_ylim([0,velocityMaxNorm]) ax.set_ylabel('$||\mathbf{v}||$ [m/s]') ax.set_xlabel('$x$ $[m]$') addText(ax,"t: %.04f s, frame: %i" % (time,frameIdx), loc=locFrame, textp = propFrameText) fig.tight_layout(pad=0.1) defaultFormatAxes(ax) #plot masked velocity with velocityMask figCount+=1 figureTitles[figCount] = r'Velocity $||\mathbf{v}||$ , masked grid' fig = plt.figure(figCount,figsize=figureSize) figures[figCount] = fig ax = fig.add_subplot(111, aspect='equal', axisbg=axisBG) absVelMasked = vMagGrid * imageFinalMask; if plotSettings["showTitles"]: ax.set_title(figureTitles[figCount]) ax.set_xlabel('$x$ [m]') ax.set_ylabel('$y$ [m]') ax.set_axisbelow(True) #image im = ax.imshow(absVelMasked, origin='lower', extent=(bounds[0],bounds[1],bounds[2],bounds[3]), cmap=plt.get_cmap('jet') , interpolation='none', zorder=1.2, clim=[0,velocityMaxNorm]) # colorbar divider = make_axes_locatable(ax) cax1 = divider.append_axes("right", size="3%", pad=0.2) cax1.tick_params(length=3) c=fig.colorbar(im, cax = cax1, format=ticker.FuncFormatter(lambda x,pos: "%0.1f"%x)) for l in cax1.yaxis.get_ticklabels()[1::2]: l.set_visible(False) cax1.set_title(r'[m/s]',fontsize=10) # grid ax.grid(linestyle="-",linewidth=lineSettings["extra-thin"], color=[0.5]*3) # remove every second label for label in ax.xaxis.get_ticklabels()[::2]: label.set_visible(False) # ticks ax.tick_params(top='off',right='off',bottom='off',left='off') addText(ax,"t: %.04f s, frame: %i" % (time,frameIdx), loc=locFrame, textp = propFrameText) defaultFormatAxes(ax) defaultFormatColorbar(c) fig.tight_layout(pad=0.1) if residualsFound: mean = 
np.mean(correlationVelocityResiduals) med=np.median(correlationVelocityResiduals) maxVal = 6.0*med figCount+=1 figureTitles[figCount] = r'correlation velocity uncertainty' fig = plt.figure(figCount,figsize=figureSize) figures[figCount] = fig ax = fig.add_subplot(111) weights = np.ones(correlationVelocityResiduals.shape)/correlationVelocityResiduals.size ax.hist(correlationVelocityResiduals,100,range=[0.0,maxVal], weights=weights,histtype='step',color='k') ax.axvline(mean,color="gray",ls="dashed") ax.set_xlabel('correlation velocity uncertainty [m/s]') ax.set_ylabel('tie point fraction') ax.set_xlim([0,corrVelocityUncertaintyMax]) ax.set_ylim([0,tiePointFractionMax]) if plotSettings["showTitles"]: ax.set_title(figureTitles[figCount]) addText(ax,"t: %.04f s, frame: %i" % (time,frameIdx), loc=locFrame, textp = propFrameText) fig.tight_layout(pad=0.1) defaultFormatAxes(ax) mean = np.mean(correlationLocationResiduals) med =np.median(correlationLocationResiduals) maxVal = 6.0*med figCount+=1 figureTitles[figCount] = r'correlation location uncertainty' fig = plt.figure(figCount,figsize=figureSize) figures[figCount] = fig ax = fig.add_subplot(111) weights = np.ones(correlationLocationResiduals.shape)/correlationLocationResiduals.size ax.hist(correlationLocationResiduals,100,range=[0.0,maxVal], weights=weights,histtype='step', color='k') ax.axvline(mean,color="gray",ls="dashed") ax.set_xlabel('correlation location uncertainty [m]') ax.set_ylabel('tie point fraction') ax.set_xlim([0,corrLocationUncertaintyMax]) ax.set_ylim([0,tiePointFractionMax]) if plotSettings["showTitles"]: ax.set_title(figureTitles[figCount]) addText(ax,"t: %.04f s, frame: %i" % (time,frameIdx), loc=locFrame, textp = propFrameText) fig.tight_layout(pad=0.1) defaultFormatAxes(ax) plt.draw() if plotSettings["savePlots"]: print("Saving plots...") for figIdx, fig in figures.items(): outFileName = os.path.join( options["folder"], "%s%03i%s" % (plotSettings["figurePrefix"], figIdx,plotSettings["figureExt"]) ) fig.savefig(outFileName, dpi=600) plt.close(fig) with open(os.path.join( options["folder"], "figureTitles.json") ,"w" ) as f: json.dump(figureTitles,f) else: print("Showing plots...") plt.show() #plt.close("all") if __name__ == "__main__": try: sys.exit(plotCmd()) except Exception as e: print("====================================================================") print("Exception occured: " + str(e)) print("====================================================================") traceback.print_exc(file=sys.stdout) sys.exit(111)
{ "content_hash": "35ddeb2b54a3e64159b1488cf6b44ab5", "timestamp": "", "source": "github", "line_count": 649, "max_line_length": 170, "avg_line_length": 38.55469953775039, "alnum_prop": 0.5995124290624251, "repo_name": "gabyx/acciv", "id": "61f747d03e79d9033929a85eea78cec1f8c7a394", "size": "25045", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/strainMeasurement/plotVelocities.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1358" }, { "name": "C++", "bytes": "286340" }, { "name": "CMake", "bytes": "5926" }, { "name": "Makefile", "bytes": "649" }, { "name": "Matlab", "bytes": "17000" }, { "name": "Python", "bytes": "60835" }, { "name": "Shell", "bytes": "7525" } ], "symlink_target": "" }
import six.moves from numbers import Integral from .iter_dispatch import range_iterator __all__ = ['xrange'] def _check_integral(value): if not isinstance(value, Integral): raise TypeError("'{}' object cannot be interpreted " "as an integer".format(type(value).__name__)) class xrange(object): """A replacement for Python 3 `range()` (and Python 2 `xrange()`) that yields picklable iterators when iterated upon. """ __slots__ = ['_start', '_stop', '_step'] def __init__(self, *args): self._start = 0 self._step = 1 if len(args) == 0: raise TypeError("{} expected 1 arguments, got 0".format( self.__class__.__name__)) elif len(args) == 1: self._stop = args[0] self._start = 0 elif len(args) >= 2: self._start = args[0] self._stop = args[1] if len(args) == 3: self._step = args[2] if len(args) > 3: raise TypeError("{} expected at most 3 arguments, got {}".format( self.__class__.__name__, len(args))) _check_integral(self._start) _check_integral(self._stop) _check_integral(self._step) @property def start(self): return self._start @property def stop(self): return self._stop @property def step(self): return self._step def count(self, i): """rangeobject.count(value) -> integer -- return number of occurrences of value """ if self._stop > self._start and self._step > 0: return int(self._start <= i < self._stop and (i - self._start) % self._step == 0) elif self._stop < self._start and self._step < 0: return int(self._start >= i > self._stop and (i - self._start) % self._step == 0) else: return False def index(self, i): """xrangeobject.index(value, [start, [stop]]) -> integer -- return index of value. Raise ValueError if the value is not present. """ if self.count(i) == 0: raise ValueError("{} is not in range".format(i)) return (i - self._start) // self._step def __len__(self): return len(six.moves.xrange(self._start, self._stop, self._step)) def __reduce__(self): return (self.__class__, (self.start, self.stop, self.step)) def __iter__(self): return range_iterator(self) def __repr__(self): return (__name__.split('.')[0] + '.' + self.__class__.__name__ + (str((self.start, self.stop)) if self.step == 1 else str((self.start, self.stop, self.step))))
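

if __name__ == '__main__':
    # Illustrative only (not part of the original module): a minimal sketch,
    # assuming range_iterator (from iter_dispatch) behaves like an ordinary
    # iterator. The point of this class is that, unlike a built-in
    # range/xrange iterator, the iterator returned by __iter__ can be pickled
    # mid-iteration and resumed.
    import pickle

    it = iter(xrange(2, 12, 2))       # range_iterator over 2, 4, 6, 8, 10
    print(next(it))                   # consume the first element: 2
    blob = pickle.dumps(it)           # a built-in iterator would raise here
    print(list(pickle.loads(blob)))   # expected to resume with [4, 6, 8, 10]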
{ "content_hash": "ce2a688e1ada7be2c65da1a032abadbd", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 78, "avg_line_length": 31.689655172413794, "alnum_prop": 0.5208560029017048, "repo_name": "orhanf/picklable_itertools", "id": "e4d6c12203d7fcebeff80e728cf4da4437830b88", "size": "2757", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "picklable_itertools/range.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "60689" } ], "symlink_target": "" }
"""This file contains XChat scrollback log file parser in plaso. Information updated 06 September 2013. Besides the logging capability, the XChat IRC client has the option to record the text for opened tabs. So, when rejoining a particular channel and/or a particular conversation, XChat will display the last messages exchanged. This artifact could be present, if not disabled, even if normal logging is disabled. From the XChat FAQ (http://xchatdata.net/Using/FAQ): Q: 'How do I keep text from previous sessions from being displayed when I join a channel?' R: 'Starting in XChat 2.8.4, XChat implemented the Scrollback feature which displays text from the last time you had a particular tab open. To disable this setting for all channels, Go to Settings -> Preferences -> Logging and uncheck Display scrollback from previous session. In XChat 2.8.6, XChat implemented both Per Channel Logging, and Per Channel Scrollbacks. If you are on 2.8.6 or newer, you can disable loading scrollback for just one particular tab name by right clicking on the tab name, selecting Settings, and then unchecking Reload scrollback' The log file format differs from logging format, but it's quite simple 'T 1232315916 Python interface unloaded' <T><space><decimal timestamp><space><text><\n> The time reported in the log is the number of seconds since January 1, 1970 00:00:00 UTC (from source code, time(0)). The <text> part could contain some 'decorators' (bold, underline, colors indication, etc.), so the parser should strip those control fields. References http://xchat.org """ import logging import pyparsing from plaso.containers import time_events from plaso.lib import eventdata from plaso.lib import timelib from plaso.parsers import manager from plaso.parsers import text_parser __author__ = 'Francesco Picasso (francesco.picasso@gmail.com)' class XChatScrollbackEvent(time_events.PosixTimeEvent): """Convenience class for a XChat Scrollback line event. Attributes: nickname: a string containin the nickname. offset: an integer containing the offset of the event. text: a string containing the text sent by nickname or other text (server, messages, etc.). """ DATA_TYPE = u'xchat:scrollback:line' def __init__(self, posix_time, offset, nickname, text): """Initializes the event object. Args: posix_time: the POSIX time value, which contains the number of seconds since January 1, 1970 00:00:00 UTC. offset: an integer containing the offset of the event. nickname: a string containin the nickname. text: a string containing the text sent by nickname or other text (server, messages, etc.). """ super(XChatScrollbackEvent, self).__init__( posix_time, eventdata.EventTimestamp.ADDED_TIME) self.nickname = nickname self.offset = offset self.text = text class XChatScrollbackParser(text_parser.PyparsingSingleLineTextParser): """Parse XChat scrollback log files.""" NAME = u'xchatscrollback' DESCRIPTION = u'Parser for XChat scrollback log files.' _ENCODING = u'UTF-8' # Define how a log line should look like. LOG_LINE = ( pyparsing.Literal(u'T').suppress() + pyparsing.Word(pyparsing.nums).setResultsName(u'timestamp') + pyparsing.SkipTo(pyparsing.LineEnd()).setResultsName(u'text')) LOG_LINE.parseWithTabs() # Define the available log line structures. LINE_STRUCTURES = [ (u'logline', LOG_LINE), ] # Define for the stripping phase. 
STRIPPER = ( pyparsing.Word(u'\x03', pyparsing.nums, max=3).suppress() | pyparsing.Word(u'\x02\x07\x08\x0f\x16\x1d\x1f', exact=1).suppress()) # Define the structure for parsing <text> and get <nickname> and <text> MSG_NICK_START = pyparsing.Literal(u'<') MSG_NICK_END = pyparsing.Literal(u'>') MSG_NICK = pyparsing.SkipTo(MSG_NICK_END).setResultsName(u'nickname') MSG_ENTRY_NICK = pyparsing.Optional(MSG_NICK_START + MSG_NICK + MSG_NICK_END) MSG_ENTRY_TEXT = pyparsing.SkipTo(pyparsing.LineEnd()).setResultsName(u'text') MSG_ENTRY = MSG_ENTRY_NICK + MSG_ENTRY_TEXT MSG_ENTRY.parseWithTabs() def __init__(self): """Initializes a parser object.""" super(XChatScrollbackParser, self).__init__() self._offset = 0 def VerifyStructure(self, parser_mediator, line): """Verify that this file is a XChat scrollback log file. Args: parser_mediator: A parser mediator object (instance of ParserMediator). line: A single line from the text file. Returns: True if this is the correct parser, False otherwise. """ structure = self.LOG_LINE try: parsed_structure = structure.parseString(line) except pyparsing.ParseException: logging.debug(u'Not a XChat scrollback log file') return False try: posix_time = int(parsed_structure.timestamp) except ValueError: logging.debug( u'Not a XChat scrollback log file, invalid timestamp string') return False if not timelib.Timestamp.FromPosixTime(posix_time): logging.debug(u'Not a XChat scrollback log file, invalid timestamp') return False return True def ParseRecord(self, parser_mediator, key, structure): """Parses a log record structure and produces events. Args: parser_mediator: A parser mediator object (instance of ParserMediator). key: An identification string indicating the name of the parsed structure. structure: A pyparsing.ParseResults object from a line in the log file. """ if key != u'logline': logging.warning( u'Unable to parse record, unknown structure: {0:s}'.format(key)) return try: posix_time = int(structure.timestamp) except ValueError: logging.debug(u'Invalid timestamp string {0:s}, skipping record'.format( structure.timestamp)) return try: nickname, text = self._StripThenGetNicknameAndText(structure.text) except pyparsing.ParseException: logging.debug(u'Error parsing entry at offset {0:d}'.format(self._offset)) return event_object = XChatScrollbackEvent( posix_time, self._offset, nickname, text) parser_mediator.ProduceEvent(event_object) def _StripThenGetNicknameAndText(self, text): """Strips decorators from text and gets <nickname> if available. This method implements the XChat strip_color2 and fe_print_text functions, slightly modified to get pure text. From the parsing point of view, after having stripped, the code takes everything as is, simply replacing tabs with spaces (as the original XChat code). So the VerifyStructure plays an important role in checking if the source file has the right format, since the method will not raise any parse exception and every content will be good. Args: text: The text obtained from the record entry. Returns: A list containing two entries: nickname: The nickname if present. text: The text written by nickname or service messages. """ stripped = self.STRIPPER.transformString(text) structure = self.MSG_ENTRY.parseString(stripped) text = structure.text.replace(u'\t', u' ') return structure.nickname, text manager.ParsersManager.RegisterParser(XChatScrollbackParser)
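

if __name__ == '__main__':
    # Illustrative only (not part of the parser): what the grammars above
    # extract from a scrollback line of the documented form
    # 'T <seconds since epoch> <text>'. The sample line is made up; inside
    # the parser the text is additionally run through STRIPPER before
    # MSG_ENTRY, which is a no-op here because no control bytes are present.
    sample = u'T 1232315916 <frank> the interface loaded fine'
    line = XChatScrollbackParser.LOG_LINE.parseString(sample)
    # line.timestamp == u'1232315916', line.text == u'<frank> the interface loaded fine'
    entry = XChatScrollbackParser.MSG_ENTRY.parseString(line.text)
    # entry.nickname == u'frank', entry.text == u'the interface loaded fine'
    print(line.timestamp, entry.nickname, entry.text)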
{ "content_hash": "2a5dfeb357467ac0dd27990d248a7572", "timestamp": "", "source": "github", "line_count": 208, "max_line_length": 80, "avg_line_length": 35.27884615384615, "alnum_prop": 0.7143635868083946, "repo_name": "dc3-plaso/plaso", "id": "efbca09d1bcfb8755119abb2137a1ce1f7616d85", "size": "7362", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "plaso/parsers/xchatscrollback.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1683" }, { "name": "Makefile", "bytes": "1151" }, { "name": "Python", "bytes": "3875098" }, { "name": "Shell", "bytes": "17861" } ], "symlink_target": "" }
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('tournament', '0181_auto_20180610_2123'), ] operations = [ migrations.AlterField( model_name='modrequest', name='type', field=models.CharField(choices=[('withdraw', 'Withdraw'), ('reregister', 'Re-register'), ('appeal_late_response', 'Appeal late response'), ('appeal_noshow', 'Appeal no-show'), ('appeal_draw_scheduling', 'Appeal scheduling draw'), ('claim_win_noshow', 'Claim a forfeit win (no-show)'), ('claim_win_effort', 'Claim a forfeit win (insufficient effort)'), ('claim_draw_scheduling', 'Claim a scheduling draw'), ('claim_loss', 'Claim a forfeit loss'), ('request_continuation', 'Request continuation')], max_length=255), ), ]
{ "content_hash": "616f02e7c61ae18035eff3d1cc1189d3", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 525, "avg_line_length": 51.0625, "alnum_prop": 0.6462668298653611, "repo_name": "cyanfish/heltour", "id": "440cd41dd7edeb177805774fe5728b8235f5028b", "size": "891", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "heltour/tournament/migrations/0182_auto_20180728_2344.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "13951" }, { "name": "HTML", "bytes": "310481" }, { "name": "JavaScript", "bytes": "26784" }, { "name": "Python", "bytes": "902629" }, { "name": "SCSS", "bytes": "32099" }, { "name": "Shell", "bytes": "4551" } ], "symlink_target": "" }
from django.template.loader import get_template from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest from django.template import Context from teaching_data_structure import TeachingHierarchy, SchoolYear, TeachingClass from utils import get_header_navbar import json from djangoSRV.teacher import get_average_for_all_assignments, get_average_grade_for_year, get_average_grade_for_class, get_student_grades_for_assingments, get_courses_with_assignments2 #from auth import auth_utils #from model.models import Teacher, Course def get_teacher_view(request): # at this point it should be a guaranteed that the session has # a key named 'user_id' representing a teacher if (request.user.is_authenticated() and request.user.is_type("Teacher")): name = request.user.first_name + " " + request.user.last_name teaching_hierarchy = get_courses_with_assignments2(request.user.user_id) template = get_template("teacher_view.html") elements = get_header_navbar("Teacher",name,"Teaching Overview") context = Context( {'header' : elements['header'], 'navbar' : elements['navbar'], 'teaching_hierarchy': teaching_hierarchy, 'menu' : get_template("teacher_menu.html").render(Context({"page":"overview"})), }) return HttpResponse(template.render(context)) return HttpResponseRedirect("/") def get_overview(request): if (request.user.is_authenticated() and request.is_ajax()): tch_id = request.user.user_id table = get_average_for_all_assignments(tch_id) return HttpResponse(json.dumps(table)) return HttpResponseBadRequest() def get_year_overview(request): if (request.user.is_authenticated() and request.is_ajax()): tch_id = request.user.user_id year = request.GET["year"] table = get_average_grade_for_year(tch_id, year) return HttpResponse(json.dumps(table)) return HttpResponseBadRequest() def get_class_overview(request): if (request.user.is_authenticated() and request.is_ajax()): tch_id = request.user.user_id year = request.GET["year"] cls = request.GET["cls"] table = get_average_grade_for_class(tch_id, year, cls) return HttpResponse(json.dumps(table)) return HttpResponseBadRequest() def get_assignment_overview(request): if (request.user.is_authenticated() and request.is_ajax()): tch_id = request.user.user_id cls = request.GET["cls"] as_name = request.GET["as_name"] table = get_student_grades_for_assingments(tch_id, cls, as_name) return HttpResponse(json.dumps(table)) return HttpResponseBadRequest()
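

# ---------------------------------------------------------------------------
# Illustrative wiring sketch (not part of this module): one way these views
# could be exposed in a urls.py. The URL patterns and import path are
# placeholders, not taken from the project; the AJAX views read their
# parameters from request.GET, so no URL captures are needed.
#
#   from django.conf.urls import url
#   from djangoSRV.Views import teacher_view
#
#   urlpatterns = [
#       url(r'^teacher/$', teacher_view.get_teacher_view),
#       url(r'^teacher/overview/$', teacher_view.get_overview),
#       url(r'^teacher/year/$', teacher_view.get_year_overview),
#       url(r'^teacher/class/$', teacher_view.get_class_overview),
#       url(r'^teacher/assignment/$', teacher_view.get_assignment_overview),
#   ]
# ---------------------------------------------------------------------------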
{ "content_hash": "fced5aecf652ce6522b05e130921a355", "timestamp": "", "source": "github", "line_count": 63, "max_line_length": 185, "avg_line_length": 44.17460317460318, "alnum_prop": 0.6773266259432267, "repo_name": "varun-verma11/CodeDrill", "id": "8a90a2cec364a78e16b3ad73f5d42df95abba6ac", "size": "2783", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "djangoSRV/Views/teacher_view.py", "mode": "33261", "license": "bsd-2-clause", "language": [ { "name": "CSS", "bytes": "85283" }, { "name": "JavaScript", "bytes": "608943" }, { "name": "PHP", "bytes": "29" }, { "name": "Python", "bytes": "76311" }, { "name": "Ruby", "bytes": "1882" } ], "symlink_target": "" }
"""This module contains Google Campaign Manager sensor.""" from typing import Dict, Optional, Sequence, Union from airflow.providers.google.marketing_platform.hooks.campaign_manager import GoogleCampaignManagerHook from airflow.sensors.base import BaseSensorOperator class GoogleCampaignManagerReportSensor(BaseSensorOperator): """ Check if report is ready. .. seealso:: Check official API docs: https://developers.google.com/doubleclick-advertisers/v3.3/reports/get .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:GoogleCampaignManagerReportSensor` :param profile_id: The DFA user profile ID. :type profile_id: str :param report_id: The ID of the report. :type report_id: str :param file_id: The ID of the report file. :type file_id: str :param api_version: The version of the api that will be requested for example 'v3'. :type api_version: str :param gcp_conn_id: The connection ID to use when fetching connection info. :type gcp_conn_id: str :param delegate_to: The account to impersonate using domain-wide delegation of authority, if any. For this to work, the service account making the request must have domain-wide delegation enabled. :type delegate_to: str :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). :type impersonation_chain: Union[str, Sequence[str]] """ template_fields = ( "profile_id", "report_id", "file_id", "impersonation_chain", ) def poke(self, context: Dict) -> bool: hook = GoogleCampaignManagerHook( gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to, api_version=self.api_version, impersonation_chain=self.impersonation_chain, ) response = hook.get_report(profile_id=self.profile_id, report_id=self.report_id, file_id=self.file_id) self.log.info("Report status: %s", response["status"]) return response["status"] != "PROCESSING" def __init__( self, *, profile_id: str, report_id: str, file_id: str, api_version: str = "v3.3", gcp_conn_id: str = "google_cloud_default", delegate_to: Optional[str] = None, mode: str = "reschedule", poke_interval: int = 60 * 5, impersonation_chain: Optional[Union[str, Sequence[str]]] = None, **kwargs, ) -> None: super().__init__(**kwargs) self.mode = mode self.poke_interval = poke_interval self.profile_id = profile_id self.report_id = report_id self.file_id = file_id self.api_version = api_version self.gcp_conn_id = gcp_conn_id self.delegate_to = delegate_to self.impersonation_chain = impersonation_chain
{ "content_hash": "71b215579c1c8a037428e652e06376de", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 110, "avg_line_length": 40.372093023255815, "alnum_prop": 0.6575460829493087, "repo_name": "apache/incubator-airflow", "id": "686481787fe0367b21acc7e59e8136c7a56fa222", "size": "4259", "binary": false, "copies": "3", "ref": "refs/heads/main", "path": "airflow/providers/google/marketing_platform/sensors/campaign_manager.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "69070" }, { "name": "Dockerfile", "bytes": "2001" }, { "name": "HTML", "bytes": "283783" }, { "name": "JavaScript", "bytes": "1387552" }, { "name": "Mako", "bytes": "1284" }, { "name": "Python", "bytes": "5482822" }, { "name": "Shell", "bytes": "40957" } ], "symlink_target": "" }
from distutils.core import setup setup( name='Cloud Images Query', version='0.1.1', packages=['cloud_images'], author='Chris R. Bennett', author_email='source@mruser.com', url='https://github.com/mruser/cloud_images/', description='Query tool for Ubuntu cloud-images', long_description='', license='License :: OSI Approved :: MIT License', requires=['boto (>=2.8)', 'urllib3 (>=1.5)', 'argparse'], scripts=['scripts/cloud_images'], classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'Topic :: System :: Software Distribution' ] )
{ "content_hash": "5f0c954d3227d3f8bcfa1769d737fcfe", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 61, "avg_line_length": 34.61904761904762, "alnum_prop": 0.6217331499312242, "repo_name": "mruser/cloud_images", "id": "424861b7ebed941dc64830e2c01c254e83b1d13f", "size": "727", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "23099" } ], "symlink_target": "" }
""" The :mod:`sklearn.model_selection._validation` module includes classes and functions to validate the model. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>, # Gael Varoquaux <gael.varoquaux@normalesup.org>, # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause from __future__ import print_function from __future__ import division import warnings import numbers import time import numpy as np import scipy.sparse as sp from ..base import is_classifier, clone from ..utils import indexable, check_random_state, safe_indexing from ..utils.fixes import astype from ..utils.validation import _is_arraylike, _num_samples from ..externals.joblib import Parallel, delayed, logger from ..metrics.scorer import check_scoring from ..exceptions import FitFailedWarning from ._split import KFold from ._split import LabelKFold from ._split import LeaveOneLabelOut from ._split import LeaveOneOut from ._split import LeavePLabelOut from ._split import LeavePOut from ._split import ShuffleSplit from ._split import LabelShuffleSplit from ._split import StratifiedKFold from ._split import StratifiedShuffleSplit from ._split import PredefinedSplit from ._split import check_cv, _safe_split __all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score', 'learning_curve', 'validation_curve'] ALL_CVS = {'KFold': KFold, 'LabelKFold': LabelKFold, 'LeaveOneLabelOut': LeaveOneLabelOut, 'LeaveOneOut': LeaveOneOut, 'LeavePLabelOut': LeavePLabelOut, 'LeavePOut': LeavePOut, 'ShuffleSplit': ShuffleSplit, 'LabelShuffleSplit': LabelShuffleSplit, 'StratifiedKFold': StratifiedKFold, 'StratifiedShuffleSplit': StratifiedShuffleSplit, 'PredefinedSplit': PredefinedSplit} LABEL_CVS = {'LabelKFold': LabelKFold, 'LeaveOneLabelOut': LeaveOneLabelOut, 'LeavePLabelOut': LeavePLabelOut, 'LabelShuffleSplit': LabelShuffleSplit} def cross_val_score(estimator, X, y=None, labels=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Evaluate a score by cross-validation Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. 
fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- scores : array of float, shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.cross_validation import cross_val_score >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS [ 0.33150734 0.08022311 0.03531764] See Also --------- :func:`sklearn.metrics.make_scorer`: Make a scorer from a performance metric or loss function. """ X, y, labels = indexable(X, y, labels) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params) for train, test in cv.split(X, y, labels)) return np.array(scores)[:, 0] def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, error_score='raise'): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scorer : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that has been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. 
""" if verbose > 1: if parameters is None: msg = "no parameters to be set" else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items())) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): test_score = error_score if return_train_score: train_score = error_score warnings.warn("Classifier fit failed. The score on this train-test" " partition for these parameters will be set to %f. " "Details: \n%r" % (error_score, e), FitFailedWarning) else: raise ValueError("error_score must be the string 'raise' or a" " numeric value. (Hint: if using 'raise', please" " make sure that it has been spelled correctly.)") else: test_score = _score(estimator, X_test, y_test, scorer) if return_train_score: train_score = _score(estimator, X_train, y_train, scorer) scoring_time = time.time() - start_time if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score] if return_train_score else [] ret.extend([test_score, _num_samples(X_test), scoring_time]) if return_parameters: ret.append(parameters) return ret def _score(estimator, X_test, y_test, scorer): """Compute the score of an estimator on a given test set.""" if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) instead." % (str(score), type(score))) return score def cross_val_predict(estimator, X, y=None, labels=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Generate cross-validated estimates for each input data point Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_jobs : integer, optional The number of CPUs to use to do the computation. 
-1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- predictions : ndarray This is the result of calling 'predict' Examples -------- >>> from sklearn import datasets, linear_model >>> from sklearn.cross_validation import cross_val_predict >>> diabetes = datasets.load_diabetes() >>> X = diabetes.data[:150] >>> y = diabetes.target[:150] >>> lasso = linear_model.Lasso() >>> y_pred = cross_val_predict(lasso, X, y) """ X, y, labels = indexable(X, y, labels) cv = check_cv(cv, y, classifier=is_classifier(estimator)) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) prediction_blocks = parallel(delayed(_fit_and_predict)( clone(estimator), X, y, train, test, verbose, fit_params) for train, test in cv.split(X, y, labels)) # Concatenate the predictions predictions = [pred_block_i for pred_block_i, _ in prediction_blocks] test_indices = np.concatenate([indices_i for _, indices_i in prediction_blocks]) if not _check_is_permutation(test_indices, _num_samples(X)): raise ValueError('cross_val_predict only works for partitions') inv_test_indices = np.empty(len(test_indices), dtype=int) inv_test_indices[test_indices] = np.arange(len(test_indices)) # Check for sparse predictions if sp.issparse(predictions[0]): predictions = sp.vstack(predictions, format=predictions[0].format) else: predictions = np.concatenate(predictions) return predictions[inv_test_indices] def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params): """Fit estimator and predict values for a given dataset split. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. 
Returns ------- predictions : sequence Result of calling 'estimator.predict' test : array-like This is the value of the test parameter """ # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) X_train, y_train = _safe_split(estimator, X, y, train) X_test, _ = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) predictions = estimator.predict(X_test) return predictions, test def _check_is_permutation(indices, n_samples): """Check whether indices is a reordering of the array np.arange(n_samples) Parameters ---------- indices : ndarray integer array to test n_samples : int number of expected elements Returns ------- is_partition : bool True iff sorted(locs) is range(n) """ if len(indices) != n_samples: return False hit = np.zeros(n_samples, bool) hit[indices] = True if not np.all(hit): return False return True def _index_param_value(X, v, indices): """Private helper function for parameter value indexing.""" if not _is_arraylike(v) or _num_samples(v) != _num_samples(X): # pass through: skip indexing return v if sp.issparse(v): v = v.tocsr() return safe_indexing(v, indices) def permutation_test_score(estimator, X, y, labels=None, cv=None, n_permutations=100, n_jobs=1, random_state=0, verbose=0, scoring=None): """Evaluate the significance of a cross-validated score with permutations Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like The target variable to try to predict in the case of supervised learning. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_permutations : integer, optional Number of times to permute ``y``. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. random_state : RandomState or an int seed (0 by default) A random number generator instance to define the state of the random permutations generator. verbose : integer, optional The verbosity level. Returns ------- score : float The true score without permuting targets. permutation_scores : array, shape (n_permutations,) The scores obtained for each permutations. pvalue : float The returned value equals p-value if `scoring` returns bigger numbers for better scores (e.g., accuracy_score). If `scoring` is rather a loss function (i.e. 
when lower is better such as with `mean_squared_error`) then this is actually the complement of the p-value: 1 - p-value. Notes ----- This function implements Test 1 in: Ojala and Garriga. Permutation Tests for Studying Classifier Performance. The Journal of Machine Learning Research (2010) vol. 11 """ X, y, labels = indexable(X, y, labels) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) random_state = check_random_state(random_state) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. score = _permutation_test_score(clone(estimator), X, y, labels, cv, scorer) permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_permutation_test_score)( clone(estimator), X, _shuffle(y, labels, random_state), labels, cv, scorer) for _ in range(n_permutations)) permutation_scores = np.array(permutation_scores) pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) return score, permutation_scores, pvalue permutation_test_score.__test__ = False # to avoid a pb with nosetests def _permutation_test_score(estimator, X, y, labels, cv, scorer): """Auxiliary function for permutation_test_score""" avg_score = [] for train, test in cv.split(X, y, labels): estimator.fit(X[train], y[train]) avg_score.append(scorer(estimator, X[test], y[test])) return np.mean(avg_score) def _shuffle(y, labels, random_state): """Return a shuffled copy of y eventually shuffle among same labels.""" if labels is None: indices = random_state.permutation(len(y)) else: indices = np.arange(len(labels)) for label in np.unique(labels): this_mask = (labels == label) indices[this_mask] = random_state.permutation(indices[this_mask]) return y[indices] def learning_curve(estimator, X, y, labels=None, train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None, exploit_incremental_learning=False, n_jobs=1, pre_dispatch="all", verbose=0): """Learning curve. Determines cross-validated training and test scores for different training set sizes. A cross-validation generator splits the whole dataset k times in training and test data. Subsets of the training set with varying sizes will be used to train the estimator and a score for each training subset size and the test set will be computed. Afterwards, the scores will be averaged over all k runs for each training subset size. Read more in the :ref:`User Guide <learning_curve>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. 
Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. exploit_incremental_learning : boolean, optional, default: False If the estimator supports incremental learning, this will be used to speed up fitting for different training set sizes. n_jobs : integer, optional Number of jobs to run in parallel (default 1). pre_dispatch : integer or string, optional Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The string can be an expression like '2*n_jobs'. verbose : integer, optional Controls the verbosity: the higher, the more messages. Returns ------- train_sizes_abs : array, shape = (n_unique_ticks,), dtype int Numbers of training examples that has been used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. train_scores : array, shape (n_ticks, n_cv_folds) Scores on training sets. test_scores : array, shape (n_ticks, n_cv_folds) Scores on test set. Notes ----- See :ref:`examples/model_selection/plot_learning_curve.py <example_model_selection_plot_learning_curve.py>` """ if exploit_incremental_learning and not hasattr(estimator, "partial_fit"): raise ValueError("An estimator must support the partial_fit interface " "to exploit incremental learning") X, y, labels = indexable(X, y, labels) cv = check_cv(cv, y, classifier=is_classifier(estimator)) cv_iter = cv.split(X, y, labels) # Make a list since we will be iterating multiple times over the folds cv_iter = list(cv_iter) scorer = check_scoring(estimator, scoring=scoring) n_max_training_samples = len(cv_iter[0][0]) # Because the lengths of folds can be significantly different, it is # not guaranteed that we use all of the available training data when we # use the first 'n_max_training_samples' samples. 
train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples) n_unique_ticks = train_sizes_abs.shape[0] if verbose > 0: print("[learning_curve] Training set sizes: " + str(train_sizes_abs)) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) if exploit_incremental_learning: classes = np.unique(y) if is_classifier(estimator) else None out = parallel(delayed(_incremental_fit_estimator)( clone(estimator), X, y, classes, train, test, train_sizes_abs, scorer, verbose) for train, test in cv.split(X, y, labels)) else: out = parallel(delayed(_fit_and_score)( clone(estimator), X, y, scorer, train[:n_train_samples], test, verbose, parameters=None, fit_params=None, return_train_score=True) for train, test in cv_iter for n_train_samples in train_sizes_abs) out = np.array(out)[:, :2] n_cv_folds = out.shape[0] // n_unique_ticks out = out.reshape(n_cv_folds, n_unique_ticks, 2) out = np.asarray(out).transpose((2, 1, 0)) return train_sizes_abs, out[0], out[1] def _translate_train_sizes(train_sizes, n_max_training_samples): """Determine absolute sizes of training subsets and validate 'train_sizes'. Examples: _translate_train_sizes([0.5, 1.0], 10) -> [5, 10] _translate_train_sizes([5, 10], 10) -> [5, 10] Parameters ---------- train_sizes : array-like, shape (n_ticks,), dtype float or int Numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of 'n_max_training_samples', i.e. it has to be within (0, 1]. n_max_training_samples : int Maximum number of training samples (upper bound of 'train_sizes'). Returns ------- train_sizes_abs : array, shape (n_unique_ticks,), dtype int Numbers of training examples that will be used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. """ train_sizes_abs = np.asarray(train_sizes) n_ticks = train_sizes_abs.shape[0] n_min_required_samples = np.min(train_sizes_abs) n_max_required_samples = np.max(train_sizes_abs) if np.issubdtype(train_sizes_abs.dtype, np.float): if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0: raise ValueError("train_sizes has been interpreted as fractions " "of the maximum number of training samples and " "must be within (0, 1], but is within [%f, %f]." % (n_min_required_samples, n_max_required_samples)) train_sizes_abs = astype(train_sizes_abs * n_max_training_samples, dtype=np.int, copy=False) train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples) else: if (n_min_required_samples <= 0 or n_max_required_samples > n_max_training_samples): raise ValueError("train_sizes has been interpreted as absolute " "numbers of training samples and must be within " "(0, %d], but is within [%d, %d]." % (n_max_training_samples, n_min_required_samples, n_max_required_samples)) train_sizes_abs = np.unique(train_sizes_abs) if n_ticks > train_sizes_abs.shape[0]: warnings.warn("Removed duplicate entries from 'train_sizes'. Number " "of ticks will be less than than the size of " "'train_sizes' %d instead of %d)." 
% (train_sizes_abs.shape[0], n_ticks), RuntimeWarning) return train_sizes_abs def _incremental_fit_estimator(estimator, X, y, classes, train, test, train_sizes, scorer, verbose): """Train estimator on training subsets incrementally and compute scores.""" train_scores, test_scores = [], [] partitions = zip(train_sizes, np.split(train, train_sizes)[:-1]) for n_train_samples, partial_train in partitions: train_subset = train[:n_train_samples] X_train, y_train = _safe_split(estimator, X, y, train_subset) X_partial_train, y_partial_train = _safe_split(estimator, X, y, partial_train) X_test, y_test = _safe_split(estimator, X, y, test, train_subset) if y_partial_train is None: estimator.partial_fit(X_partial_train, classes=classes) else: estimator.partial_fit(X_partial_train, y_partial_train, classes=classes) train_scores.append(_score(estimator, X_train, y_train, scorer)) test_scores.append(_score(estimator, X_test, y_test, scorer)) return np.array((train_scores, test_scores)).T def validation_curve(estimator, X, y, param_name, param_range, labels=None, cv=None, scoring=None, n_jobs=1, pre_dispatch="all", verbose=0): """Validation curve. Determine training and test scores for varying parameter values. Compute scores for an estimator with different values of a specified parameter. This is similar to grid search with one parameter. However, this will also compute training scores and is merely a utility for plotting the results. Read more in the :ref:`User Guide <learning_curve>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. param_name : string Name of the parameter that will be varied. param_range : array-like, shape (n_values,) The values of the parameter that will be evaluated. labels : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - An object to be used as a cross-validation generator. - An iterable yielding train, test splits. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` used. In all other cases, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. n_jobs : integer, optional Number of jobs to run in parallel (default 1). pre_dispatch : integer or string, optional Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The string can be an expression like '2*n_jobs'. verbose : integer, optional Controls the verbosity: the higher, the more messages. Returns ------- train_scores : array, shape (n_ticks, n_cv_folds) Scores on training sets. 
test_scores : array, shape (n_ticks, n_cv_folds) Scores on test set. Notes ----- See :ref:`examples/model_selection/plot_validation_curve.py <example_model_selection_plot_validation_curve.py>` """ X, y, labels = indexable(X, y, labels) cv = check_cv(cv, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) out = parallel(delayed(_fit_and_score)( estimator, X, y, scorer, train, test, verbose, parameters={param_name: v}, fit_params=None, return_train_score=True) for train, test in cv.split(X, y, labels) for v in param_range) out = np.asarray(out)[:, :2] n_params = len(param_range) n_cv_folds = out.shape[0] // n_params out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0)) return out[0], out[1]
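

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of this module): the two curve helpers
# defined above on a small toy problem, in the same spirit as the docstring
# examples of cross_val_score/cross_val_predict. The estimator and parameter
# grid are placeholders; shapes follow the documented return values.
#
#   import numpy as np
#   from sklearn.datasets import load_iris
#   from sklearn.svm import SVC
#
#   iris = load_iris()
#   X, y = iris.data, iris.target
#
#   # one row per gamma value, one column per CV fold -> shape (5, 3)
#   train_scores, test_scores = validation_curve(
#       SVC(), X, y, 'gamma', np.logspace(-6, -1, 5), cv=3)
#
#   # absolute train sizes plus per-size/per-fold train and test scores
#   sizes, lc_train, lc_test = learning_curve(
#       SVC(), X, y, train_sizes=np.linspace(0.1, 1.0, 5), cv=3)
# ---------------------------------------------------------------------------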
{ "content_hash": "c3a3309f1c4994624f821aae9d715f05", "timestamp": "", "source": "github", "line_count": 950, "max_line_length": 79, "avg_line_length": 38.369473684210526, "alnum_prop": 0.6336177333955173, "repo_name": "olologin/scikit-learn", "id": "11b2a218d33e4dcbf0cdb4868cc277bbdc536f1e", "size": "36451", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "sklearn/model_selection/_validation.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "3366" }, { "name": "C", "bytes": "394787" }, { "name": "C++", "bytes": "140225" }, { "name": "Makefile", "bytes": "1579" }, { "name": "PowerShell", "bytes": "17042" }, { "name": "Python", "bytes": "6528219" }, { "name": "Shell", "bytes": "9256" } ], "symlink_target": "" }
import unittest from tests import testutils from common import environment from tests import testconstants from services.restaurants.services import countrysubdivisionslocalfavoritesservice from services.restaurants.domain.options import countrysubdivisionslocalfavoritesrequestoptions class CountrySubdivisionsLocalFavoritesServiceTest(unittest.TestCase): def setUp(self): test_utils = testutils.TestUtils(environment.Environment.SANDBOX) self._service = countrysubdivisionslocalfavoritesservice.CountrySubdivisionsLocalFavoritesService( testconstants.TestConstants.SANDBOX_CONSUMER_KEY, test_utils.get_private_key(), environment.Environment.SANDBOX) def test_country_subdivision_service(self): options = countrysubdivisionslocalfavoritesrequestoptions.CountrySubdivisionsLocalFavoritesRequestOptions('USA') country_subdivisions = self._service.get_country_subdivisions(options) assert country_subdivisions is not None assert len(country_subdivisions.country_subdivision) > 0
{ "content_hash": "0214e77f2f2814dbcdb0c94febf5497b", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 120, "avg_line_length": 59, "alnum_prop": 0.6924939467312349, "repo_name": "M4gn4tor/mastercard-api-python", "id": "997cc6858da3c7133379a4220b1f678baa196626", "size": "1239", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Tests/services/restaurants/countrysubdivisionslocalfavoritesservicetest.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "229234" } ], "symlink_target": "" }
""" homeassistant.bootstrap ~~~~~~~~~~~~~~~~~~~~~~~ Provides methods to bootstrap a home assistant instance. Each method will return a tuple (bus, statemachine). After bootstrapping you can add your own components or start by calling homeassistant.start_home_assistant(bus) """ import os import logging from collections import defaultdict import homeassistant.core as core import homeassistant.util.dt as date_util import homeassistant.util.package as pkg_util import homeassistant.util.location as loc_util import homeassistant.config as config_util import homeassistant.loader as loader import homeassistant.components as core_components import homeassistant.components.group as group from homeassistant.helpers.entity import Entity from homeassistant.const import ( EVENT_COMPONENT_LOADED, CONF_LATITUDE, CONF_LONGITUDE, CONF_TEMPERATURE_UNIT, CONF_NAME, CONF_TIME_ZONE, CONF_CUSTOMIZE, TEMP_CELCIUS, TEMP_FAHRENHEIT) _LOGGER = logging.getLogger(__name__) ATTR_COMPONENT = 'component' PLATFORM_FORMAT = '{}.{}' def setup_component(hass, domain, config=None): """ Setup a component and all its dependencies. """ if domain in hass.config.components: return True _ensure_loader_prepared(hass) if config is None: config = defaultdict(dict) components = loader.load_order_component(domain) # OrderedSet is empty if component or dependencies could not be resolved if not components: return False for component in components: if component in hass.config.components: continue if not _setup_component(hass, component, config): return False return True def _handle_requirements(component, name): """ Installs requirements for component. """ if not hasattr(component, 'REQUIREMENTS'): return True for req in component.REQUIREMENTS: if not pkg_util.install_package(req): _LOGGER.error('Not initializing %s because could not install ' 'dependency %s', name, req) return False return True def _setup_component(hass, domain, config): """ Setup a component for Home Assistant. """ component = loader.get_component(domain) missing_deps = [dep for dep in component.DEPENDENCIES if dep not in hass.config.components] if missing_deps: _LOGGER.error( 'Not initializing %s because not all dependencies loaded: %s', domain, ", ".join(missing_deps)) return False if not _handle_requirements(component, domain): return False try: if not component.setup(hass, config): _LOGGER.error('component %s failed to initialize', domain) return False except Exception: # pylint: disable=broad-except _LOGGER.exception('Error during setup of component %s', domain) return False hass.config.components.append(component.DOMAIN) # Assumption: if a component does not depend on groups # it communicates with devices if group.DOMAIN not in component.DEPENDENCIES: hass.pool.add_worker() hass.bus.fire( EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: component.DOMAIN}) return True def prepare_setup_platform(hass, config, domain, platform_name): """ Loads a platform and makes sure dependencies are setup. 
""" _ensure_loader_prepared(hass) platform_path = PLATFORM_FORMAT.format(domain, platform_name) platform = loader.get_component(platform_path) # Not found if platform is None: return None # Already loaded elif platform_path in hass.config.components: return platform # Load dependencies if hasattr(platform, 'DEPENDENCIES'): for component in platform.DEPENDENCIES: if not setup_component(hass, component, config): _LOGGER.error( 'Unable to prepare setup for platform %s because ' 'dependency %s could not be initialized', platform_path, component) return None if not _handle_requirements(platform, platform_path): return None return platform # pylint: disable=too-many-branches, too-many-statements def from_config_dict(config, hass=None): """ Tries to configure Home Assistant from a config dict. Dynamically loads required components and its dependencies. """ if hass is None: hass = core.HomeAssistant() process_ha_core_config(hass, config.get(core.DOMAIN, {})) enable_logging(hass) _ensure_loader_prepared(hass) # Make a copy because we are mutating it. # Convert it to defaultdict so components can always have config dict # Convert values to dictionaries if they are None config = defaultdict( dict, {key: value or {} for key, value in config.items()}) # Filter out the repeating and common config section [homeassistant] components = (key for key in config.keys() if ' ' not in key and key != core.DOMAIN) if not core_components.setup(hass, config): _LOGGER.error('Home Assistant core failed to initialize. ' 'Further initialization aborted.') return hass _LOGGER.info('Home Assistant core initialized') # Setup the components for domain in loader.load_order_components(components): _setup_component(hass, domain, config) return hass def from_config_file(config_path, hass=None): """ Reads the configuration file and tries to start all the required functionality. Will add functionality to 'hass' parameter if given, instantiates a new Home Assistant object if 'hass' is not given. """ if hass is None: hass = core.HomeAssistant() # Set config dir to directory holding config file hass.config.config_dir = os.path.abspath(os.path.dirname(config_path)) config_dict = config_util.load_config_file(config_path) return from_config_dict(config_dict, hass) def enable_logging(hass): """ Setup the logging for home assistant. """ logging.basicConfig(level=logging.INFO) fmt = ("%(log_color)s%(asctime)s %(levelname)s (%(threadName)s) " "[%(name)s] %(message)s%(reset)s") try: from colorlog import ColoredFormatter logging.getLogger().handlers[0].setFormatter(ColoredFormatter( fmt, datefmt='%y-%m-%d %H:%M:%S', reset=True, log_colors={ 'DEBUG': 'cyan', 'INFO': 'green', 'WARNING': 'yellow', 'ERROR': 'red', 'CRITICAL': 'red', } )) except ImportError: _LOGGER.warning( "Colorlog package not found, console coloring disabled") # Log errors to a file if we have write access to file or config dir err_log_path = hass.config.path('home-assistant.log') err_path_exists = os.path.isfile(err_log_path) # Check if we can write to the error log if it exists or that # we can create files in the containing directory if not. 
if (err_path_exists and os.access(err_log_path, os.W_OK)) or \ (not err_path_exists and os.access(hass.config.config_dir, os.W_OK)): err_handler = logging.FileHandler( err_log_path, mode='w', delay=True) err_handler.setLevel(logging.WARNING) err_handler.setFormatter( logging.Formatter('%(asctime)s %(name)s: %(message)s', datefmt='%y-%m-%d %H:%M:%S')) logging.getLogger('').addHandler(err_handler) else: _LOGGER.error( 'Unable to setup error log %s (access denied)', err_log_path) def process_ha_core_config(hass, config): """ Processes the [homeassistant] section from the config. """ hac = hass.config def set_time_zone(time_zone_str): """ Helper method to set time zone in HA. """ if time_zone_str is None: return time_zone = date_util.get_time_zone(time_zone_str) if time_zone: hac.time_zone = time_zone date_util.set_default_time_zone(time_zone) else: _LOGGER.error('Received invalid time zone %s', time_zone_str) for key, attr in ((CONF_LATITUDE, 'latitude'), (CONF_LONGITUDE, 'longitude'), (CONF_NAME, 'location_name')): if key in config: setattr(hac, attr, config[key]) set_time_zone(config.get(CONF_TIME_ZONE)) customize = config.get(CONF_CUSTOMIZE) if isinstance(customize, dict): for entity_id, attrs in config.get(CONF_CUSTOMIZE, {}).items(): if not isinstance(attrs, dict): continue Entity.overwrite_attribute(entity_id, attrs.keys(), attrs.values()) if CONF_TEMPERATURE_UNIT in config: unit = config[CONF_TEMPERATURE_UNIT] if unit == 'C': hac.temperature_unit = TEMP_CELCIUS elif unit == 'F': hac.temperature_unit = TEMP_FAHRENHEIT # If we miss some of the needed values, auto detect them if None not in ( hac.latitude, hac.longitude, hac.temperature_unit, hac.time_zone): return _LOGGER.info('Auto detecting location and temperature unit') info = loc_util.detect_location_info() if info is None: _LOGGER.error('Could not detect location information') return if hac.latitude is None and hac.longitude is None: hac.latitude = info.latitude hac.longitude = info.longitude if hac.temperature_unit is None: if info.use_fahrenheit: hac.temperature_unit = TEMP_FAHRENHEIT else: hac.temperature_unit = TEMP_CELCIUS if hac.location_name is None: hac.location_name = info.city if hac.time_zone is None: set_time_zone(info.time_zone) def _ensure_loader_prepared(hass): """ Ensure Home Assistant loader is prepared. """ if not loader.PREPARED: loader.prepare(hass)
{ "content_hash": "6c443375f210315c57fc01a7b1528c7b", "timestamp": "", "source": "github", "line_count": 325, "max_line_length": 79, "avg_line_length": 31.055384615384614, "alnum_prop": 0.6398494005746557, "repo_name": "CCOSTAN/home-assistant", "id": "e5f6d2b967259e06f1946d735b647be57f3b2408", "size": "10093", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "homeassistant/bootstrap.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "798938" }, { "name": "Python", "bytes": "769877" }, { "name": "Shell", "bytes": "5097" } ], "symlink_target": "" }
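For context, a minimal sketch of how the bootstrap helpers in the file above are typically driven. This is not part of the record: the config keys used and the final hass.start() call are assumptions about Home Assistant cores of this vintage, not something defined in bootstrap.py itself.

# Hypothetical usage sketch for homeassistant.bootstrap (entry points assumed, not authoritative).
from homeassistant import bootstrap

config = {
    'homeassistant': {'name': 'Home', 'time_zone': 'UTC'},  # handled by process_ha_core_config()
    'demo': {},  # any other top-level section is loaded as a component via _setup_component()
}

hass = bootstrap.from_config_dict(config)  # returns a core.HomeAssistant instance
hass.start()  # assumed API of the core object; not defined in this file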
import logging
import json
import time
import urllib.request
import urllib.parse

import numpy

logger = logging.getLogger(__name__)

DATE_FORMATS = [
    '%Y-%m-%d',
    '%Y/%m/%d',
    '%B %d, %Y',
    '%b %d, %Y',
    '%d %B %Y',
    '%d. %B %Y',
]
# Used these to harvest as many birth dates as possible
DATE_ANOMALIES = {
    'T08:30': '',
    'T15:30': '',
    'T00:00:00': '',
    'T16:00': '',
    'T13:25': '',
    'T17:55Z': '',
    'T06:19': '',
    'T23:55': '',
    'T01:51': '',
    'T20:44': '',
    'T23:19': '',
    'T04:11Z': '',
    'T04:45': '',
    'T09:03+01:00': '',
    'T19:46': '',
    'T09': '',
    'T23:30Z': '',
    'T12:13': '',
}
FREEBASE_API_KEY = 'Your Google API Key Here'
FREEBASE_API_ENDPOINT = 'https://www.googleapis.com/freebase/v1/mqlread'


def query_freebase(query, cursor='', wait=None):
    if wait is None:
        wait = .864
    params = {
        'query': '[{}]'.format(json.dumps(query)),
        'cursor': cursor,
        'key': FREEBASE_API_KEY,
    }
    url = '{}?{}'.format(FREEBASE_API_ENDPOINT, urllib.parse.urlencode(params))
    # With api key I use, I have 100k requests per day limit, which boils down to a bit more than one per second.
    # To avoid forgetting to set this anywhere else, it's here. Better to wait for 1 second than get banned.
    time.sleep(wait)
    request = urllib.request.Request(url)
    try:
        response = urllib.request.urlopen(request)
        return response.read().decode('utf-8')
    except Exception as e:
        logger.error('Error reading from freebase')
        logger.error(e)
        return json.dumps({'error': 'error'})


def clean_date_anomalies(date_string):
    for anomaly, replacement in DATE_ANOMALIES.items():
        date_string = date_string.replace(anomaly, replacement)
    return date_string.strip()


def update_statistical_data(zodiac):
    min_value = min(list(zodiac.get('distributionPercentages').values()))
    max_value = max(list(zodiac.get('distributionPercentages').values()))
    for_update = {
        'min': min_value,
        'max': max_value,
        'range': max_value - min_value,
        'mean': numpy.mean(list(zodiac.get('distributionPercentages').values())),
        'median': numpy.median(list(zodiac.get('distributionPercentages').values())),
        'standardDeviation': numpy.std(list(zodiac.get('distributionPercentages').values())),
        'average': numpy.average(list(zodiac.get('distributionPercentages').values())),
    }
    zodiac.update(for_update)
    return zodiac
{ "content_hash": "4ac77a6884cf7d20d65f69772afbfa4e", "timestamp": "", "source": "github", "line_count": 94, "max_line_length": 110, "avg_line_length": 24.47872340425532, "alnum_prop": 0.6562364189482833, "repo_name": "Pancho/failiac", "id": "63d52711a61684a296004a40c8f3431f80208ca9", "size": "2301", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "external/utils.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "8000" }, { "name": "JavaScript", "bytes": "2420056" }, { "name": "Python", "bytes": "4058" } ], "symlink_target": "" }
""" Copyright 2013 LinkedIn Corp. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import naarad.utils import logging import os from naarad.metrics.metric import Metric logger = logging.getLogger('naarad.metrics.INNOMetric') class INNOMetric(Metric): C_MAX_COMMANDS = 10 graph_lib = None def __init__(self, metric_type, infile, hostname, aggr_metrics, outdir, resource_path, label, ts_start, ts_end, rule_strings, important_sub_metrics, anomaly_detection_metrics, **other_options): Metric.__init__(self, metric_type, infile, hostname, aggr_metrics, outdir, resource_path, label, ts_start, ts_end, rule_strings, important_sub_metrics, anomaly_detection_metrics) for (key, val) in other_options.iteritems(): setattr(self, key, val.split()) def get_csv_C(self, command, column): outcsv = os.path.join(self.resource_directory, "{0}.{1}.{2}.csv".format(self.metric_type, command, column)) self.csv_column_map[outcsv] = command + '.' + column return outcsv def parse(self): logger.info("Working on innotop metric: %s", self.infile) if self.metric_type == "INNOTOP-C": return self.parse_innotop_mode_c() elif self.metric_type == "INNOTOP-M": return self.parse_innotop_mode_m() else: return self.parse_innotop_mode_b() def parse_innotop_mode_c(self): with open(self.infile, 'r') as infh: headerline = infh.readline() columns = headerline.split()[2:] outfilehandlers = {} for line in infh: l = line.strip().split(' ', 1) if len(l) <= 1: continue ts = l[0].strip().replace('T', ' ') try: nameval = l[1].strip().split('\t', 1) except IndexError: logger.warn("Badly formatted line: %s", line) logger.warn("Expected tab separated values") continue command = nameval[0] if command not in outfilehandlers: # Only looking at top N commands if len(outfilehandlers) > self.C_MAX_COMMANDS: continue # TODO(rmaheshw) : Use collections.defaultdict instead to avoid initializing dicts outfilehandlers[command] = {} words = nameval[1].split('\t') for i in range(len(words)): if self.options and columns[i] not in self.options: continue if columns[i] not in outfilehandlers[command]: outfilehandlers[command][columns[i]] = open(self.get_csv_C(command, columns[i]), 'w') self.csv_files.append(self.get_csv_C(command, columns[i])) ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone) outfilehandlers[command][columns[i]].write(ts + ',') outfilehandlers[command][columns[i]].write(words[i]) outfilehandlers[command][columns[i]].write('\n') for command in outfilehandlers: for column in outfilehandlers[command]: outfilehandlers[command][column].close() return True def parse_innotop_mode_b(self): """ Generic parsing method for all other modes """ with open(self.infile, 'r') as infh: # Pre processing to figure out different headers max_row_quot = 0 valrow = -1 thisrowcolumns = {} data = {} while True: line1 = infh.readline() words = line1.split() # special case for -I (iostat) option # skipping all the 'thread' lines if words[1] == "thread" and self.metric_type == "INNOTOP-I": while True: line1 = infh.readline() words = line1.split() if 
naarad.utils.is_number(words[1]): line1 = infh.readline() else: break if words[1] == "thread" and self.metric_type == "INNOTOP-R": break # Skip next line infh.readline() last_ts = words[0].strip().replace('T', ' ') if not naarad.utils.is_number(words[1]): thisrowcolumns[max_row_quot] = words[1:] for column in words[1:]: if self.options and column not in self.options: continue data[column] = [] if self.metric_type == "INNOTOP-I": data["check_pt_age"] = [] max_row_quot += 1 else: break # infh.seek(0) # Real Processing for line in infh: l = line.strip().split(' ', 1) if len(l) <= 1: continue ts = l[0].strip().replace('T', ' ') if not ts == last_ts: last_ts = ts valrow = -1 try: words = l[1].strip().split('\t') except IndexError: logger.warn("Bad line: %s", line) continue # special case for -I (iostat) option # skipping all the 'thread' lines if words[0] == "thread" or (naarad.utils.is_number(words[0]) and "thread" in words[1]): continue if naarad.utils.is_number(words[0]): valrow += 1 quot = valrow % max_row_quot # Special case for -R, skipping all 'thread' value lines if quot >= len(thisrowcolumns): continue columns = thisrowcolumns[quot] if len(words) > len(columns): continue for i in range(len(words)): if self.options and columns[i] not in self.options: continue column = columns[i] # Converting -- to 0, seen this for buf_pool_hit_rate if words[i] == "--": words[i] = "0" ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone) # Calculating check point age if self.metric_type == "INNOTOP-I": if column == "log_seq_no": log_seq_no = int(words[i]) elif column == "log_flushed_to": check_pt_age = log_seq_no - int(words[i]) tup = [ts, str(check_pt_age)] data["check_pt_age"].append(tup) tup = [ts, words[i]] data[column].append(tup) # Post Proc, writing the different out files for column in data: csvfile = self.get_csv(column) self.csv_files.append(csvfile) with open(csvfile, 'w') as outfh: for tup in data[column]: outfh.write(','.join(tup)) outfh.write('\n') return True def parse_innotop_mode_m(self): """ Special parsing method for Innotop "Replication Status" results (innotop --mode M)""" with open(self.infile, 'r') as infh: # Pre processing to figure out different headers max_row_quot = 0 valrow = -1 thisrowcolumns = {} data = {} last_ts = None while True: # 2012-05-11T00:00:02 master_host slave_sql_running time_behind_master slave_catchup_rate slave_open_temp_tables relay_log_pos last_error line1 = infh.readline() words = line1.split() # Skip next line infh.readline() is_header = True for word in words: if naarad.utils.is_number(word): last_ts = words[0].strip().replace('T', ' ') is_header = False break # from this loop if len(words) > 2 and is_header: thisrowcolumns[max_row_quot] = words[2:] for column in thisrowcolumns[max_row_quot]: data[column] = [] max_row_quot += 1 else: break # from pre-processing. 
All headers accounted for # Real Processing if not last_ts: logger.warn("last_ts not set, looks like there is no data in file %s", self.infile) return True infh.seek(0) is_bad_line = False outfilehandlers = {} for line in infh: l = line.strip().split(' ', 1) # Blank line if len(l) <= 1: continue ts = l[0].strip().replace('T', ' ') if ts != last_ts: last_ts = ts valrow = -1 nameval = l[1].strip().split('\t', 1) try: words = nameval[1].split('\t') except IndexError: logger.warn("Bad line: %s", line) continue valrow += 1 command = nameval[0] if command not in outfilehandlers: outfilehandlers[command] = {} quot = valrow % max_row_quot columns = thisrowcolumns[quot] for i in range(len(words)): if len(words) > len(columns): logger.warn("Mismatched number of columns: %s", line) logger.warn("%d %d", len(words), len(columns)) break if words[i] in columns: logger.warn("Skipping line: %s", line) valrow -= 1 break if self.options and columns[i] not in self.options: continue if columns[i] not in outfilehandlers[command]: outfilehandlers[command][columns[i]] = open(self.get_csv_C(command, columns[i]), 'w') self.csv_files.append(self.get_csv_C(command, columns[i])) ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone) outfilehandlers[command][columns[i]].write(ts + ',') outfilehandlers[command][columns[i]].write(words[i]) outfilehandlers[command][columns[i]].write('\n') for command in outfilehandlers: for column in outfilehandlers[command]: outfilehandlers[command][column].close() return True
{ "content_hash": "fdaa9c555dfef606af69d39b038c6ac2", "timestamp": "", "source": "github", "line_count": 262, "max_line_length": 155, "avg_line_length": 38, "alnum_prop": 0.5758336681398152, "repo_name": "linkedin/naarad", "id": "0d838c6f9b362d745148aa0a49bc01dcee9b7710", "size": "9971", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "src/naarad/metrics/innotop_metric.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Awk", "bytes": "83289" }, { "name": "CSS", "bytes": "4420" }, { "name": "HTML", "bytes": "41597" }, { "name": "JavaScript", "bytes": "42346" }, { "name": "Python", "bytes": "381839" }, { "name": "Shell", "bytes": "2161" } ], "symlink_target": "" }
import pretend import pytest from wtforms.validators import StopValidation, ValidationError from warehouse.forms import ( Form, DBForm, URIValidator, PasswordStrengthValidator, ) class TestURIValidator: @pytest.mark.parametrize( "uri", [ "https://example.com/", "http://example.com/", "https://sub.example.com/path?query#thing", ], ) def test_valid(self, uri): URIValidator()(pretend.stub(), pretend.stub(data=uri)) @pytest.mark.parametrize( "uri", [ "javascript:alert(0)", "UNKNOWN", "ftp://example.com/", ], ) def test_invalid(self, uri): validator = URIValidator() with pytest.raises(ValidationError): validator(pretend.stub(), pretend.stub(data=uri)) def test_plain_schemes(self): validator = URIValidator(require_scheme=True, allowed_schemes=[]) validator(pretend.stub(), pretend.stub(data="ftp://example.com/")) class TestPasswordStrengthValidator: def test_invalid_fields(self): validator = PasswordStrengthValidator(user_input_fields=["foo"]) with pytest.raises(ValidationError) as exc: validator({}, pretend.stub()) assert str(exc.value) == "Invalid field name: 'foo'" @pytest.mark.parametrize("password", ["this is a great password!"]) def test_good_passwords(self, password): validator = PasswordStrengthValidator() validator(pretend.stub(), pretend.stub(data=password)) @pytest.mark.parametrize( ("password", "expected"), [ ( "qwerty", ("This is a top-10 common password. Add another word or two. " "Uncommon words are better."), ), ( "bombo!b", ("Password is too easily guessed. Add another word or two. " "Uncommon words are better."), ), ("bombo!b asdadad", "Password is too easily guessed."), ], ) def test_invalid_password(self, password, expected): validator = PasswordStrengthValidator(required_strength=5) with pytest.raises(ValidationError) as exc: validator(pretend.stub(), pretend.stub(data=password)) assert str(exc.value) == expected def _raiser(exc): raise exc class TestForm: def test_empty_form_no_errors(self): form = Form() assert form.errors == {} def test_errors_is_cached(self): form = Form() assert form.errors == {} form._form_errors.append("An Error") assert form.errors == {} form._errors = None assert form.errors == {"__all__": ["An Error"]} def test_form_level_validation_no_validators(self): class TestForm(Form): pass form = TestForm() assert form.validate() assert form.errors == {} def test_form_level_validation_full_validate(self): class TestForm(Form): @pretend.call_recorder def full_validate(self): pass form = TestForm() assert form.validate() assert form.errors == {} assert form.full_validate.calls == [pretend.call(form)] def test_form_level_validation_full_validate_fails(self): class TestForm(Form): @pretend.call_recorder def full_validate(self): raise ValueError("A Value Error") form = TestForm() assert not form.validate() assert form.errors == {"__all__": ["A Value Error"]} assert form.full_validate.calls == [pretend.call(form)] @pytest.mark.parametrize( "validator_funcs", [ [], [lambda f: None] ], ) def test_form_level_validation_meta_works(self, validator_funcs): validator_funcs = [pretend.call_recorder(v) for v in validator_funcs] class TestForm(Form): class Meta: validators = validator_funcs form = TestForm() assert form.validate() assert form.errors == {} for v in validator_funcs: assert v.calls == [pretend.call(form)] @pytest.mark.parametrize( ("validator_funcs", "errors", "stop_after"), [ ( [ lambda f: _raiser(ValueError("An Error")), lambda f: None, lambda f: _raiser(ValueError("Another Error")), lambda f: _raiser(StopValidation("Stop!")), lambda f: 
_raiser(ValueError("This Won't Show.")), ], ["An Error", "Another Error", "Stop!"], 3, ), ( [ lambda f: _raiser(ValueError("An Error")), lambda f: None, lambda f: _raiser(ValueError("Another Error")), lambda f: _raiser(StopValidation), lambda f: _raiser(ValueError("This Won't Show.")), ], ["An Error", "Another Error"], 3, ), ], ) def test_form_level_validation_meta_fails(self, validator_funcs, errors, stop_after): validator_funcs = [pretend.call_recorder(v) for v in validator_funcs] class TestForm(Form): class Meta: validators = validator_funcs form = TestForm() assert not form.validate() assert form.errors == {"__all__": errors} for i, v in enumerate(validator_funcs): assert v.calls == [pretend.call(form)] if i >= stop_after: break class TestDBForm: def test_form_requires_db(self): with pytest.raises(TypeError): DBForm() def test_form_accepts_db(self): db = pretend.stub() form = DBForm(db=db) assert form.db is db
{ "content_hash": "d81e5aadf8e7e5d53fbc77ae61976fc5", "timestamp": "", "source": "github", "line_count": 204, "max_line_length": 78, "avg_line_length": 29.720588235294116, "alnum_prop": 0.5362031997361042, "repo_name": "alex/warehouse", "id": "f2db87fb865f16a64d4a04fcc9c0a5c3b2b2803f", "size": "6604", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/unit/test_forms.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "135448" }, { "name": "HTML", "bytes": "95233" }, { "name": "JavaScript", "bytes": "27705" }, { "name": "Makefile", "bytes": "5817" }, { "name": "Mako", "bytes": "1505" }, { "name": "Perl", "bytes": "15498" }, { "name": "Python", "bytes": "909699" }, { "name": "Shell", "bytes": "4504" } ], "symlink_target": "" }
from direct.showbase import DirectObject
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from direct.task import Task
from direct.distributed import DoInterestManager
from otp.distributed.OtpDoGlobals import *

_ToonTownDistrictStatInterest = None
_ToonTownDistrictStatInterestComplete = 0
_trashObject = DirectObject.DirectObject()


def EventName():
    return 'ShardPopulationSet'


def isOpen():
    global _ToonTownDistrictStatInterest
    return _ToonTownDistrictStatInterest is not None


def isComplete():
    global _ToonTownDistrictStatInterestComplete
    return _ToonTownDistrictStatInterestComplete


def open(event = None):
    global _trashObject
    global _ToonTownDistrictStatInterest
    if not isOpen():

        def _CompleteProc(event):
            global _ToonTownDistrictStatInterestComplete
            _ToonTownDistrictStatInterestComplete = 1
            if event is not None:
                messenger.send(event)
            return

        _trashObject.acceptOnce(EventName(), _CompleteProc)
        _ToonTownDistrictStatInterest = base.cr.addInterest(OTP_DO_ID_TOONTOWN, OTP_ZONE_ID_DISTRICTS_STATS, EventName(), EventName())
    elif isComplete():
        messenger.send(EventName())


def refresh(event = None):
    global _ToonTownDistrictStatInterest
    if isOpen():
        if isComplete():
            messenger.send(EventName())
            if event is not None:
                messenger.send(event)
    else:

        def _CompleteProc(event):
            global _ToonTownDistrictStatInterestComplete
            _ToonTownDistrictStatInterestComplete = 1
            if event is not None:
                messenger.send(event)
            close()
            return

        _trashObject.acceptOnce(EventName(), _CompleteProc, [event])
        _ToonTownDistrictStatInterest = base.cr.addInterest(OTP_DO_ID_TOONTOWN, OTP_ZONE_ID_DISTRICTS_STATS, EventName(), EventName())


def close():
    global _ToonTownDistrictStatInterest
    global _ToonTownDistrictStatInterestComplete
    if isOpen():
        _ToonTownDistrictStatInterestComplete = 0
        base.cr.removeInterest(_ToonTownDistrictStatInterest, None)
        _ToonTownDistrictStatInterest = None


class ToontownDistrictStats(DistributedObject.DistributedObject):
    neverDisable = 1

    def __init__(self, cr):
        DistributedObject.DistributedObject.__init__(self, cr)
        self.toontownDistrictId = 0

    def settoontownDistrictId(self, value):
        self.toontownDistrictId = value

    def setAvatarCount(self, avatarCount):
        if self.toontownDistrictId in self.cr.activeDistrictMap:
            self.cr.activeDistrictMap[self.toontownDistrictId].avatarCount = avatarCount

    def setNewAvatarCount(self, newAvatarCount):
        if self.toontownDistrictId in self.cr.activeDistrictMap:
            self.cr.activeDistrictMap[self.toontownDistrictId].newAvatarCount = newAvatarCount

    def setInvasionStatus(self, invasionStatus):
        if self.toontownDistrictId in self.cr.activeDistrictMap:
            self.cr.activeDistrictMap[self.toontownDistrictId].invasionStatus = invasionStatus

    def setStats(self, avatarCount, newAvatarCount):
        self.setAvatarCount(avatarCount)
        self.setNewAvatarCount(newAvatarCount)
{ "content_hash": "070d6ad22d392e03c8351a73c007a580", "timestamp": "", "source": "github", "line_count": 89, "max_line_length": 134, "avg_line_length": 37.08988764044944, "alnum_prop": 0.7185701302635565, "repo_name": "linktlh/Toontown-journey", "id": "ee6bbaaec896e368d4445298159beb987990317e", "size": "3301", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "toontown/distributed/ToontownDistrictStats.py", "mode": "33261", "license": "apache-2.0", "language": [], "symlink_target": "" }
"""Provides the Document model""" from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, LargeBinary BASE = declarative_base() class Document(BASE): # pylint: disable=too-few-public-methods """Document model""" __tablename__ = 'documents' document_id = Column(Integer, primary_key=True) document_name = Column(String) document_content = Column(LargeBinary)
{ "content_hash": "93a65f5ace7d3e076d189d6bb064ddce", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 62, "avg_line_length": 30.714285714285715, "alnum_prop": 0.7325581395348837, "repo_name": "FreakJoe/cryptolockpy", "id": "26c5aaaf7de557024b994a484adc626144425d84", "size": "430", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cryptolock/Document.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "232" }, { "name": "Python", "bytes": "26204" } ], "symlink_target": "" }
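A brief, hypothetical usage sketch for the model above. The engine URL, the session handling, and the cryptolock.Document import path (inferred from the record's path field) are assumptions for illustration, not part of the file.

# Illustrative only: create the table and store one row (standard SQLAlchemy usage).
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from cryptolock.Document import BASE, Document  # import path assumed from the repo layout

engine = create_engine('sqlite:///:memory:')  # any SQLAlchemy URL would work here
BASE.metadata.create_all(engine)  # creates the 'documents' table declared on BASE

Session = sessionmaker(bind=engine)
session = Session()
session.add(Document(document_name='notes.txt', document_content=b'ciphertext bytes'))
session.commit()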
"""Views used by custom social_auth pipeline""" from django.conf import settings from django.core.urlresolvers import reverse from django.views.generic.edit import FormView from storybase_user.social_auth.forms import TosForm, EmailTosForm class GetExtraAccountDetailsView(FormView): """Get additional user data during association with OAuth account""" template_name = "storybase_user/account_extra_details.html" def _get_backend(self): """Get which social_auth backend is being used""" name = getattr(settings, 'SOCIAL_AUTH_PARTIAL_PIPELINE_KEY', 'partial_pipeline') return self.request.session[name]['backend'] def form_valid(self, form): self.request.session['new_account_email'] = form.cleaned_data.get('email', None) self.request.session['new_account_extra_details'] = True return super(GetExtraAccountDetailsView, self).form_valid(form) def get_form_class(self): backend = self._get_backend() if backend == 'twitter': return EmailTosForm else: return TosForm def get_success_url(self): backend = self._get_backend() success_url = reverse('socialauth_complete', kwargs={'backend': backend}) return success_url
{ "content_hash": "f70d3f13b414d9bda8a2247b582143d3", "timestamp": "", "source": "github", "line_count": 34, "max_line_length": 88, "avg_line_length": 37.94117647058823, "alnum_prop": 0.6751937984496124, "repo_name": "denverfoundation/storybase", "id": "36e8998850653442ff96ef12d1a117b7924c6bf1", "size": "1290", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "apps/storybase_user/social_auth/views.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "285649" }, { "name": "Cucumber", "bytes": "176820" }, { "name": "HTML", "bytes": "286197" }, { "name": "JavaScript", "bytes": "1623541" }, { "name": "Makefile", "bytes": "1006" }, { "name": "Python", "bytes": "3020016" }, { "name": "Shell", "bytes": "23932" } ], "symlink_target": "" }
from django.core.paginator import Paginator, InvalidPage from django.utils.translation import ugettext_lazy as _ from haystack import connections from oscar.core.loading import get_class from . import facets FacetMunger = get_class('search.facets', 'FacetMunger') # Workaround for pysolr 3.1 not supporting and failing hard on # Solr 4.0 error messages # https://github.com/toastdriven/pysolr/pull/127 # https://github.com/toastdriven/pysolr/pull/113 # TODO: Remove in Oscar 0.9 try: import pysolr except ImportError: pass else: if pysolr.__version__[:2] < (3, 2): import logging logger = logging.getLogger() logger.warning( "You're running an old version of pysolr that is causing issues " "with Oscar. Please upgrade to 3.2 or higher.") class SearchHandler(object): """ A class that is concerned with performing a search and paginating the results. The search is triggered upon initialisation (mainly to have a predictable point to process any errors). Search results are cached, so they can be accessed multiple times without incurring any overhead. The raison d'etre for this third way to interface with Haystack is two-fold. The Haystack search form doesn't do enough for our needs, and basing a view off a Haystack search view is unnecessarily invasive. Furthermore, using our own search handler means it is easy to swap out Haystack, which has been considered before. Usage: handler = SearchHandler(request.GET, request.get_full_path) found_objects = handler.get_paginated_objects() context = handler.get_search_context_data() Error handling: You need to catch an InvalidPage exception which gets thrown when an invalid page number is supplied. """ form_class = None model_whitelist = None paginate_by = None paginator_class = Paginator page_kwarg = 'page' def __init__(self, request_data, full_path): self.full_path = full_path self.request_data = request_data # Triggers the search. search_queryset = self.get_search_queryset() self.search_form = self.get_search_form( request_data, search_queryset) self.results = self.get_search_results(self.search_form) # If below raises an UnicodeDecodeError, you're running pysolr < 3.2 # with Solr 4. self.paginator, self.page = self.paginate_queryset( self.results, request_data) # Search related methods def get_search_results(self, search_form): """ Perform the actual search using Haystack's search form. Returns a SearchQuerySet. The SQS is empty if the form is invalid. """ return search_form.search() def get_search_form(self, request_data, search_queryset): """ Return a bound version of Haystack's search form. """ kwargs = { 'data': request_data, 'selected_facets': request_data.getlist("selected_facets"), 'searchqueryset': search_queryset } return self.form_class(**kwargs) def get_search_queryset(self): """ Returns the search queryset that is used as a base for the search. """ sqs = facets.base_sqs() if self.model_whitelist: # Limit queryset to specified list of models sqs = sqs.models(*self.model_whitelist) return sqs # Pagination related methods def paginate_queryset(self, queryset, request_data): """ Paginate the search results. This is a simplified version of Django's MultipleObjectMixin.paginate_queryset """ paginator = self.get_paginator(queryset) page_kwarg = self.page_kwarg page = request_data.get(page_kwarg, 1) try: page_number = int(page) except ValueError: if page == 'last': page_number = paginator.num_pages else: raise InvalidPage(_( "Page is not 'last', nor can it be converted to an int.")) # This can also raise an InvalidPage exception. 
return paginator, paginator.page(page_number) def get_paginator(self, queryset): """ Return a paginator. Override this to set settings like orphans, allow_empty, etc. """ return self.paginator_class(queryset, self.paginate_by) # Accessing the search results and meta data def bulk_fetch_results(self, paginated_results): """ This method gets paginated search results and returns a list of Django objects in the same order. It preserves the order without doing any ordering in Python, even when more than one Django model are returned in the search results. It also uses the same queryset that was used to populate the search queryset, so any select_related/prefetch_related optimisations are in effect. It is heavily based on Haystack's SearchQuerySet.post_process_results, but works on the paginated results instead of all of them. """ objects = [] models_pks = loaded_objects = {} for result in paginated_results: models_pks.setdefault(result.model, []).append(result.pk) search_backend_alias = self.results.query.backend.connection_alias for model in models_pks: ui = connections[search_backend_alias].get_unified_index() index = ui.get_index(model) queryset = index.read_queryset(using=search_backend_alias) loaded_objects[model] = queryset.in_bulk(models_pks[model]) for result in paginated_results: model_objects = loaded_objects.get(result.model, {}) try: result._object = model_objects[int(result.pk)] except KeyError: # The object was either deleted since we indexed or should # be ignored; fail silently. pass else: objects.append(result._object) return objects def get_paginated_objects(self): """ Return a paginated list of Django model instances. The call is cached. """ if hasattr(self, '_objects'): return self._objects else: paginated_results = self.page.object_list self._objects = self.bulk_fetch_results(paginated_results) return self._objects def get_facet_munger(self): return FacetMunger( self.full_path, self.search_form.selected_multi_facets, self.results.facet_counts()) def get_search_context_data(self, context_object_name=None): """ Return metadata about the search in a dictionary useful to populate template contexts. If you pass in a context_object_name, the dictionary will also contain the actual list of found objects. The expected usage is to call this function in your view's get_context_data: search_context = self.search_handler.get_search_context_data( self.context_object_name) context.update(search_context) return context """ # Use the FacetMunger to convert Haystack's awkward facet data into # something the templates can use. # Note that the FacetMunger accesses object_list (unpaginated results), # whereas we use the paginated search results to populate the context # with products munger = self.get_facet_munger() facet_data = munger.facet_data() has_facets = any([data['results'] for data in facet_data.values()]) context = { 'facet_data': facet_data, 'has_facets': has_facets, # This is a serious code smell; we just pass through the selected # facets data to the view again, and the template adds those # as fields to the form. This hack ensures that facets stay # selected when changing relevancy. 'selected_facets': self.request_data.getlist('selected_facets'), 'form': self.search_form, 'paginator': self.paginator, 'page_obj': self.page, } # It's a pretty common pattern to want the actual results in the # context, so pass them in if context_object_name is set. if context_object_name is not None: context[context_object_name] = self.get_paginated_objects() return context
{ "content_hash": "7e0dcedf671431efe3827af1a752f42e", "timestamp": "", "source": "github", "line_count": 235, "max_line_length": 79, "avg_line_length": 36.774468085106385, "alnum_prop": 0.6320296227725064, "repo_name": "ahmetdaglarbas/e-commerce", "id": "d5fec37eb0f0f5332e2e69224530d676515e388a", "size": "8642", "binary": false, "copies": "6", "ref": "refs/heads/tez", "path": "oscar/apps/search/search_handlers.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "928482" }, { "name": "Cucumber", "bytes": "376" }, { "name": "HTML", "bytes": "472297" }, { "name": "JavaScript", "bytes": "416074" }, { "name": "Makefile", "bytes": "5237" }, { "name": "Python", "bytes": "4880161" }, { "name": "Shell", "bytes": "5122" }, { "name": "XSLT", "bytes": "24882" } ], "symlink_target": "" }
"""Common configuration elements for networkzero """ ENCODING = "UTF-8" class _Forever(object): def __repr__(self): return "<Forever>" FOREVER = _Forever() SHORT_WAIT = 1 # 1 second EVERYTHING = "" COMMAND_ACK = "ack" # # Beacons will broadcast adverts at this frequency # BEACON_ADVERT_FREQUENCY_S = 2 # # Adverts will expire after this many seconds unless # a fresh broadcast is received. Default it above the # broadcast frequency so adverts are not forever expiring # and being recreated by the next received broadcast. # # NB since adverts are broadcast round-robin (ie only one advert # is broadcast every BEACON_ADVERT_FREQUENCY_S seconds) we need # to allow for the possibility that any given name might only # be advertised, say, once every 5 times. # ADVERT_TTL_S = 10 * BEACON_ADVERT_FREQUENCY_S VALID_PORTS = range(0x10000) DYNAMIC_PORTS = range(0xC000, 0x10000)
{ "content_hash": "19dacaf3676a5c9b8069f3d16e6c2d58", "timestamp": "", "source": "github", "line_count": 28, "max_line_length": 64, "avg_line_length": 31.428571428571427, "alnum_prop": 0.7465909090909091, "repo_name": "tjguk/networkzero", "id": "af8a47334e9fd5f37f840bbee80828b5f060824e", "size": "904", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "networkzero/config.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "81" }, { "name": "C", "bytes": "2621" }, { "name": "Python", "bytes": "101990" }, { "name": "Shell", "bytes": "2626" } ], "symlink_target": "" }
from __future__ import unicode_literals import httplib import logging from django.core.exceptions import ValidationError from django.db import IntegrityError from django.db import connection from django.db import transaction from flask import request from framework.auth import Auth from framework.sessions import get_session from framework.exceptions import HTTPError from framework.auth.decorators import must_be_signed from osf.exceptions import InvalidTagError, TagNotFoundError from osf.models import FileVersion, OSFUser from osf.utils.requests import check_select_for_update from website.project.decorators import ( must_not_be_registration, must_have_addon, must_have_permission ) from website.project.model import has_anonymous_link from website.files import exceptions from addons.osfstorage import utils from addons.osfstorage import decorators from addons.osfstorage import settings as osf_storage_settings logger = logging.getLogger(__name__) def make_error(code, message_short=None, message_long=None): data = {} if message_short: data['message_short'] = message_short if message_long: data['message_long'] = message_long return HTTPError(code, data=data) @must_be_signed @must_have_addon('osfstorage', 'node') def osfstorage_update_metadata(node_addon, payload, **kwargs): """Metadata received from WaterButler, is built incrementally via latent task calls to this endpoint. The basic metadata response looks like:: { "metadata": { # file upload "name": "file.name", "md5": "d41d8cd98f00b204e9800998ecf8427e", "path": "...", "sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "version": "2", "downloads": "1", "checkout": "...", "modified": "a date", "modified_utc": "a date in utc", # glacier vault "archive": "glacier_key", "vault": "glacier_vault_name", # parity files "parity": { "redundancy": "5", "files": [ {"name": "foo.txt.par2","sha256": "abc123"}, {"name": "foo.txt.vol00+01.par2","sha256": "xyz321"}, ] } }, } """ try: version_id = payload['version'] metadata = payload['metadata'] except KeyError: raise HTTPError(httplib.BAD_REQUEST) if check_select_for_update(): version = FileVersion.objects.filter(_id=version_id).select_for_update().first() else: version = FileVersion.objects.filter(_id=version_id).first() if version is None: raise HTTPError(httplib.NOT_FOUND) version.update_metadata(metadata) return {'status': 'success'} @must_be_signed @decorators.autoload_filenode(must_be='file') def osfstorage_get_revisions(file_node, node_addon, payload, **kwargs): from osf.models import PageCounter, FileVersion # TODO Fix me onces django works is_anon = has_anonymous_link(node_addon.owner, Auth(private_key=request.args.get('view_only'))) counter_prefix = 'download:{}:{}:'.format(file_node.node._id, file_node._id) version_count = file_node.versions.count() # Don't worry. 
The only % at the end of the LIKE clause, the index is still used counts = dict(PageCounter.objects.filter(_id__startswith=counter_prefix).values_list('_id', 'total')) qs = FileVersion.includable_objects.filter(basefilenode__id=file_node.id).include('creator__guids').order_by('-created') for i, version in enumerate(qs): version._download_count = counts.get('{}{}'.format(counter_prefix, version_count - i - 1), 0) # Return revisions in descending order return { 'revisions': [ utils.serialize_revision(node_addon.owner, file_node, version, index=version_count - idx - 1, anon=is_anon) for idx, version in enumerate(qs) ] } @decorators.waterbutler_opt_hook def osfstorage_copy_hook(source, destination, name=None, **kwargs): return source.copy_under(destination, name=name).serialize(), httplib.CREATED @decorators.waterbutler_opt_hook def osfstorage_move_hook(source, destination, name=None, **kwargs): try: return source.move_under(destination, name=name).serialize(), httplib.OK except exceptions.FileNodeCheckedOutError: raise HTTPError(httplib.METHOD_NOT_ALLOWED, data={ 'message_long': 'Cannot move file as it is checked out.' }) except exceptions.FileNodeIsPrimaryFile: raise HTTPError(httplib.FORBIDDEN, data={ 'message_long': 'Cannot move file as it is the primary file of preprint.' }) @must_be_signed @decorators.autoload_filenode(default_root=True) def osfstorage_get_lineage(file_node, node_addon, **kwargs): lineage = [] while file_node: lineage.append(file_node.serialize()) file_node = file_node.parent return {'data': lineage} @must_be_signed @decorators.autoload_filenode(default_root=True) def osfstorage_get_metadata(file_node, **kwargs): try: # TODO This should change to version as its internal it can be changed anytime version = int(request.args.get('revision')) except (ValueError, TypeError): # If its not a number version = None return file_node.serialize(version=version, include_full=True) @must_be_signed @decorators.autoload_filenode(must_be='folder') def osfstorage_get_children(file_node, **kwargs): from django.contrib.contenttypes.models import ContentType with connection.cursor() as cursor: # Read the documentation on FileVersion's fields before reading this code cursor.execute(''' SELECT json_agg(CASE WHEN F.type = 'osf.osfstoragefile' THEN json_build_object( 'id', F._id , 'path', '/' || F._id , 'name', F.name , 'kind', 'file' , 'size', LATEST_VERSION.size , 'downloads', COALESCE(DOWNLOAD_COUNT, 0) , 'version', (SELECT COUNT(*) FROM osf_basefilenode_versions WHERE osf_basefilenode_versions.basefilenode_id = F.id) , 'contentType', LATEST_VERSION.content_type , 'modified', LATEST_VERSION.created , 'created', EARLIEST_VERSION.created , 'checkout', CHECKOUT_GUID , 'md5', LATEST_VERSION.metadata ->> 'md5' , 'sha256', LATEST_VERSION.metadata ->> 'sha256' ) ELSE json_build_object( 'id', F._id , 'path', '/' || F._id || '/' , 'name', F.name , 'kind', 'folder' ) END ) FROM osf_basefilenode AS F LEFT JOIN LATERAL ( SELECT * FROM osf_fileversion JOIN osf_basefilenode_versions ON osf_fileversion.id = osf_basefilenode_versions.fileversion_id WHERE osf_basefilenode_versions.basefilenode_id = F.id ORDER BY created DESC LIMIT 1 ) LATEST_VERSION ON TRUE LEFT JOIN LATERAL ( SELECT * FROM osf_fileversion JOIN osf_basefilenode_versions ON osf_fileversion.id = osf_basefilenode_versions.fileversion_id WHERE osf_basefilenode_versions.basefilenode_id = F.id ORDER BY created ASC LIMIT 1 ) EARLIEST_VERSION ON TRUE LEFT JOIN LATERAL ( SELECT _id from osf_guid WHERE object_id = F.checkout_id AND content_type_id = %s 
LIMIT 1 ) CHECKOUT_GUID ON TRUE LEFT JOIN LATERAL ( SELECT P.total AS DOWNLOAD_COUNT FROM osf_pagecounter AS P WHERE P._id = 'download:' || %s || ':' || F._id LIMIT 1 ) DOWNLOAD_COUNT ON TRUE WHERE parent_id = %s AND (NOT F.type IN ('osf.trashedfilenode', 'osf.trashedfile', 'osf.trashedfolder')) ''', [ContentType.objects.get_for_model(OSFUser).id, file_node.node._id, file_node.id]) return cursor.fetchone()[0] or [] @must_be_signed @must_not_be_registration @decorators.autoload_filenode(must_be='folder') def osfstorage_create_child(file_node, payload, node_addon, **kwargs): parent = file_node # Just for clarity name = payload.get('name') user = OSFUser.load(payload.get('user')) is_folder = payload.get('kind') == 'folder' if not (name or user) or '/' in name: raise HTTPError(httplib.BAD_REQUEST) if file_node.node.is_quickfiles and is_folder: raise HTTPError(httplib.BAD_REQUEST, data={'message_long': 'You may not create a folder for QuickFiles'}) try: # Create a save point so that we can rollback and unlock # the parent record with transaction.atomic(): if is_folder: created, file_node = True, parent.append_folder(name) else: created, file_node = True, parent.append_file(name) except (ValidationError, IntegrityError): created, file_node = False, parent.find_child_by_name(name, kind=int(not is_folder)) if not created and is_folder: raise HTTPError(httplib.CONFLICT, data={ 'message_long': 'Cannot create folder "{name}" because a file or folder already exists at path "{path}"'.format( name=file_node.name, path=file_node.materialized_path, ) }) if not is_folder: try: if file_node.checkout is None or file_node.checkout._id == user._id: version = file_node.create_version( user, dict(payload['settings'], **dict( payload['worker'], **{ 'object': payload['metadata']['name'], 'service': payload['metadata']['provider'], }) ), dict(payload['metadata'], **payload['hashes']) ) version_id = version._id archive_exists = version.archive is not None else: raise HTTPError(httplib.FORBIDDEN, data={ 'message_long': 'File cannot be updated due to checkout status.' }) except KeyError: raise HTTPError(httplib.BAD_REQUEST) else: version_id = None archive_exists = False return { 'status': 'success', 'archive': not archive_exists, # Should waterbutler also archive this file 'data': file_node.serialize(), 'version': version_id, }, httplib.CREATED if created else httplib.OK @must_be_signed @must_not_be_registration @decorators.autoload_filenode() def osfstorage_delete(file_node, payload, node_addon, **kwargs): user = OSFUser.load(payload['user']) auth = Auth(user) #TODO Auth check? if not auth: raise HTTPError(httplib.BAD_REQUEST) if file_node == node_addon.get_root(): raise HTTPError(httplib.BAD_REQUEST) try: file_node.delete(user=user) except exceptions.FileNodeCheckedOutError: raise HTTPError(httplib.FORBIDDEN) except exceptions.FileNodeIsPrimaryFile: raise HTTPError(httplib.FORBIDDEN, data={ 'message_long': 'Cannot delete file as it is the primary file of preprint.' }) return {'status': 'success'} @must_be_signed @decorators.autoload_filenode(must_be='file') def osfstorage_download(file_node, payload, node_addon, **kwargs): # Set user ID in session data for checking if user is contributor # to project. 
user_id = payload.get('user') if user_id: current_session = get_session() current_session.data['auth_user_id'] = user_id current_session.save() if not request.args.get('version'): version_id = None else: try: version_id = int(request.args['version']) except ValueError: raise make_error(httplib.BAD_REQUEST, message_short='Version must be an integer if not specified') version = file_node.get_version(version_id, required=True) if request.args.get('mode') not in ('render', ): utils.update_analytics(node_addon.owner, file_node._id, int(version.identifier) - 1) return { 'data': { 'name': file_node.name, 'path': version.location_hash, }, 'settings': { osf_storage_settings.WATERBUTLER_RESOURCE: version.location[osf_storage_settings.WATERBUTLER_RESOURCE], }, } @must_have_permission('write') @decorators.autoload_filenode(must_be='file') def osfstorage_add_tag(file_node, **kwargs): data = request.get_json() if file_node.add_tag(data['tag'], kwargs['auth']): return {'status': 'success'}, httplib.OK return {'status': 'failure'}, httplib.BAD_REQUEST @must_have_permission('write') @decorators.autoload_filenode(must_be='file') def osfstorage_remove_tag(file_node, **kwargs): data = request.get_json() try: file_node.remove_tag(data['tag'], kwargs['auth']) except TagNotFoundError: return {'status': 'failure'}, httplib.CONFLICT except InvalidTagError: return {'status': 'failure'}, httplib.BAD_REQUEST else: return {'status': 'success'}, httplib.OK
{ "content_hash": "87cd0351ec4676a4b5ee9e2b60a815f4", "timestamp": "", "source": "github", "line_count": 378, "max_line_length": 140, "avg_line_length": 36.74867724867725, "alnum_prop": 0.5985890144698006, "repo_name": "laurenrevere/osf.io", "id": "49afd63647bc6358e2f8dfd0e18707b3f4650baf", "size": "13891", "binary": false, "copies": "5", "ref": "refs/heads/develop", "path": "addons/osfstorage/views.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "110148" }, { "name": "HTML", "bytes": "228999" }, { "name": "JavaScript", "bytes": "1809805" }, { "name": "Mako", "bytes": "642995" }, { "name": "Python", "bytes": "7692214" }, { "name": "VCL", "bytes": "13885" } ], "symlink_target": "" }
"""Search brewerydb.com for information about beers.""" import plumeria.util.http as http from plumeria import config from plumeria.command import commands, CommandError from plumeria.command.parse import Text from plumeria.message.mappings import build_mapping from plumeria.plugin import PluginSetupError from plumeria.util.collections import SafeStructure from plumeria.util.ratelimit import rate_limit api_key = config.create("brewerydb", "key", fallback="", comment="An API key from brewerydb.com") @commands.create("beer", category="Search", params=[Text('query')]) @rate_limit(burst_size=4) async def beer_search(message, query): """ Search for a beer using brewerydb.com. Example:: beer indian pale ale Response:: Amnesia I.P.A. ABV: 7.2% IBU: 55 Style: American-Style India Pale Ale Description: Named for the beer that was shipped to Her Majesty’s [...] """ r = await http.get("http://api.brewerydb.com/v2/search", params={ "q": query, "type": "beer", "key": api_key() }) results = SafeStructure(r.json()) beer = results.data[0] if not beer: raise CommandError("Beer not found on brewerydb.com.") props = [ ('Name', beer.name) ] if results.abv: props.append(("ABV", results.abv)) if results.ibu: props.append(("IBU", results.ibu)) if beer.style: props.append(("Style", beer.style.name.strip())) if beer.description: props.append(("Description", beer.description.strip())) if beer.foodPairings: props.append(("Food pairings", beer.foodPairings.strip())) return build_mapping(props) def setup(): config.add(api_key) if not api_key(): raise PluginSetupError("This plugin requires an API key from https://brewerydb.com. Registration is free.") commands.add(beer_search)
{ "content_hash": "0f18ee4af249b61b0ed81bf2cb9901e7", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 115, "avg_line_length": 28, "alnum_prop": 0.6408163265306123, "repo_name": "sk89q/Plumeria", "id": "06598712a3bd149d6a4254c8232800c63bd178e9", "size": "1962", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "orchard/beer.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "410" }, { "name": "CSS", "bytes": "3094" }, { "name": "HTML", "bytes": "10031" }, { "name": "JavaScript", "bytes": "490" }, { "name": "Python", "bytes": "435373" } ], "symlink_target": "" }
import numpy as np import pandas as pd from dask.distributed import Client from scipy import stats from sklearn.svm import SVC import dask_ml.model_selection as dms def test_search_basic(xy_classification): X, y = xy_classification param_grid = {"class_weight": [None, "balanced"]} a = dms.GridSearchCV(SVC(kernel="rbf", gamma=0.1), param_grid) a.fit(X, y) param_dist = {"C": stats.uniform} b = dms.RandomizedSearchCV(SVC(kernel="rbf", gamma=0.1), param_dist) b.fit(X, y) def test_to_keys_numpy_array(): rng = np.random.RandomState(0) arr = rng.randn(20, 30) df = pd.DataFrame(data=arr) dsk = {} grid_search_keys = list(dms.utils.to_keys(dsk, arr, df)) with Client() as client: data_futures = client.scatter([arr, df]) assert grid_search_keys == [f.key for f in data_futures]
{ "content_hash": "3c3c54cd920f429ee59e29d2bec24c67", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 72, "avg_line_length": 28.233333333333334, "alnum_prop": 0.6611570247933884, "repo_name": "dask/dask-ml", "id": "c78dcf036eabe63d6e5c8ae95a757e00d9d8d106", "size": "847", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tests/test_model_selection.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "798280" }, { "name": "Shell", "bytes": "633" } ], "symlink_target": "" }
""" Created on Sun Sep 11 19:29:11 2016 @author: DIP """ from normalization import normalize_corpus from utils import build_feature_matrix import numpy as np toy_corpus = ['The sky is blue', 'The sky is blue and beautiful', 'Look at the bright blue sky!', 'Python is a great Programming language', 'Python and Java are popular Programming languages', 'Among Programming languages, both Python and Java are the most used in Analytics', 'The fox is quicker than the lazy dog', 'The dog is smarter than the fox', 'The dog, fox and cat are good friends'] query_docs = ['The fox is definitely smarter than the dog', 'Java is a static typed programming language unlike Python', 'I love to relax under the beautiful blue sky!'] # normalize and extract features from the toy corpus norm_corpus = normalize_corpus(toy_corpus, lemmatize=True) tfidf_vectorizer, tfidf_features = build_feature_matrix(norm_corpus, feature_type='tfidf', ngram_range=(1, 1), min_df=0.0, max_df=1.0) # normalize and extract features from the query corpus norm_query_docs = normalize_corpus(query_docs, lemmatize=True) query_docs_tfidf = tfidf_vectorizer.transform(norm_query_docs) def compute_cosine_similarity(doc_features, corpus_features, top_n=3): # get document vectors doc_features = doc_features.toarray()[0] corpus_features = corpus_features.toarray() # compute similarities similarity = np.dot(doc_features, corpus_features.T) # get docs with highest similarity scores top_docs = similarity.argsort()[::-1][:top_n] top_docs_with_score = [(index, round(similarity[index], 3)) for index in top_docs] return top_docs_with_score print 'Document Similarity Analysis using Cosine Similarity' print '='*60 for index, doc in enumerate(query_docs): doc_tfidf = query_docs_tfidf[index] top_similar_docs = compute_cosine_similarity(doc_tfidf, tfidf_features, top_n=2) print 'Document',index+1 ,':', doc print 'Top', len(top_similar_docs), 'similar docs:' print '-'*40 for doc_index, sim_score in top_similar_docs: print 'Doc num: {} Similarity Score: {}\nDoc: {}'.format(doc_index+1, sim_score, toy_corpus[doc_index]) print '-'*40 print def compute_hellinger_bhattacharya_distance(doc_features, corpus_features, top_n=3): # get document vectors doc_features = doc_features.toarray()[0] corpus_features = corpus_features.toarray() # compute hb distances distance = np.hstack( np.sqrt(0.5 * np.sum( np.square(np.sqrt(doc_features) - np.sqrt(corpus_features)), axis=1))) # get docs with lowest distance scores top_docs = distance.argsort()[:top_n] top_docs_with_score = [(index, round(distance[index], 3)) for index in top_docs] return top_docs_with_score print 'Document Similarity Analysis using Hellinger-Bhattacharya distance' print '='*60 for index, doc in enumerate(query_docs): doc_tfidf = query_docs_tfidf[index] top_similar_docs = compute_hellinger_bhattacharya_distance(doc_tfidf, tfidf_features, top_n=2) print 'Document',index+1 ,':', doc print 'Top', len(top_similar_docs), 'similar docs:' print '-'*40 for doc_index, sim_score in top_similar_docs: print 'Doc num: {} Distance Score: {}\nDoc: {}'.format(doc_index+1, sim_score, toy_corpus[doc_index]) print '-'*40 print import scipy.sparse as sp def compute_corpus_term_idfs(corpus_features, norm_corpus): dfs = np.diff(sp.csc_matrix(corpus_features, copy=True).indptr) dfs = 1 + dfs # to smoothen idf later total_docs = 1 + len(norm_corpus) idfs = 1.0 + np.log(float(total_docs) / dfs) return idfs def compute_bm25_similarity(doc_features, corpus_features, corpus_doc_lengths, avg_doc_length, term_idfs, k1=1.5, b=0.75, 
top_n=3): # get corpus bag of words features corpus_features = corpus_features.toarray() # convert query document features to binary features # this is to keep a note of which terms exist per document doc_features = doc_features.toarray()[0] doc_features[doc_features >= 1] = 1 # compute the document idf scores for present terms doc_idfs = doc_features * term_idfs # compute numerator expression in BM25 equation numerator_coeff = corpus_features * (k1 + 1) numerator = np.multiply(doc_idfs, numerator_coeff) # compute denominator expression in BM25 equation denominator_coeff = k1 * (1 - b + (b * (corpus_doc_lengths / avg_doc_length))) denominator_coeff = np.vstack(denominator_coeff) denominator = corpus_features + denominator_coeff # compute the BM25 score combining the above equations bm25_scores = np.sum(np.divide(numerator, denominator), axis=1) # get top n relevant docs with highest BM25 score top_docs = bm25_scores.argsort()[::-1][:top_n] top_docs_with_score = [(index, round(bm25_scores[index], 3)) for index in top_docs] return top_docs_with_score vectorizer, corpus_features = build_feature_matrix(norm_corpus, feature_type='frequency') query_docs_features = vectorizer.transform(norm_query_docs) doc_lengths = [len(doc.split()) for doc in norm_corpus] avg_dl = np.average(doc_lengths) corpus_term_idfs = compute_corpus_term_idfs(corpus_features, norm_corpus) print 'Document Similarity Analysis using BM25' print '='*60 for index, doc in enumerate(query_docs): doc_features = query_docs_features[index] top_similar_docs = compute_bm25_similarity(doc_features, corpus_features, doc_lengths, avg_dl, corpus_term_idfs, k1=1.5, b=0.75, top_n=2) print 'Document',index+1 ,':', doc print 'Top', len(top_similar_docs), 'similar docs:' print '-'*40 for doc_index, sim_score in top_similar_docs: print 'Doc num: {} BM25 Score: {}\nDoc: {}'.format(doc_index+1, sim_score, toy_corpus[doc_index]) print '-'*40 print
{ "content_hash": "d8e4194b0314d1b2dc8885ee186c8d28", "timestamp": "", "source": "github", "line_count": 180, "max_line_length": 89, "avg_line_length": 43.27777777777778, "alnum_prop": 0.524390243902439, "repo_name": "dipanjanS/text-analytics-with-python", "id": "2e7510e1514f3ce9861ec4793cbaf35ff64a70ca", "size": "7814", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Old-First-Edition/Ch06_Text_Similarity_and_Clustering/document_similarity.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jupyter Notebook", "bytes": "51661" }, { "name": "Python", "bytes": "174548" } ], "symlink_target": "" }
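For reference, the score that compute_bm25_similarity in the record above assembles term by term is the standard Okapi BM25 ranking function. In LaTeX, with $f(q_i, D)$ the frequency of query term $q_i$ in document $D$, $|D|$ the document length and $avgdl$ the average document length over the corpus:

\mathrm{BM25}(D, Q) = \sum_{i=1}^{n} \mathrm{IDF}(q_i) \cdot \frac{f(q_i, D)\,(k_1 + 1)}{f(q_i, D) + k_1 \left(1 - b + b \cdot \frac{|D|}{avgdl}\right)}

Here $k_1 = 1.5$ and $b = 0.75$ as passed to the function, and the smoothed inverse document frequency computed by compute_corpus_term_idfs corresponds to $\mathrm{IDF}(q_i) = 1 + \log\frac{1 + N}{1 + df_i}$, where $N$ is the corpus size and $df_i$ the document frequency of the term. This closed form is only a reading of the code above, stated for clarity; it is not quoted from the original source.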