text
stringlengths
6
947k
repo_name
stringlengths
5
100
path
stringlengths
4
231
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
6
947k
score
float64
0
0.34
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------

import pandas as pd
from subprocess import Popen, PIPE
import logging

logger = logging.getLogger(__name__)


def system_call(cmd):
    """Run *cmd* in a shell and capture its output and exit status.

    Parameters
    ----------
    cmd : str or iterator of str
        The command to execute, either as a single shell string or as a
        sequence of command tokens.

    Returns
    -------
    str, str, int
        The command's standard output, standard error and exit status.

    Notes
    -----
    This function is ported from QIIME (http://www.qiime.org), previously
    named qiime_system_call. QIIME is a GPL project, but we obtained
    permission from the authors of this function to port it to Qiita and
    keep it under BSD license.
    """
    logger.debug('Entered system_call()')
    # TODO: This may need to be reviewed against the Qiita version of this
    # method.
    process = Popen(cmd, universal_newlines=True, shell=True,
                    stdout=PIPE, stderr=PIPE)
    # communicate() drains both pipes and blocks until the command exits,
    # which avoids deadlocking on a full pipe buffer.
    out, err = process.communicate()
    return out, err, process.returncode


def get_sample_names_by_run_prefix(mapping_file):
    """Build a run_prefix -> sample id dictionary from a QIIME mapping file.

    Parameters
    ----------
    mapping_file : str
        Path to the tab-separated QIIME mapping file.

    Returns
    -------
    dict
        Maps each run_prefix to its single sample id.

    Raises
    ------
    ValueError
        If any run_prefix is shared by more than one sample.
    """
    logger.debug('Entered get_sample_names_by_run_prefix()')
    # keep_default_na=False + na_values=[] keep every cell as a literal
    # string, so sample ids like "NA" are not turned into NaN.
    mapping = pd.read_csv(mapping_file, delimiter='\t', dtype=str,
                          encoding='utf-8', keep_default_na=False,
                          na_values=[])
    mapping.set_index('#SampleID', inplace=True)

    prefix_to_sample = {}
    problems = []
    for prefix, group in mapping.groupby('run_prefix'):
        n_samples = len(group)
        if n_samples == 1:
            prefix_to_sample[prefix] = group.index.values[0]
        else:
            problems.append('%s has %d samples (%s)'
                            % (prefix, n_samples, ', '.join(group.index)))

    if problems:
        raise ValueError("You have run_prefix values with multiple "
                         "samples: %s" % ' -- '.join(problems))

    return prefix_to_sample
qiita-spots/qiita_client
qiita_client/util.py
Python
bsd-3-clause
2,813
0
"""Pipedrive deals, deal products and deal fields resources for libsaas.

NOTE(review): the ``base.get_params(None, locals())`` idiom turns each
method's local argument names into request parameters, so parameter names
here are part of the wire protocol — do not rename them.
"""
from libsaas import http, parsers
from libsaas.services import base


class Products(base.RESTResource):
    # Products attached to a parent deal resource.

    path = 'products'

    @base.apimethod
    def get(self, start=None, limit=None):
        """
        Lists products attached to a deal.

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-Deals
        """
        params = base.get_params(None, locals())
        request = http.Request('GET', self.get_url(), params)

        return request, parsers.parse_json

    @base.apimethod
    def delete(self, product_attachment_id):
        """
        Deletes a product attachment from a deal, using the
        product_attachment_id.

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-Deals
        """
        params = base.get_params(None, locals())
        request = http.Request('DELETE', self.get_url(), params)

        return request, parsers.parse_json


class DealsResource(base.RESTResource):
    # Shared base for the collection (Deals) and single-object (Deal)
    # resources; both live under the same URL path.

    path = 'deals'


class Deals(DealsResource):
    # Collection-level operations on deals.

    @base.apimethod
    def get(self, filter_id=None, start=None, limit=None, sort_by=None,
            sort_mode=None, owned_by_you=None):
        """
        Returns all deals

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-Deals
        """
        params = base.get_params(None, locals())
        return http.Request('GET', self.get_url(), params), parsers.parse_json

    @base.apimethod
    def delete(self, ids):
        """
        Marks multiple deals as deleted.

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-Deals
        """
        params = base.get_params(None, locals())
        request = http.Request('DELETE', self.get_url(), params)

        return request, parsers.parse_json

    @base.apimethod
    def find(self, term):
        """
        Searches all deals by their title.

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-Deals
        """
        params = base.get_params(None, locals())
        url = '{0}/find'.format(self.get_url())
        return http.Request('GET', url, params), parsers.parse_json

    @base.apimethod
    def timeline(self, start_date, interval, amount, field_key, user_id=None,
                 pipeline_id=None, filter_id=None):
        """
        Returns open and won deals, grouped by defined interval of time set in
        a date-type dealField (field_key) - e.g. when month is the chosen
        interval, and 3 months are asked starting from January 1st, 2012,
        deals are returned grouped into 3 groups - January, February and
        March - based on the value of the given field_key.

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-Deals
        """
        params = base.get_params(None, locals())
        url = '{0}/timeline'.format(self.get_url())
        return http.Request('GET', url, params), parsers.parse_json


class Deal(DealsResource):
    # Operations on a single deal (sub-collections and related objects).

    @base.apimethod
    def activities(self, start=None, limit=None, done=None, exclude=None):
        """
        Lists activities associated with a deal.

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-Deals
        """
        params = base.get_params(None, locals())
        url = '{0}/activities'.format(self.get_url())
        return http.Request('GET', url, params), parsers.parse_json

    @base.apimethod
    def followers(self):
        """
        Lists the followers of a deal.

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-Deals
        """
        url = '{0}/followers'.format(self.get_url())
        return http.Request('GET', url), parsers.parse_json

    @base.apimethod
    def updates(self, start=None, limit=None):
        """
        Lists updates about a deal.

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-Deals
        """
        params = base.get_params(None, locals())
        url = '{0}/updates'.format(self.get_url())
        return http.Request('GET', url, params), parsers.parse_json

    @base.apimethod
    def participants(self, start=None, limit=None):
        """
        Lists participants associated with a deal.

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-Deals
        """
        params = base.get_params(None, locals())
        url = '{0}/participants'.format(self.get_url())
        return http.Request('GET', url, params), parsers.parse_json

    @base.apimethod
    def files(self, start=None, limit=None):
        """
        Lists files associated with a deal.

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-Deals
        """
        params = base.get_params(None, locals())
        url = '{0}/files'.format(self.get_url())
        return http.Request('GET', url, params), parsers.parse_json

    @base.resource(Products)
    def products(self):
        """
        Returns the resource corresponding to the deal products
        """
        return Products(self)


class DealFieldsResource(base.RESTResource):

    path = 'dealFields'

    def update(self, *args, **kwargs):
        # Updating deal fields is not supported by this service wrapper.
        raise base.MethodNotSupported()


class DealFields(DealFieldsResource):

    @base.apimethod
    def delete(self, ids):
        """
        Marks multiple deal fields as deleted.

        Upstream documentation:
        https://developers.pipedrive.com/v1#methods-DealFields
        """
        params = base.get_params(None, locals())
        request = http.Request('DELETE', self.get_url(), params)

        return request, parsers.parse_json


class DealField(DealFieldsResource):
    # Single deal field: inherits the standard RESTResource behaviour.
    pass
ducksboard/libsaas
libsaas/services/pipedrive/deals.py
Python
mit
5,710
0
"""Module with views for the employee feature.""" from django.contrib import messages from django.contrib.auth import logout from django.contrib.auth.decorators import login_required, permission_required from django.core.urlresolvers import reverse from django.shortcuts import get_object_or_404, redirect, render from .forms import EmployeeForm from .models import Employee @login_required def employees_logout_employee(request): """Logout a user, default behaviour.""" logout(request) return render(request, 'employees/logout.html') @permission_required('employees.add_employee') def employees_new_employee(request): """Create a new employee.""" form = EmployeeForm(request.POST or None, caffe=request.user.caffe) if form.is_valid(): form.save() messages.success(request, 'Pracownik został poprawnie stworzony.') return redirect(reverse('employees:navigate')) elif request.POST: messages.error(request, u'Formularz został niepoprawnie wypełniony.') return render(request, 'employees/new.html', { 'form': form }) @permission_required('employees.change_employee') def employees_edit_employee(request, employee_id): """Edit an employee.""" employee = get_object_or_404( Employee, id=employee_id, caffe=request.user.caffe ) form = EmployeeForm( request.POST or None, instance=employee, caffe=request.user.caffe ) if form.is_valid(): form.save() messages.success(request, 'Pracownik został poprawnie zmieniony.') return redirect(reverse('employees:navigate')) elif request.POST: messages.error(request, u'Formularz został niepoprawnie wypełniony.') return render(request, 'employees/edit.html', { 'form': form, 'employee': employee }) @permission_required('employees.delete_employee') def employees_delete_employee(request, employee_id): """Delete an employee.""" employee = get_object_or_404( Employee, id=employee_id, caffe=request.user.caffe ) if employee == request.user: messages.error(request, u'Nie możesz usunąć siebie.') return redirect(reverse('employees:navigate')) employee.delete() 
messages.success(request, u'Pracownik został poprawnie usunięty.') return redirect(reverse('employees:navigate')) @permission_required('employees.view_employee') def employees_show_all_employees(request): """Show all employees.""" employees = Employee.objects.filter(caffe=request.user.caffe).all() return render(request, 'employees/all.html', { 'employees': employees }) @permission_required('employees.view_employee') def employees_navigate(request): """Show main employee page.""" return render(request, 'home/employees.html')
VirrageS/io-kawiarnie
caffe/employees/views.py
Python
mit
2,869
0
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.

# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s

####################
# CORE             #
####################

DEBUG = False
TEMPLATE_DEBUG = False

# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False

# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False

# People who get code error notifications.
# In the format (('Full Name', 'email@domain.com'), ('Full Name', 'anotheremail@domain.com'))
ADMINS = ()

# Tuple of IP addresses, as strings, that:
#   * See debug comments, when DEBUG is true
#   * Receive x-headers
INTERNAL_IPS = ()

# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities).
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
    ('ar', gettext_noop('Arabic')),
    ('bn', gettext_noop('Bengali')),
    ('bg', gettext_noop('Bulgarian')),
    ('ca', gettext_noop('Catalan')),
    ('cs', gettext_noop('Czech')),
    ('cy', gettext_noop('Welsh')),
    ('da', gettext_noop('Danish')),
    ('de', gettext_noop('German')),
    ('el', gettext_noop('Greek')),
    ('en', gettext_noop('English')),
    ('es', gettext_noop('Spanish')),
    ('et', gettext_noop('Estonian')),
    ('es-ar', gettext_noop('Argentinean Spanish')),
    ('eu', gettext_noop('Basque')),
    ('fa', gettext_noop('Persian')),
    ('fi', gettext_noop('Finnish')),
    ('fr', gettext_noop('French')),
    ('ga', gettext_noop('Irish')),
    ('gl', gettext_noop('Galician')),
    ('hu', gettext_noop('Hungarian')),
    ('he', gettext_noop('Hebrew')),
    ('hi', gettext_noop('Hindi')),
    ('hr', gettext_noop('Croatian')),
    ('is', gettext_noop('Icelandic')),
    ('it', gettext_noop('Italian')),
    ('ja', gettext_noop('Japanese')),
    ('ka', gettext_noop('Georgian')),
    ('ko', gettext_noop('Korean')),
    ('km', gettext_noop('Khmer')),
    ('kn', gettext_noop('Kannada')),
    ('lv', gettext_noop('Latvian')),
    ('lt', gettext_noop('Lithuanian')),
    ('mk', gettext_noop('Macedonian')),
    ('nl', gettext_noop('Dutch')),
    ('no', gettext_noop('Norwegian')),
    ('pl', gettext_noop('Polish')),
    ('pt', gettext_noop('Portuguese')),
    ('pt-br', gettext_noop('Brazilian Portuguese')),
    ('ro', gettext_noop('Romanian')),
    ('ru', gettext_noop('Russian')),
    ('sk', gettext_noop('Slovak')),
    ('sl', gettext_noop('Slovenian')),
    ('sr', gettext_noop('Serbian')),
    ('sv', gettext_noop('Swedish')),
    ('ta', gettext_noop('Tamil')),
    ('te', gettext_noop('Telugu')),
    ('th', gettext_noop('Thai')),
    ('tr', gettext_noop('Turkish')),
    ('uk', gettext_noop('Ukrainian')),
    ('zh-cn', gettext_noop('Simplified Chinese')),
    ('zh-tw', gettext_noop('Traditional Chinese')),
)

# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'

# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS

# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'

# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'

# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'

# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False

# Database connection info.
DATABASE_ENGINE = ''           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {}          # Set to empty dictionary for default.

# Host for sending e-mail.
EMAIL_HOST = 'localhost'

# Port for sending e-mail.
EMAIL_PORT = 25

# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False

# List of strings representing installed apps.
INSTALLED_APPS = ()

# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()

# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
#     'django.template.loaders.eggs.load_template_source',
)

# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
#    'django.core.context_processors.request',
)

# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'

# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '

# Whether to append trailing slashes to URLs.
APPEND_SLASH = True

# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False

# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None

# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
#     import re
#     DISALLOWED_USER_AGENTS = (
#         re.compile(r'^NaverBot.*'),
#         re.compile(r'^EmailSiphon.*'),
#         re.compile(r'^SiteSucker.*'),
#         re.compile(r'^sohu-search')
#     )
DISALLOWED_USER_AGENTS = ()

ABSOLUTE_URL_OVERRIDES = {}

# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()

# If this is a admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()

# 404s that may be ignored.
IGNORABLE_404_STARTS = ('/cgi-bin/', '/_vti_bin', '/_vti_inf')
IGNORABLE_404_ENDS = ('mail.pl', 'mailform.pl', 'mail.cgi', 'mailform.cgi', 'favicon.ico', '.php')

# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''

# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''

# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)

# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB

# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None

# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None

# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
DATE_FORMAT = 'N j, Y'

# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
DATETIME_FORMAT = 'N j, Y, P'

# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
TIME_FORMAT = 'P'

# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
YEAR_MONTH_FORMAT = 'F Y'

# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#now
MONTH_DAY_FORMAT = 'F j'

# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False

# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (http://www.djangoproject.com)" % get_version()

# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''

##############
# MIDDLEWARE #
##############

# List of middleware classes to use. Order is important; in the request phase,
# this middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
#     'django.middleware.http.ConditionalGetMiddleware',
#     'django.middleware.gzip.GZipMiddleware',
)

############
# SESSIONS #
############

SESSION_COOKIE_NAME = 'sessionid'                       # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2               # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None                            # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False                           # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/'                               # The path of the session cookie.
SESSION_SAVE_EVERY_REQUEST = False                      # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False                 # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db'  # The module to store session data
SESSION_FILE_PATH = None                                # Directory to store session files if using the file session module. If None, the backend will use a sensible default.

#########
# CACHE #
#########

# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_BACKEND = 'locmem://'
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600

####################
# COMMENTS         #
####################

COMMENTS_ALLOW_PROFANITIES = False

# The profanities that will trigger a validation error in the
# 'hasNoProfanities' validator. All of these should be in lowercase.
PROFANITIES_LIST = ('asshat', 'asshead', 'asshole', 'cunt', 'fuck', 'gook', 'nigger', 'shit')

# The group ID that designates which users are banned.
# Set to None if you're not using it.
COMMENTS_BANNED_USERS_GROUP = None

# The group ID that designates which users can moderate comments.
# Set to None if you're not using it.
COMMENTS_MODERATORS_GROUP = None

# The group ID that designates the users whose comments should be e-mailed to MANAGERS.
# Set to None if you're not using it.
COMMENTS_SKETCHY_USERS_GROUP = None

# The system will e-mail MANAGERS the first COMMENTS_FIRST_FEW comments by each
# user. Set this to 0 if you want to disable it.
COMMENTS_FIRST_FEW = 0

# A tuple of IP addresses that have been banned from participating in various
# Django-powered features.
BANNED_IPS = ()

##################
# AUTHENTICATION #
##################

AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)

LOGIN_URL = '/accounts/login/'

LOGOUT_URL = '/accounts/logout/'

LOGIN_REDIRECT_URL = '/accounts/profile/'

# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3

###########
# TESTING #
###########

# The name of the method to use to invoke the test suite
TEST_RUNNER = 'django.test.simple.run_tests'

# The name of the database to use for testing purposes.
# If None, a name of 'test_' + DATABASE_NAME will be assumed
TEST_DATABASE_NAME = None

# Strings used to set the character set and collation order for the test
# database. These values are passed literally to the server, so they are
# backend-dependent. If None, no special settings are sent (system defaults are
# used).
TEST_DATABASE_CHARSET = None
TEST_DATABASE_COLLATION = None

############
# FIXTURES #
############

# The list of directories to search for fixtures
FIXTURE_DIRS = ()
CollabQ/CollabQ
vendor/django/conf/global_settings.py
Python
apache-2.0
14,562
0.00206
'''This module contains the ComplexityVisitor class which is where all the
analysis concerning Cyclomatic Complexity is done. There is also the class
HalsteadVisitor, that counts Halstead metrics.'''

import ast
import collections
import operator

# Helper functions to use in combination with map()
GET_COMPLEXITY = operator.attrgetter('complexity')
GET_REAL_COMPLEXITY = operator.attrgetter('real_complexity')
NAMES_GETTER = operator.attrgetter('name', 'asname')
GET_ENDLINE = operator.attrgetter('endline')

# Plain data carriers for the analysis results; the Function/Class wrapper
# classes below add computed properties on top of these fields.
BaseFunc = collections.namedtuple(
    'Function',
    [
        'name',
        'lineno',
        'col_offset',
        'endline',
        'is_method',
        'classname',
        'closures',
        'complexity',
    ],
)
BaseClass = collections.namedtuple(
    'Class',
    [
        'name',
        'lineno',
        'col_offset',
        'endline',
        'methods',
        'inner_classes',
        'real_complexity',
    ],
)


def code2ast(source):
    '''Convert a string object into an AST object.

    This function is retained for backwards compatibility, but it no longer
    attempts any conversions. It's equivalent to a call to ``ast.parse``.
    '''
    return ast.parse(source)


class Function(BaseFunc):
    '''Object representing a function block.'''

    @property
    def letter(self):
        '''The letter representing the function. It is `M` if the function
        is actually a method, `F` otherwise.
        '''
        return 'M' if self.is_method else 'F'

    @property
    def fullname(self):
        '''The full name of the function. If it is a method, then the
        full name is:
                {class name}.{method name}
        Otherwise it is just the function name.
        '''
        if self.classname is None:
            return self.name
        return '{0}.{1}'.format(self.classname, self.name)

    def __str__(self):
        '''String representation of a function block.'''
        return '{0} {1}:{2}->{3} {4} - {5}'.format(
            self.letter,
            self.lineno,
            self.col_offset,
            self.endline,
            self.fullname,
            self.complexity,
        )


class Class(BaseClass):
    '''Object representing a class block.'''

    letter = 'C'

    @property
    def fullname(self):
        '''The full name of the class. It is just its name. This attribute
        exists for consistency (see :data:`Function.fullname`).
        '''
        return self.name

    @property
    def complexity(self):
        '''The average complexity of the class. It corresponds to the average
        complexity of its methods plus one.
        '''
        if not self.methods:
            return self.real_complexity
        methods = len(self.methods)
        # The `(methods > 1)` term adds the +1 only for multi-method classes.
        return int(self.real_complexity / float(methods)) + (methods > 1)

    def __str__(self):
        '''String representation of a class block.'''
        return '{0} {1}:{2}->{3} {4} - {5}'.format(
            self.letter,
            self.lineno,
            self.col_offset,
            self.endline,
            self.name,
            self.complexity,
        )


class CodeVisitor(ast.NodeVisitor):
    '''Base class for every NodeVisitors in `radon.visitors`. It implements a
    couple utility class methods and a static method.
    '''

    @staticmethod
    def get_name(obj):
        '''Shorthand for ``obj.__class__.__name__``.'''
        return obj.__class__.__name__

    @classmethod
    def from_code(cls, code, **kwargs):
        '''Instantiate the class from source code (string object). The
        `**kwargs` are directly passed to the `ast.NodeVisitor` constructor.
        '''
        return cls.from_ast(code2ast(code), **kwargs)

    @classmethod
    def from_ast(cls, ast_node, **kwargs):
        '''Instantiate the class from an AST node. The `**kwargs` are
        directly passed to the `ast.NodeVisitor` constructor.
        '''
        visitor = cls(**kwargs)
        visitor.visit(ast_node)
        return visitor


class ComplexityVisitor(CodeVisitor):
    '''A visitor that keeps track of the cyclomatic complexity of
    the elements.

    :param to_method: If True, every function is treated as a method. In this
        case the *classname* parameter is used as class name.
    :param classname: Name of parent class.
    :param off: If True, the starting value for the complexity is set to 1,
        otherwise to 0.
    :param no_assert: If True, `assert` statements do not add to complexity.
    '''

    def __init__(
        self, to_method=False, classname=None, off=True, no_assert=False
    ):
        self.off = off
        self.complexity = 1 if off else 0
        self.functions = []
        self.classes = []
        self.to_method = to_method
        self.classname = classname
        self.no_assert = no_assert
        # Highest line number seen so far; -inf until any node is visited.
        self._max_line = float('-inf')

    @property
    def functions_complexity(self):
        '''The total complexity from all functions (i.e. the total number of
        decision points + 1).

        This is *not* the sum of all the complexity from the functions.
        Rather, it's the complexity of the code *inside* all the functions.
        '''
        return sum(map(GET_COMPLEXITY, self.functions)) - len(self.functions)

    @property
    def classes_complexity(self):
        '''The total complexity from all classes (i.e. the total number of
        decision points + 1).
        '''
        return sum(map(GET_REAL_COMPLEXITY, self.classes)) - len(self.classes)

    @property
    def total_complexity(self):
        '''The total complexity. Computed adding up the visitor complexity,
        the functions complexity, and the classes complexity.
        '''
        return (
            self.complexity
            + self.functions_complexity
            + self.classes_complexity
            + (not self.off)
        )

    @property
    def blocks(self):
        '''All the blocks visited. These include: all the functions, the
        classes and their methods. The returned list is not sorted.
        '''
        blocks = []
        blocks.extend(self.functions)
        for cls in self.classes:
            blocks.append(cls)
            blocks.extend(cls.methods)
        return blocks

    @property
    def max_line(self):
        '''The maximum line number among the analyzed lines.'''
        return self._max_line

    @max_line.setter
    def max_line(self, value):
        '''The maximum line number among the analyzed lines.'''
        if value > self._max_line:
            self._max_line = value

    def generic_visit(self, node):
        '''Main entry point for the visitor.'''
        # Get the name of the class
        name = self.get_name(node)
        # Check for a lineno attribute
        if hasattr(node, 'lineno'):
            self.max_line = node.lineno
        # The Try/Except block is counted as the number of handlers
        # plus the `else` block.
        # In Python 3.3 the TryExcept and TryFinally nodes have been merged
        # into a single node: Try
        if name in ('Try', 'TryExcept'):
            self.complexity += len(node.handlers) + bool(node.orelse)
        elif name == 'BoolOp':
            self.complexity += len(node.values) - 1
        # Ifs, with and assert statements count all as 1.
        # Note: Lambda functions are not counted anymore, see #68
        elif name in ('If', 'IfExp'):
            self.complexity += 1
        # The For and While blocks count as 1 plus the `else` block.
        elif name in ('For', 'While', 'AsyncFor'):
            self.complexity += bool(node.orelse) + 1
        # List, set, dict comprehensions and generator exps count as 1 plus
        # the `if` statement.
        elif name == 'comprehension':
            self.complexity += len(node.ifs) + 1

        super(ComplexityVisitor, self).generic_visit(node)

    def visit_Assert(self, node):
        '''When visiting `assert` statements, the complexity is increased only
        if the `no_assert` attribute is `False`.
        '''
        self.complexity += not self.no_assert

    def visit_AsyncFunctionDef(self, node):
        '''Async function definition is the same thing as the synchronous
        one.
        '''
        self.visit_FunctionDef(node)

    def visit_FunctionDef(self, node):
        '''When visiting functions a new visitor is created to recursively
        analyze the function's body.
        '''
        # The complexity of a function is computed taking into account
        # the following factors: number of decorators, the complexity
        # the function's body and the number of closures (which count
        # double).
        closures = []
        body_complexity = 1
        for child in node.body:
            visitor = ComplexityVisitor(off=False, no_assert=self.no_assert)
            visitor.visit(child)
            closures.extend(visitor.functions)
            # Add general complexity but not closures' complexity, see #68
            body_complexity += visitor.complexity

        # NOTE(review): `visitor` here is the one from the last loop
        # iteration; a function body always has at least one statement, so
        # the loop always runs.
        func = Function(
            node.name,
            node.lineno,
            node.col_offset,
            max(node.lineno, visitor.max_line),
            self.to_method,
            self.classname,
            closures,
            body_complexity,
        )
        self.functions.append(func)

    def visit_ClassDef(self, node):
        '''When visiting classes a new visitor is created to recursively
        analyze the class' body and methods.
        '''
        # The complexity of a class is computed taking into account
        # the following factors: number of decorators and the complexity
        # of the class' body (which is the sum of all the complexities).
        methods = []
        # According to Cyclomatic Complexity definition it has to start off
        # from 1.
        body_complexity = 1
        classname = node.name
        visitors_max_lines = [node.lineno]
        inner_classes = []
        for child in node.body:
            visitor = ComplexityVisitor(
                True, classname, off=False, no_assert=self.no_assert
            )
            visitor.visit(child)
            methods.extend(visitor.functions)
            body_complexity += (
                visitor.complexity
                + visitor.functions_complexity
                + len(visitor.functions)
            )
            visitors_max_lines.append(visitor.max_line)
            inner_classes.extend(visitor.classes)

        cls = Class(
            classname,
            node.lineno,
            node.col_offset,
            max(visitors_max_lines + list(map(GET_ENDLINE, methods))),
            methods,
            inner_classes,
            body_complexity,
        )
        self.classes.append(cls)


class HalsteadVisitor(CodeVisitor):
    '''Visitor that keeps track of operators and operands, in order to
    compute Halstead metrics (see :func:`radon.metrics.h_visit`).
    '''

    # As of Python 3.8 Num/Str/Bytes/NameConstant
    # are now in a common node Constant.
    # Maps an AST node class name to the attribute holding its operand value.
    types = {
        "Num": "n",
        "Name": "id",
        "Attribute": "attr",
        "Constant": "value",
    }

    def __init__(self, context=None):
        '''*context* is a string used to keep track the analysis' context.'''
        self.operators_seen = set()
        self.operands_seen = set()
        self.operators = 0
        self.operands = 0
        self.context = context

        # A new visitor is spawned for every scanned function.
        self.function_visitors = []

    @property
    def distinct_operators(self):
        '''The number of distinct operators.'''
        return len(self.operators_seen)

    @property
    def distinct_operands(self):
        '''The number of distinct operands.'''
        return len(self.operands_seen)

    def dispatch(meth):
        '''This decorator does all the hard work needed for every node.

        The decorated method must return a tuple of 4 elements:

            * the number of operators
            * the number of operands
            * the operators seen (a sequence)
            * the operands seen (a sequence)
        '''

        def aux(self, node):
            '''Actual function that updates the stats.'''
            results = meth(self, node)
            self.operators += results[0]
            self.operands += results[1]
            self.operators_seen.update(results[2])
            for operand in results[3]:
                # NOTE(review): this first lookup keys `self.types` by the
                # *type object* (keys are strings), so it always falls back
                # to the operand itself and is immediately overwritten by
                # the name-keyed lookup below — effectively dead code.
                new_operand = getattr(
                    operand, self.types.get(type(operand), ''), operand
                )
                name = self.get_name(operand)
                new_operand = getattr(
                    operand, self.types.get(name, ""), operand
                )

                self.operands_seen.add((self.context, new_operand))
            # Now dispatch to children
            super(HalsteadVisitor, self).generic_visit(node)

        return aux

    @dispatch
    def visit_BinOp(self, node):
        '''A binary operator.'''
        return (1, 2, (self.get_name(node.op),), (node.left, node.right))

    @dispatch
    def visit_UnaryOp(self, node):
        '''A unary operator.'''
        return (1, 1, (self.get_name(node.op),), (node.operand,))

    @dispatch
    def visit_BoolOp(self, node):
        '''A boolean operator.'''
        return (1, len(node.values), (self.get_name(node.op),), node.values)

    @dispatch
    def visit_AugAssign(self, node):
        '''An augmented assign (contains an operator).'''
        return (1, 2, (self.get_name(node.op),), (node.target, node.value))

    @dispatch
    def visit_Compare(self, node):
        '''A comparison.'''
        return (
            len(node.ops),
            len(node.comparators) + 1,
            map(self.get_name, node.ops),
            node.comparators + [node.left],
        )

    def visit_FunctionDef(self, node):
        '''When visiting functions, another visitor is created to recursively
        analyze the function's body. We also track information on the function
        itself.
        '''
        func_visitor = HalsteadVisitor(context=node.name)

        for child in node.body:
            visitor = HalsteadVisitor.from_ast(child, context=node.name)
            self.operators += visitor.operators
            self.operands += visitor.operands
            self.operators_seen.update(visitor.operators_seen)
            self.operands_seen.update(visitor.operands_seen)

            func_visitor.operators += visitor.operators
            func_visitor.operands += visitor.operands
            func_visitor.operators_seen.update(visitor.operators_seen)
            func_visitor.operands_seen.update(visitor.operands_seen)

        # Save the visited function visitor for later reference.
        self.function_visitors.append(func_visitor)

    def visit_AsyncFunctionDef(self, node):
        '''Async functions are similar to standard functions, so treat them
        as such.
        '''
        self.visit_FunctionDef(node)
rubik/radon
radon/visitors.py
Python
mit
14,879
0
# -*- coding: utf-8 -*- # <Lettuce - Behaviour Driven Development for python> # Copyright (C) <2010-2012> Gabriel Falc達o <gabriel@nacaolivre.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from os.path import dirname, abspath, join from nose.tools import with_setup from tests.asserts import prepare_stdout from tests.asserts import assert_stdout_lines from lettuce import Runner current_dir = abspath(dirname(__file__)) join_path = lambda *x: join(current_dir, *x) @with_setup(prepare_stdout) def test_output_with_success_colorless(): "Language: ja -> sucess colorless" runner = Runner(join_path('ja', 'success', 'dumb.feature'), verbosity=3, no_color=True) runner.run() assert_stdout_lines( u"\n" u"フィーチャ: ダムフィーチャ # tests/functional/language_specific_features/ja/success/dumb.feature:3\n" u" テストをグリーンになればテスト成功 # tests/functional/language_specific_features/ja/success/dumb.feature:4\n" u"\n" u" シナリオ: 何もしない # tests/functional/language_specific_features/ja/success/dumb.feature:6\n" u" 前提 何もしない # tests/functional/language_specific_features/ja/success/dumb_steps.py:6\n" u"\n" u"1 feature (1 passed)\n" u"1 scenario (1 passed)\n" u"1 step (1 passed)\n" ) @with_setup(prepare_stdout) def test_output_of_table_with_success_colorless(): "Language: ja -> sucess table colorless" runner = Runner(join_path('ja', 'success', 'table.feature'), verbosity=3, no_color=True) runner.run() assert_stdout_lines( u"\n" 
u"フィーチャ: テーブル記法 # tests/functional/language_specific_features/ja/success/table.feature:3\n" u" 日本語でのテーブル記法がパスするかのテスト # tests/functional/language_specific_features/ja/success/table.feature:4\n" u"\n" u" シナリオ: 何もしないテーブル # tests/functional/language_specific_features/ja/success/table.feature:6\n" u" 前提 データは以下: # tests/functional/language_specific_features/ja/success/table_steps.py:6\n" u" | id | 定義 |\n" u" | 12 | 何かの定義 |\n" u" | 64 | 別の定義 |\n" u"\n" u"1 feature (1 passed)\n" u"1 scenario (1 passed)\n" u"1 step (1 passed)\n" ) @with_setup(prepare_stdout) def test_output_outlines_success_colorless(): "Language: ja -> sucess outlines colorless" runner = Runner(join_path('ja', 'success', 'outlines.feature'), verbosity=3, no_color=True) runner.run() assert_stdout_lines( u"\n" u"フィーチャ: アウトラインを日本語で書く # tests/functional/language_specific_features/ja/success/outlines.feature:3\n" u" 図表のテストをパスすること # tests/functional/language_specific_features/ja/success/outlines.feature:4\n" u"\n" u" シナリオアウトライン: 全てのテストで何もしない # tests/functional/language_specific_features/ja/success/outlines.feature:6\n" u" 前提 入力値を <データ1> とし # tests/functional/language_specific_features/ja/success/outlines_steps.py:13\n" u" もし 処理 <方法> を使って # tests/functional/language_specific_features/ja/success/outlines_steps.py:22\n" u" ならば 表示は <結果> である # tests/functional/language_specific_features/ja/success/outlines_steps.py:31\n" u"\n" u" 例:\n" u" | データ1 | 方法 | 結果 |\n" u" | 何か | これ | 機能 |\n" u" | その他 | ここ | 同じ |\n" u" | データ | 動く | unicodeで! 
|\n" u"\n" u"1 feature (1 passed)\n" u"3 scenarios (3 passed)\n" u"9 steps (9 passed)\n" ) @with_setup(prepare_stdout) def test_output_outlines_success_colorful(): "Language: ja -> sucess outlines colorful" runner = Runner(join_path('ja', 'success', 'outlines.feature'), verbosity=3, no_color=False) runner.run() assert_stdout_lines( u'\n' u"\033[1;37mフィーチャ: アウトラインを日本語で書く \033[1;30m# tests/functional/language_specific_features/ja/success/outlines.feature:3\033[0m\n" u"\033[1;37m 図表のテストをパスすること \033[1;30m# tests/functional/language_specific_features/ja/success/outlines.feature:4\033[0m\n" u'\n' u"\033[1;37m シナリオアウトライン: 全てのテストで何もしない \033[1;30m# tests/functional/language_specific_features/ja/success/outlines.feature:6\033[0m\n" u"\033[0;36m 前提 入力値を <データ1> とし \033[1;30m# tests/functional/language_specific_features/ja/success/outlines_steps.py:13\033[0m\n" u"\033[0;36m もし 処理 <方法> を使って \033[1;30m# tests/functional/language_specific_features/ja/success/outlines_steps.py:22\033[0m\n" u"\033[0;36m ならば 表示は <結果> である \033[1;30m# tests/functional/language_specific_features/ja/success/outlines_steps.py:31\033[0m\n" u'\n' u"\033[1;37m 例:\033[0m\n" u"\033[0;36m \033[1;37m |\033[0;36m データ1\033[1;37m |\033[0;36m 方法\033[1;37m |\033[0;36m 結果 \033[1;37m |\033[0;36m\033[0m\n" u"\033[1;32m \033[1;37m |\033[1;32m 何か \033[1;37m |\033[1;32m これ\033[1;37m |\033[1;32m 機能 \033[1;37m |\033[1;32m\033[0m\n" u"\033[1;32m \033[1;37m |\033[1;32m その他 \033[1;37m |\033[1;32m ここ\033[1;37m |\033[1;32m 同じ \033[1;37m |\033[1;32m\033[0m\n" u"\033[1;32m \033[1;37m |\033[1;32m データ \033[1;37m |\033[1;32m 動く\033[1;37m |\033[1;32m unicodeで!\033[1;37m |\033[1;32m\033[0m\n" u'\n' u"\033[1;37m1 feature (\033[1;32m1 passed\033[1;37m)\033[0m\n" u"\033[1;37m3 scenarios (\033[1;32m3 passed\033[1;37m)\033[0m\n" u"\033[1;37m9 steps (\033[1;32m9 passed\033[1;37m)\033[0m\n" )
yangming85/lettuce
tests/functional/language_specific_features/test_ja.py
Python
gpl-3.0
6,962
0.005402
#!/usr/bin/env python """Output plugins implementations.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from grr_response_server import output_plugin # pylint: disable=unused-import,g-import-not-at-top try: from grr_response_server.output_plugins import bigquery_plugin except ImportError: pass from grr_response_server.output_plugins import csv_plugin from grr_response_server.output_plugins import email_plugin from grr_response_server.output_plugins import splunk_plugin from grr_response_server.output_plugins import sqlite_plugin from grr_response_server.output_plugins import yaml_plugin
dunkhong/grr
grr/server/grr_response_server/output_plugins/__init__.py
Python
apache-2.0
668
0.002994
"""Helper to help store data.""" from __future__ import annotations import asyncio from collections.abc import Callable from contextlib import suppress from json import JSONEncoder import logging import os from typing import Any from homeassistant.const import EVENT_HOMEASSISTANT_FINAL_WRITE from homeassistant.core import CALLBACK_TYPE, CoreState, Event, HomeAssistant, callback from homeassistant.helpers.event import async_call_later from homeassistant.loader import MAX_LOAD_CONCURRENTLY, bind_hass from homeassistant.util import json as json_util # mypy: allow-untyped-calls, allow-untyped-defs, no-warn-return-any # mypy: no-check-untyped-defs STORAGE_DIR = ".storage" _LOGGER = logging.getLogger(__name__) STORAGE_SEMAPHORE = "storage_semaphore" @bind_hass async def async_migrator( hass, old_path, store, *, old_conf_load_func=None, old_conf_migrate_func=None, ): """Migrate old data to a store and then load data. async def old_conf_migrate_func(old_data) """ store_data = await store.async_load() # If we already have store data we have already migrated in the past. 
if store_data is not None: return store_data def load_old_config(): """Load old config.""" if not os.path.isfile(old_path): return None if old_conf_load_func is not None: return old_conf_load_func(old_path) return json_util.load_json(old_path) config = await hass.async_add_executor_job(load_old_config) if config is None: return None if old_conf_migrate_func is not None: config = await old_conf_migrate_func(config) await store.async_save(config) await hass.async_add_executor_job(os.remove, old_path) return config @bind_hass class Store: """Class to help storing data.""" def __init__( self, hass: HomeAssistant, version: int, key: str, private: bool = False, *, encoder: type[JSONEncoder] | None = None, ) -> None: """Initialize storage class.""" self.version = version self.key = key self.hass = hass self._private = private self._data: dict[str, Any] | None = None self._unsub_delay_listener: CALLBACK_TYPE | None = None self._unsub_final_write_listener: CALLBACK_TYPE | None = None self._write_lock = asyncio.Lock() self._load_task: asyncio.Future | None = None self._encoder = encoder @property def path(self): """Return the config path.""" return self.hass.config.path(STORAGE_DIR, self.key) async def async_load(self) -> dict | list | None: """Load data. If the expected version does not match the given version, the migrate function will be invoked with await migrate_func(version, config). Will ensure that when a call comes in while another one is in progress, the second call will wait and return the result of the first call. 
""" if self._load_task is None: self._load_task = self.hass.async_create_task(self._async_load()) return await self._load_task async def _async_load(self): """Load the data and ensure the task is removed.""" if STORAGE_SEMAPHORE not in self.hass.data: self.hass.data[STORAGE_SEMAPHORE] = asyncio.Semaphore(MAX_LOAD_CONCURRENTLY) try: async with self.hass.data[STORAGE_SEMAPHORE]: return await self._async_load_data() finally: self._load_task = None async def _async_load_data(self): """Load the data.""" # Check if we have a pending write if self._data is not None: data = self._data # If we didn't generate data yet, do it now. if "data_func" in data: data["data"] = data.pop("data_func")() else: data = await self.hass.async_add_executor_job( json_util.load_json, self.path ) if data == {}: return None if data["version"] == self.version: stored = data["data"] else: _LOGGER.info( "Migrating %s storage from %s to %s", self.key, data["version"], self.version, ) stored = await self._async_migrate_func(data["version"], data["data"]) return stored async def async_save(self, data: dict | list) -> None: """Save data.""" self._data = {"version": self.version, "key": self.key, "data": data} if self.hass.state == CoreState.stopping: self._async_ensure_final_write_listener() return await self._async_handle_write_data() @callback def async_delay_save(self, data_func: Callable[[], dict], delay: float = 0) -> None: """Save data with an optional delay.""" self._data = {"version": self.version, "key": self.key, "data_func": data_func} self._async_cleanup_delay_listener() self._async_ensure_final_write_listener() if self.hass.state == CoreState.stopping: return self._unsub_delay_listener = async_call_later( self.hass, delay, self._async_callback_delayed_write ) @callback def _async_ensure_final_write_listener(self) -> None: """Ensure that we write if we quit before delay has passed.""" if self._unsub_final_write_listener is None: self._unsub_final_write_listener = 
self.hass.bus.async_listen_once( EVENT_HOMEASSISTANT_FINAL_WRITE, self._async_callback_final_write ) @callback def _async_cleanup_final_write_listener(self) -> None: """Clean up a stop listener.""" if self._unsub_final_write_listener is not None: self._unsub_final_write_listener() self._unsub_final_write_listener = None @callback def _async_cleanup_delay_listener(self) -> None: """Clean up a delay listener.""" if self._unsub_delay_listener is not None: self._unsub_delay_listener() self._unsub_delay_listener = None async def _async_callback_delayed_write(self, _now): """Handle a delayed write callback.""" # catch the case where a call is scheduled and then we stop Home Assistant if self.hass.state == CoreState.stopping: self._async_ensure_final_write_listener() return await self._async_handle_write_data() async def _async_callback_final_write(self, _event: Event) -> None: """Handle a write because Home Assistant is in final write state.""" self._unsub_final_write_listener = None await self._async_handle_write_data() async def _async_handle_write_data(self, *_args): """Handle writing the config.""" async with self._write_lock: self._async_cleanup_delay_listener() self._async_cleanup_final_write_listener() if self._data is None: # Another write already consumed the data return data = self._data if "data_func" in data: data["data"] = data.pop("data_func")() self._data = None try: await self.hass.async_add_executor_job( self._write_data, self.path, data ) except (json_util.SerializationError, json_util.WriteError) as err: _LOGGER.error("Error writing config for %s: %s", self.key, err) def _write_data(self, path: str, data: dict) -> None: """Write the data.""" if not os.path.isdir(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) _LOGGER.debug("Writing data for %s to %s", self.key, path) json_util.save_json(path, data, self._private, encoder=self._encoder) async def _async_migrate_func(self, old_version, old_data): """Migrate to the new version.""" raise 
NotImplementedError async def async_remove(self) -> None: """Remove all data.""" self._async_cleanup_delay_listener() self._async_cleanup_final_write_listener() with suppress(FileNotFoundError): await self.hass.async_add_executor_job(os.unlink, self.path)
aronsky/home-assistant
homeassistant/helpers/storage.py
Python
apache-2.0
8,433
0.00083
# Copyright (c) 2011 Openstack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handles all requests relating to schedulers. """ import novaclient from nova import db from nova import exception from nova import flags from nova import log as logging from nova import rpc from eventlet import greenpool FLAGS = flags.FLAGS flags.DEFINE_bool('enable_zone_routing', False, 'When True, routing to child zones will occur.') LOG = logging.getLogger('nova.scheduler.api') def _call_scheduler(method, context, params=None): """Generic handler for RPC calls to the scheduler. 
:param params: Optional dictionary of arguments to be passed to the scheduler worker :retval: Result returned by scheduler worker """ if not params: params = {} queue = FLAGS.scheduler_topic kwargs = {'method': method, 'args': params} return rpc.call(context, queue, kwargs) def get_zone_list(context): """Return a list of zones assoicated with this zone.""" items = _call_scheduler('get_zone_list', context) for item in items: item['api_url'] = item['api_url'].replace('\\/', '/') if not items: items = db.zone_get_all(context) return items def zone_get(context, zone_id): return db.zone_get(context, zone_id) def zone_delete(context, zone_id): return db.zone_delete(context, zone_id) def zone_create(context, data): return db.zone_create(context, data) def zone_update(context, zone_id, data): return db.zone_update(context, zone_id, data) def get_zone_capabilities(context): """Returns a dict of key, value capabilities for this zone.""" return _call_scheduler('get_zone_capabilities', context=context) def select(context, specs=None): """Returns a list of hosts.""" return _call_scheduler('select', context=context, params={"specs": specs}) def update_service_capabilities(context, service_name, host, capabilities): """Send an update to all the scheduler services informing them of the capabilities of this service.""" kwargs = dict(method='update_service_capabilities', args=dict(service_name=service_name, host=host, capabilities=capabilities)) return rpc.fanout_cast(context, 'scheduler', kwargs) def _wrap_method(function, self): """Wrap method to supply self.""" def _wrap(*args, **kwargs): return function(self, *args, **kwargs) return _wrap def _process(func, zone): """Worker stub for green thread pool. 
Give the worker an authenticated nova client and zone info.""" nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url) nova.authenticate() return func(nova, zone) def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs): """Returns a list of (zone, call_result) objects.""" if not isinstance(errors_to_ignore, (list, tuple)): # This will also handle the default None errors_to_ignore = [errors_to_ignore] pool = greenpool.GreenPool() results = [] for zone in db.zone_get_all(context): try: nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url) nova.authenticate() except novaclient.exceptions.BadRequest, e: url = zone.api_url LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s") % locals()) #TODO (dabo) - add logic for failure counts per zone, # with escalation after a given number of failures. continue zone_method = getattr(nova.zones, method) def _error_trap(*args, **kwargs): try: return zone_method(*args, **kwargs) except Exception as e: if type(e) in errors_to_ignore: return None # TODO (dabo) - want to be able to re-raise here. # Returning a string now; raising was causing issues. # raise e return "ERROR", "%s" % e res = pool.spawn(_error_trap, *args, **kwargs) results.append((zone, res)) pool.waitall() return [(zone.id, res.wait()) for zone, res in results] def child_zone_helper(zone_list, func): """Fire off a command to each zone in the list. The return is [novaclient return objects] from each child zone. For example, if you are calling server.pause(), the list will be whatever the response from server.pause() is. One entry per child zone called.""" green_pool = greenpool.GreenPool() return [result for result in green_pool.imap( _wrap_method(_process, func), zone_list)] def _issue_novaclient_command(nova, zone, collection, method_name, item_id): """Use novaclient to issue command to a single child zone. 
One of these will be run in parallel for each child zone.""" manager = getattr(nova, collection) result = None try: try: result = manager.get(int(item_id)) except ValueError, e: result = manager.find(name=item_id) except novaclient.NotFound: url = zone.api_url LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" % locals())) return None if method_name.lower() not in ['get', 'find']: result = getattr(result, method_name)() return result def wrap_novaclient_function(f, collection, method_name, item_id): """Appends collection, method_name and item_id to the incoming (nova, zone) call from child_zone_helper.""" def inner(nova, zone): return f(nova, zone, collection, method_name, item_id) return inner class RedirectResult(exception.Error): """Used to the HTTP API know that these results are pre-cooked and they can be returned to the caller directly.""" def __init__(self, results): self.results = results super(RedirectResult, self).__init__( message=_("Uncaught Zone redirection exception")) class reroute_compute(object): """Decorator used to indicate that the method should delegate the call the child zones if the db query can't find anything.""" def __init__(self, method_name): self.method_name = method_name def __call__(self, f): def wrapped_f(*args, **kwargs): collection, context, item_id = \ self.get_collection_context_and_id(args, kwargs) try: # Call the original function ... return f(*args, **kwargs) except exception.InstanceNotFound, e: LOG.debug(_("Instance %(item_id)s not found " "locally: '%(e)s'" % locals())) if not FLAGS.enable_zone_routing: raise zones = db.zone_get_all(context) if not zones: raise # Ask the children to provide an answer ... LOG.debug(_("Asking child zones ...")) result = self._call_child_zones(zones, wrap_novaclient_function(_issue_novaclient_command, collection, self.method_name, item_id)) # Scrub the results and raise another exception # so the API layers can bail out gracefully ... 
raise RedirectResult(self.unmarshall_result(result)) return wrapped_f def _call_child_zones(self, zones, function): """Ask the child zones to perform this operation. Broken out for testing.""" return child_zone_helper(zones, function) def get_collection_context_and_id(self, args, kwargs): """Returns a tuple of (novaclient collection name, security context and resource id. Derived class should override this.""" context = kwargs.get('context', None) instance_id = kwargs.get('instance_id', None) if len(args) > 0 and not context: context = args[1] if len(args) > 1 and not instance_id: instance_id = args[2] return ("servers", context, instance_id) def unmarshall_result(self, zone_responses): """Result is a list of responses from each child zone. Each decorator derivation is responsible to turning this into a format expected by the calling method. For example, this one is expected to return a single Server dict {'server':{k:v}}. Others may return a list of them, like {'servers':[{k,v}]}""" reduced_response = [] for zone_response in zone_responses: if not zone_response: continue server = zone_response.__dict__ for k in server.keys(): if k[0] == '_' or k == 'manager': del server[k] reduced_response.append(dict(server=server)) if reduced_response: return reduced_response[0] # first for now. return {} def redirect_handler(f): def new_f(*args, **kwargs): try: return f(*args, **kwargs) except RedirectResult, e: return e.results return new_f
superstack/nova
nova/scheduler/api.py
Python
apache-2.0
9,884
0.001012
"""Tests for QuTiP's MKL-backed sparse linear solvers.

Covers ``mkl_spsolve`` (one-shot direct solves) and ``mkl_splu`` (a reusable
LU factorization).  Every test in this module is skipped when QuTiP was
built without the Intel MKL extensions (see ``pytestmark`` below).
"""
import pytest
import numpy as np
import scipy.linalg
import scipy.sparse
import qutip

# Only import the MKL wrappers when they exist; the skipif mark below
# guarantees no test body runs without them.
if qutip.settings.has_mkl:
    from qutip._mkl.spsolve import mkl_splu, mkl_spsolve

# Applied to every test in this module.
pytestmark = [
    pytest.mark.skipif(not qutip.settings.has_mkl,
                       reason='MKL extensions not found.'),
]


class Test_spsolve:
    """Tests for the one-shot direct solver ``mkl_spsolve``."""

    def test_single_rhs_vector_real(self):
        """Solve A x = b for a single real right-hand-side vector."""
        Adense = np.array([[0, 1, 1],
                           [1, 0, 1],
                           [0, 0, 1]])
        As = scipy.sparse.csr_matrix(Adense)
        np.random.seed(1234)  # deterministic right-hand side
        x = np.random.randn(3)
        b = As * x
        x2 = mkl_spsolve(As, b, verbose=True)
        np.testing.assert_allclose(x, x2)

    def test_single_rhs_vector_complex(self):
        """Solve A x = b for a single complex right-hand-side vector."""
        A = qutip.rand_herm(10)
        x = qutip.rand_ket(10).full()
        b = A.full() @ x
        y = mkl_spsolve(A.data, b, verbose=True)
        np.testing.assert_allclose(x, y)

    @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
    def test_multi_rhs_vector(self, dtype):
        """A matrix right-hand side matches scipy's dense solve."""
        M = np.array([
            [1, 0, 2],
            [0, 0, 3],
            [-4, 5, 6],
        ], dtype=dtype)
        sM = scipy.sparse.csr_matrix(M)
        N = np.array([
            [3, 0, 1],
            [0, 2, 0],
            [0, 0, 0],
        ], dtype=dtype)
        sX = mkl_spsolve(sM, N, verbose=True)
        X = scipy.linalg.solve(M, N)
        np.testing.assert_allclose(X, sX)

    def test_rhs_shape_is_maintained(self):
        """The solution keeps the shape of b, both 1-D and (n, 1)."""
        A = scipy.sparse.csr_matrix(np.array([
            [1, 0, 2],
            [0, 0, 3],
            [-4, 5, 6],
        ], dtype=np.complex128))
        b = np.array([0, 2, 0], dtype=np.complex128)
        out = mkl_spsolve(A, b, verbose=True)
        assert b.shape == out.shape
        # Same system, but with an explicit column-vector shape.
        b = np.array([0, 2, 0], dtype=np.complex128).reshape((3, 1))
        out = mkl_spsolve(A, b, verbose=True)
        assert b.shape == out.shape

    def test_sparse_rhs(self):
        """A sparse right-hand side yields a sparse solution."""
        A = scipy.sparse.csr_matrix([
            [1, 2, 0],
            [0, 3, 0],
            [0, 0, 5],
        ])
        b = scipy.sparse.csr_matrix([
            [0, 1],
            [1, 0],
            [0, 0],
        ])
        x = mkl_spsolve(A, b, verbose=True)
        ans = np.array([[-0.66666667, 1],
                        [0.33333333, 0],
                        [0, 0]])
        np.testing.assert_allclose(x.toarray(), ans)

    @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
    def test_symmetric_solver(self, dtype):
        """Solving with hermitian=1 returns the correct solution."""
        A = qutip.rand_herm(np.arange(1, 11)).data
        if dtype == np.float64:
            # Take the real part so the test matrix matches the real dtype.
            A = A.real
        x = np.ones(10, dtype=dtype)
        b = A.dot(x)
        y = mkl_spsolve(A, b, hermitian=1, verbose=True)
        np.testing.assert_allclose(x, y)


class Test_splu:
    """Tests for the reusable LU factorization ``mkl_splu``."""

    @pytest.mark.parametrize('dtype', [np.float64, np.complex128])
    def test_repeated_rhs_solve(self, dtype):
        """One factorization is reused to solve several right-hand sides."""
        M = np.array([
            [1, 0, 2],
            [0, 0, 3],
            [-4, 5, 6],
        ], dtype=dtype)
        sM = scipy.sparse.csr_matrix(M)
        N = np.array([
            [3, 0, 1],
            [0, 2, 0],
            [0, 0, 0],
        ], dtype=dtype)
        test_X = np.zeros((3, 3), dtype=dtype)
        lu = mkl_splu(sM, verbose=True)
        for k in range(3):
            # Solve column-by-column against the same factorization.
            test_X[:, k] = lu.solve(N[:, k])
        lu.delete()  # explicitly release the factorization handle
        expected_X = scipy.linalg.solve(M, N)
        np.testing.assert_allclose(test_X, expected_X)
qutip/qutip
qutip/tests/test_mkl.py
Python
bsd-3-clause
3,447
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.

# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

"""
Example using MySQL Connector/Python showing:
* that show engines works..
"""

import sys, os

import mysql.connector


def main(config):
    """Run SHOW ENGINES and return the result rows.

    config: dict of connection arguments passed straight through to
        mysql.connector.Connect() (host, user, password, database, ...).

    Returns a list with one repr()'d row per storage engine reported by
    the server.
    """
    output = []
    db = mysql.connector.Connect(**config)
    try:
        cursor = db.cursor()
        try:
            # Ask the server which storage engines it supports.
            stmt_select = "SHOW ENGINES"
            cursor.execute(stmt_select)
            for row in cursor.fetchall():
                output.append(repr(row))
        finally:
            cursor.close()
    finally:
        # Always release the connection; the previous version leaked it
        # whenever the query raised an exception.
        db.close()
    return output


if __name__ == '__main__':
    #
    # Configure MySQL login and database to use in config.py
    #
    import config
    config = config.Config.dbinfo().copy()
    out = main(config)
    print('\n'.join(out))
mitchcapper/mythbox
resources/lib/mysql-connector-python/python3/examples/engines.py
Python
gpl-2.0
1,836
0.002179
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # EDPC Mentoring Database documentation build configuration file, created by # sphinx-quickstart on Thu Apr 28 23:28:25 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import django # on_rtd is whether we are on readthedocs.org, this line of code grabbed from # docs.readthedocs.org on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # Add the project top-level directory to the import path so that we may find # Django applications. sys.path.insert(0, os.path.abspath(os.path.join('..', 'edpcmentoring'))) # Since we document some classes which make use of Django constructs, we need to # make sure Django is configured. os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edpcmentoring.settings_development") django.setup() # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. 
templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'EDPC Mentoring Database' copyright = '2016, EDPC' author = 'EDPC' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1.0' # The full version, including alpha/beta/rc tags. release = '0.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. # "<project> v<release> documentation" by default. #html_title = 'EDPC Mentoring Database v0.1.0' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. 
# The empty string is equivalent to '%b %d, %Y'. #html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. 
htmlhelp_basename = 'EDPCMentoringDatabasedoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'EDPCMentoringDatabase.tex', 'EDPC Mentoring Database Documentation', 'EDPC', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'edpcmentoringdatabase', 'EDPC Mentoring Database Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'EDPCMentoringDatabase', 'EDPC Mentoring Database Documentation', author, 'EDPCMentoringDatabase', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
rjw57/edpcmentoring
docs/conf.py
Python
mit
10,393
0.005966
""" Serialize data to/from JSON """ # Avoid shadowing the standard library json module from __future__ import absolute_import, unicode_literals import datetime import decimal import json import sys import uuid from io import BytesIO from django.core.serializers.base import DeserializationError from django.core.serializers.python import ( Deserializer as PythonDeserializer, Serializer as PythonSerializer, ) from django.core.serializers.json import DjangoJSONEncoder from django.utils import six from django.utils.timezone import is_aware class Serializer(PythonSerializer): """ Convert a queryset to JSON. """ internal_use_only = False def _init_options(self): if json.__version__.split('.') >= ['2', '1', '3']: # Use JS strings to represent Python Decimal instances (ticket #16850) self.options.update({'use_decimal': False}) self._current = None self.json_kwargs = self.options.copy() self.json_kwargs.pop('stream', None) self.json_kwargs.pop('fields', None) def start_serialization(self): self._init_options() def end_serialization(self): ''' Do nothing ''' def end_object(self, obj): # self._current has the field data json.dump(self.get_dump_object(obj), self.stream, cls=DjangoJSONEncoder, **self.json_kwargs) self.stream.write('\n') self._current = None def getvalue(self): # Grand-parent super return super(PythonSerializer, self).getvalue() def Deserializer(stream_or_string, **options): """ Deserialize a stream or string of JSON data. """ if isinstance(stream_or_string, (bytes, six.string_types)): stream_or_string = BytesIO(stream_or_string) try: def line_generator(): for line in stream_or_string: yield json.loads(line.strip()) for obj in PythonDeserializer(line_generator(), **options): yield obj except GeneratorExit: raise except Exception as e: # Map to deserializer error six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
superisaac/django-mljson-serializer
django_mljson/serializer.py
Python
mit
2,206
0.001813
# Python - 3.6.0 test.assert_equals(generateShape(3), '+++\n+++\n+++') test.assert_equals(generateShape(8), '++++++++\n++++++++\n++++++++\n++++++++\n++++++++\n++++++++\n++++++++\n++++++++')
RevansChen/online-judge
Codewars/7kyu/build-a-square/Python/test.py
Python
mit
191
0.005236
# -*- coding: utf-8; fill-column: 78 -*- import collections import itertools import operator from flatland.schema.paths import pathexpr from flatland.signals import validator_validated from flatland.util import ( Unspecified, assignable_class_property, class_cloner, named_int_factory, symbol, ) __all__ = 'Element' NoneType = type(None) Root = symbol('Root') NotEmpty = symbol('NotEmpty') Skip = named_int_factory('Skip', True, doc="""\ Abort validation of the element & mark as valid. """) SkipAll = named_int_factory('SkipAll', True, doc="""\ Abort validation of the element and its children & mark as valid. The :attr:`~Element.valid` of child elements will not be changed by skipping. Unless otherwise set, the child elements will retain the default value (:obj:`Unevaluated`). Only meaningful during a decent validation. Functions as :obj:`Skip` on upward validation. """) SkipAllFalse = named_int_factory('SkipAllFalse', False, doc="""\ Aborts validation of the element and its children & mark as invalid. The :attr:`~Element.valid` of child elements will not be changed by skipping. Unless otherwise set, the child elements will retain the default value (:obj:`Unevaluated`). Only meaningful during a decent validation. Functions as ``False`` on upward validation. """) Unevaluated = named_int_factory('Unevaluated', True, doc="""\ A psuedo-boolean representing a presumptively valid state. Assigned to newly created elements that have never been evaluated by :meth:`Element.validate`. Evaluates to true. """) # TODO: implement a lighter version of the xml quoters xml = None class _BaseElement(object): # Required by the genshi support's __bases__ manipulation, unfortunately. pass class Element(_BaseElement): """Base class for form fields. A data node that stores a Python and a text value plus added state. """ name = None """The Unicode name of the element.""" optional = False """If True, :meth:`validate` with return True if no value has been set. 
:attr:`validators` are not called for optional, empty elements. """ validators = () """A sequence of validators, invoked by :meth:`validate`. See `Validation`_ """ default = None """The default value of this element.""" default_factory = None """A callable to generate default element values. Passed an element. *default_factory* will be used preferentially over :attr:`default`. """ ugettext = None """If set, provides translation support to validation messages. See `Message Internationalization`_. """ ungettext = None """If set, provides translation support to validation messages. See `Message Internationalization`_. """ value = None """The element's native Python value. Only validation routines should write this attribute directly: use :meth:`set` to update the element's value. """ u = u'' """A Unicode representation of the element's value. As in :attr:`value`, writing directly to this attribute should be restricted to validation routines. """ flattenable = False children_flattenable = True validates_down = None validates_up = None def __init__(self, value=Unspecified, **kw): self.parent = kw.pop('parent', None) self.valid = Unevaluated self.errors = [] self.warnings = [] # FIXME This (and 'using') should also do descent_validators # via lookup - or don't copy at all if 'validators' in kw: kw['validators'] = list(kw['validators']) for attribute, override in kw.items(): if hasattr(self, attribute): setattr(self, attribute, override) else: raise TypeError( "%r is an invalid keyword argument: not a known " "argument or an overridable class property of %s" % ( attribute, type(self).__name__)) if value is not Unspecified: self.set(value) @class_cloner def named(cls, name): """Return a class with ``name`` = *name* :param name: a string or None. ``str`` will be converted to ``unicode``. 
:returns: a new class """ if not isinstance(name, (unicode, NoneType)): name = unicode(name) cls.name = name return cls @class_cloner def using(cls, **overrides): """Return a class with attributes set from *\*\*overrides*. :param \*\*overrides: new values for any attributes already present on the class. A ``TypeError`` is raised for unknown attributes. :returns: a new class """ # TODO: See TODO in __init__ if 'validators' in overrides: overrides['validators'] = list(overrides['validators']) for attribute, value in overrides.iteritems(): # TODO: must make better if callable(value): value = staticmethod(value) if hasattr(cls, attribute): setattr(cls, attribute, value) continue raise TypeError( "%r is an invalid keyword argument: not a known " "argument or an overridable class property of %s" % ( attribute, cls.__name__)) return cls @class_cloner def validated_by(cls, *validators): """Return a class with validators set to *\*validators*. :param \*validators: one or more validator functions, replacing any validators present on the class. :returns: a new class """ # TODO: See TODO in __init__ for validator in validators: # metaclass gymnastics can fool this assertion. don't do that. if isinstance(validator, type): raise TypeError( "Validator %r is a type, not a callable or instance of a" "validator class. Did you mean %r()?" % ( validator, validator)) cls.validators = list(validators) return cls @class_cloner def including_validators(cls, *validators, **kw): """Return a class with additional *\*validators*. :param \*validators: one or more validator functions :param position: defaults to -1. By default, additional validators are placed after existing validators. Use 0 for before, or any other list index to splice in *validators* at that point. 
:returns: a new class """ position = kw.pop('position', -1) if kw: raise TypeError('including_validators() got an ' 'unexpected keyword argument %r' % ( kw.popitem()[0])) mutable = list(cls.validators) if position < 0: position = len(mutable) + 1 + position mutable[position:position] = list(validators) cls.validators = mutable return cls def validate_element(self, element, state, descending): """Assess the validity of an element. TODO: this method is dead. Evaluate docstring for good bits that should be elsewhere. :param element: an :class:`Element` :param state: may be None, an optional value of supplied to ``element.validate`` :param descending: a boolean, True the first time the element has been seen in this run, False the next :returns: boolean; a truth value or None The :meth:`Element.validate` process visits each element in the tree twice: once heading down the tree, breadth-first, and again heading back up in the reverse direction. Scalar fields will typically validate on the first pass, and containers on the second. Return no value or None to ``pass``, accepting the element as presumptively valid. Exceptions raised by :meth:`validate_element` will not be caught by :meth:`Element.validate`. Directly modifying and normalizing :attr:`Element.value` and :attr:`Element.u` within a validation routine is acceptable. The standard implementation of validate_element is: - If :attr:`element.is_empty` and :attr:`self.optional`, return True. - If :attr:`self.validators` is empty and :attr:`element.is_empty`, return False. - If :attr:`self.validators` is empty and not :attr:`element.is_empty`, return True. - Iterate through :attr:`self.validators`, calling each member with (*element*, *state*). If one returns a false value, stop iterating and return False immediately. - Otherwise return True. """ return validate_element(element, state, self.validators) @classmethod def from_flat(cls, pairs, **kw): """Return a new element with its value initialized from *pairs*. 
:param \*\*kw: passed through to the :attr:`element_type`. .. testsetup:: import flatland cls = flatland.String pairs = kw = {} This is a convenience constructor for: .. testcode:: element = cls(**kw) element.set_flat(pairs) """ element = cls(**kw) element.set_flat(pairs) return element @classmethod def from_defaults(cls, **kw): """Return a new element with its value initialized from field defaults. :param \*\*kw: passed through to the :attr:`element_type`. .. testsetup:: import flatland cls = flatland.String kw = {} This is a convenience constructor for: .. testcode:: element = cls(**kw) element.set_default() """ element = cls(**kw) element.set_default() return element def __eq__(self, other): try: return self.value == other.value and self.u == other.u except AttributeError: return False def __ne__(self, other): return not self.__eq__(other) @assignable_class_property def label(self, cls): """The label of this element. If unassigned, the *label* will evaluate to the :attr:`name`. """ return cls.name if self is None else self.name def _get_all_valid(self): """True if this element and all children are valid.""" if not self.valid: return False for element in self.all_children: if not element.valid: return False return True def _set_all_valid(self, value): self.valid = value for element in self.all_children: element.valid = value all_valid = property(_get_all_valid, _set_all_valid) del _get_all_valid, _set_all_valid @property def root(self): """The top-most parent of the element.""" try: return list(self.parents)[-1] except IndexError: return self @property def parents(self): """An iterator of all parent elements.""" element = self.parent while element is not None: yield element element = element.parent raise StopIteration() @property def path(self): """An iterator of all elements from root to the Element, inclusive.""" return itertools.chain(reversed(list(self.parents)), (self,)) @property def children(self): """An iterator of immediate child elements.""" return 
iter(()) @property def all_children(self): """An iterator of all child elements, breadth-first.""" seen, queue = set((id(self),)), collections.deque(self.children) while queue: element = queue.popleft() if id(element) in seen: continue seen.add(id(element)) yield element queue.extend(element.children) def fq_name(self, sep=u'.'): """Return the fully qualified path name of the element. Returns a *sep*-separated string of :meth:`.el` compatible element indexes starting from the :attr:`Element.root` (``.``) down to the element. >>> from flatland import Dict, Integer >>> Point = Dict.named(u'point').of(Integer.named(u'x'), ... Integer.named(u'y')) >>> p = Point(dict(x=10, y=20)) >>> p.name u'point' >>> p.fq_name() u'.' >>> p['x'].name u'x' >>> p['x'].fq_name() u'.x' The index used in a path may not be the :attr:`.name` of the element. For example, sequence members are referenced by their numeric index. >>> from flatland import List, String >>> Addresses = List.named('addresses').of(String.named('address')) >>> form = Addresses([u'uptown', u'downtown']) >>> form.name u'addresses' >>> form.fq_name() u'.' >>> form[0].name u'address' >>> form[0].fq_name() u'.0' """ if self.parent is None: return sep children_of_root = reversed(list(self.parents)[:-1]) parts, mask = [], None for element in list(children_of_root) + [self]: # allow Slot elements to mask the names of their child # e.g. # <List name='l'> <Slot name='0'> <String name='s'> # has an .el()/Python path of just # l.0 # not # l.0.s if isinstance(element, Slot): mask = element.name continue elif mask: parts.append(mask) mask = None continue parts.append(element.name) return sep + sep.join(parts) def find(self, path, single=False, strict=True): """Find child elements by string path. :param path: a /-separated string specifying elements to select, such as 'child/grandchild/greatgrandchild'. Relative & absolute paths are supported, as well as container expansion. See :ref:`path_lookups`. 
:param single: if true, return a scalar result rather than a list of elements. If no elements match *path*, ``None`` is returned. If multiple elements match, a :exc:`LookupError` is raised. If multiple elements are found and *strict* is false, an unspecified element from the result set is returned. :param strict: defaults to True. If *path* specifies children or sequence indexes that do not exist, a `:ref:`LookupError` is raised. :returns: a list of :class:`Element` instances, an :class:`Element` if *single* is true, or raises :exc:`LookupError`. .. testsetup:: find from flatland import Form, Dict, List, String class Profile(Form): contact = Dict.of(String.named('name'), List.named('addresses'). of(Dict.of(String.named('street1'), String.named('city'))). using(default=1)) form = Profile( {'contact': {'name': 'Obed Marsh', 'addresses': [{'street1': 'Main', 'city': 'Kingsport'}, {'street1': 'Broadway', 'city': 'Dunwich'}]}}) .. doctest:: find >>> cities = form.find('/contact/addresses[:]/city') >>> [el.value for el in cities] [u'Kingsport', u'Dunwich'] >>> form.find('/contact/name', single=True) <String u'name'; value=u'Obed Marsh'> """ expr = pathexpr(path) results = expr(self, strict) if not single: return results elif not results: return None elif len(results) > 1 and strict: raise LookupError("Path %r matched multiple elements; single " "result expected." % (path,)) else: return results[0] def el(self, path, sep=u'.'): """Find a child element by string path. :param path: a *sep*-separated string of element names, or an iterable of names :param sep: optional, a string separator used to parse *path* :returns: an :class:`Element` or raises :exc:`KeyError`. .. testsetup:: el from flatland import Form, Dict, List, String class Profile(Form): contact = Dict.of(List.named('addresses'). of(Dict.of(String.named('street1'), String.named('city'))). using(default=1)) form = Profile.from_defaults() .. 
doctest:: el >>> first_address = form.el('contact.addresses.0') >>> first_address.el('street1') <String u'street1'; value=None> Given a relative path as above, :meth:`el` searches for a matching path among the element's children. If *path* begins with *sep*, the path is considered fully qualified and the search is resolved from the :attr:`Element.root`. The leading *sep* will always match the root node, regardless of its :attr:`.name`. .. doctest:: el >>> form.el('.contact.addresses.0.city') <String u'city'; value=None> >>> first_address.el('.contact.addresses.0.city') <String u'city'; value=None> """ try: names = list(self._parse_element_path(path, sep)) or () if names[0] is Root: element = self.root names.pop(0) else: element = self while names: element = element._index(names.pop(0)) return element except LookupError: raise KeyError('No element at %r' % (path,)) def _index(self, name): """Return a named child or raise LookupError.""" raise NotImplementedError() @classmethod def _parse_element_path(self, path, sep): if isinstance(path, basestring): if path == sep: return [Root] elif path.startswith(sep): path = path[len(sep):] parts = [Root] else: parts = [] parts.extend(path.split(sep)) return iter(parts) else: return iter(path) # fixme: nuke? if isinstance(path, (list, tuple)) or hasattr(path, 'next'): return path else: assert False return None def add_error(self, message): "Register an error message on this element, ignoring duplicates." if message not in self.errors: self.errors.append(message) def add_warning(self, message): "Register a warning message on this element, ignoring duplicates." if message not in self.warnings: self.warnings.append(message) def flattened_name(self, sep=u'_'): """Return the element's complete flattened name as a string. Joins this element's :attr:`path` with *sep* and returns the fully qualified, flattened name. Encodes all :class:`Container` and other structures into a single string. 
Example:: >>> import flatland >>> form = flatland.List('addresses', ... flatland.String('address')) >>> element = form() >>> element.set([u'uptown', u'downtown']) >>> element.el('0').value u'uptown' >>> element.el('0').flattened_name() u'addresses_0_address' """ return sep.join(parent.name for parent in self.path if parent.name is not None) def flatten(self, sep=u'_', value=operator.attrgetter('u')): """Export an element hierarchy as a flat sequence of key, value pairs. :arg sep: a string, will join together element names. :arg value: a 1-arg callable called once for each element. Defaults to a callable that returns the :attr:`.u` of each element. Encodes the element hierarchy in a *sep*-separated name string, paired with any representation of the element you like. The default is the Unicode value of the element, and the output of the default :meth:`flatten` can be round-tripped with :meth:`set_flat`. Given a simple form with a string field and a nested dictionary:: >>> from flatland import Dict, String >>> class Nested(Form): ... contact = Dict.of(String.named(u'name'), ... Dict.named(u'address').\ ... of(String.named(u'email'))) ... >>> element = Nested() >>> element.flatten() [(u'contact_name', u''), (u'contact_address_email', u'')] The value of each pair can be customized with the *value* callable:: >>> element.flatten(value=operator.attrgetter('u')) [(u'contact_name', u''), (u'contact_address_email', u'')] >>> element.flatten(value=lambda el: el.value) [(u'contact_name', None), (u'contact_address_email', None)] Solo elements will return a sequence containing a single pair:: >>> element['name'].flatten() [(u'contact_name', u'')] """ if self.flattenable: pairs = [(self.flattened_name(sep), value(self))] else: pairs = [] if self.children_flattenable: pairs.extend((e.flattened_name(sep), value(e)) for e in self.all_children if e.flattenable) return pairs def set(self, value): """Assign the native and Unicode value. 
Attempts to adapt the given *value* and assigns this element's :attr:`value` and :attr:`u` attributes in tandem. Returns True if the adaptation was successful. If adaptation succeeds, :attr:`value` will contain the adapted native value and :attr:`u` will contain a Unicode serialized version of it. A native value of None will be represented as u'' in :attr:`u`. If adaptation fails, :attr:`value` will be ``None`` and :attr:`u` will contain ``unicode(value)`` or ``u''`` for None. >>> from flatland import Integer >>> el = Integer() >>> el.u, el.value (u'', None) >>> el.set('123') True >>> el.u, el.value (u'123', 123) >>> el.set(456) True >>> el.u, el.value (u'456', 456) >>> el.set('abc') False >>> el.u, el.value (u'abc', None) >>> el.set(None) True >>> el.u, el.value (u'', None) """ raise NotImplementedError() def set_flat(self, pairs, sep=u'_'): """Set element values from pairs, expanding the element tree as needed. Given a sequence of name/value tuples or a dict, build out a structured tree of value elements. """ if hasattr(pairs, 'items'): pairs = pairs.items() return self._set_flat(pairs, sep) def _set_flat(self, pairs, sep): raise NotImplementedError() def set_default(self): """set() the element to the schema default.""" raise NotImplementedError() @property def is_empty(self): """True if the element has no value.""" return True if (self.value is None and self.u == u'') else False def validate(self, state=None, recurse=True): """Assess the validity of this element and its children. :param state: optional, will be passed unchanged to all validator callables. :param recurse: if False, do not validate children. :returns: True or False Iterates through this element and all of its children, invoking each element's :meth:`schema.validate_element`. Each element will be visited twice: once heading down the tree, breadth-first, and again heading back up in reverse order. Returns True if all validations pass, False if one or more fail. 
""" if not recurse: down = self._validate(state, True) if down is Unevaluated: self.valid = down else: self.valid = bool(down) up = self._validate(state, False) # an Unevaluated ascent validator does not override the results # of descent validation if up is not Unevaluated: self.valid = bool(up) return self.valid valid = True elements, seen, queue = [], set(), collections.deque([self]) # descend breadth first, skipping any branches that return All* while queue: element = queue.popleft() if id(element) in seen: continue seen.add(id(element)) elements.append(element) validated = element._validate(state, True) if validated is Unevaluated: element.valid = validated else: element.valid = bool(validated) if valid: valid &= validated if validated is SkipAll or validated is SkipAllFalse: continue queue.extend(element.children) # back up, visiting only the elements that weren't skipped above for element in reversed(elements): validated = element._validate(state, False) # an Unevaluated ascent validator does not override the results # of descent validation if validated is Unevaluated: pass elif element.valid: element.valid = bool(validated) if valid: valid &= validated return bool(valid) def _validate(self, state, descending): """Run validation, transforming None into success. Internal.""" if descending: if self.validates_down: validators = getattr(self, self.validates_down, None) return validate_element(self, state, validators) else: if self.validates_up: validators = getattr(self, self.validates_up, None) return validate_element(self, state, validators) return Unevaluated @property def default_value(self): """A calculated "default" value. If :attr:`default_factory` is present, it will be called with the element as a single positional argument. The result of the call will be returned. Otherwise, returns :attr:`default`. When comparing an element's :attr:`value` to its default value, use this property in the comparison. 
""" if self.default_factory is not None: return self.default_factory(self) else: return self.default @property def x(self): """Sugar, the xml-escaped value of :attr:`.u`.""" global xml if xml is None: import xml.sax.saxutils return xml.sax.saxutils.escape(self.u) @property def xa(self): """Sugar, the xml-attribute-escaped value of :attr:`.u`.""" global xml if xml is None: import xml.sax.saxutils return xml.sax.saxutils.quoteattr(self.u)[1:-1] def __hash__(self): raise TypeError('%s object is unhashable', self.__class__.__name__) class Slot(object): """Marks a semi-visible Element-holding Element, like the 0 in list[0].""" def validate_element(element, state, validators): """Apply a set of validators to an element. :param element: a `~flatland.Element` :param state: may be None, an optional value of supplied to ``element.validate`` :param validators: an iterable of validation functions :return: a truth value If validators is empty or otherwise false, a fallback validation of ``not element.is_empty`` will be used. Empty but optional elements are considered valid. Emits :class:`flatland.signals.validator_validated` after each validator is tested. """ if element.is_empty and element.optional: return True if not validators: valid = not element.is_empty if validator_validated.receivers: validator_validated.send( NotEmpty, element=element, state=state, result=valid) return valid for fn in validators: valid = fn(element, state) if validator_validated.receivers: validator_validated.send( fn, element=element, state=state, result=valid) if valid is None: return False elif valid is Skip: return True elif not valid or valid is SkipAll: return valid return True
jek/flatland
flatland/schema/base.py
Python
mit
29,692
0.000404
# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .. import unittest from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache from mock import Mock class LruCacheTestCase(unittest.TestCase): def test_get_set(self): cache = LruCache(1) cache["key"] = "value" self.assertEquals(cache.get("key"), "value") self.assertEquals(cache["key"], "value") def test_eviction(self): cache = LruCache(2) cache[1] = 1 cache[2] = 2 self.assertEquals(cache.get(1), 1) self.assertEquals(cache.get(2), 2) cache[3] = 3 self.assertEquals(cache.get(1), None) self.assertEquals(cache.get(2), 2) self.assertEquals(cache.get(3), 3) def test_setdefault(self): cache = LruCache(1) self.assertEquals(cache.setdefault("key", 1), 1) self.assertEquals(cache.get("key"), 1) self.assertEquals(cache.setdefault("key", 2), 1) self.assertEquals(cache.get("key"), 1) cache["key"] = 2 # Make sure overriding works. 
self.assertEquals(cache.get("key"), 2) def test_pop(self): cache = LruCache(1) cache["key"] = 1 self.assertEquals(cache.pop("key"), 1) self.assertEquals(cache.pop("key"), None) def test_del_multi(self): cache = LruCache(4, 2, cache_type=TreeCache) cache[("animal", "cat")] = "mew" cache[("animal", "dog")] = "woof" cache[("vehicles", "car")] = "vroom" cache[("vehicles", "train")] = "chuff" self.assertEquals(len(cache), 4) self.assertEquals(cache.get(("animal", "cat")), "mew") self.assertEquals(cache.get(("vehicles", "car")), "vroom") cache.del_multi(("animal",)) self.assertEquals(len(cache), 2) self.assertEquals(cache.get(("animal", "cat")), None) self.assertEquals(cache.get(("animal", "dog")), None) self.assertEquals(cache.get(("vehicles", "car")), "vroom") self.assertEquals(cache.get(("vehicles", "train")), "chuff") # Man from del_multi say "Yes". def test_clear(self): cache = LruCache(1) cache["key"] = 1 cache.clear() self.assertEquals(len(cache), 0) class LruCacheCallbacksTestCase(unittest.TestCase): def test_get(self): m = Mock() cache = LruCache(1) cache.set("key", "value") self.assertFalse(m.called) cache.get("key", callbacks=[m]) self.assertFalse(m.called) cache.get("key", "value") self.assertFalse(m.called) cache.set("key", "value2") self.assertEquals(m.call_count, 1) cache.set("key", "value") self.assertEquals(m.call_count, 1) def test_multi_get(self): m = Mock() cache = LruCache(1) cache.set("key", "value") self.assertFalse(m.called) cache.get("key", callbacks=[m]) self.assertFalse(m.called) cache.get("key", callbacks=[m]) self.assertFalse(m.called) cache.set("key", "value2") self.assertEquals(m.call_count, 1) cache.set("key", "value") self.assertEquals(m.call_count, 1) def test_set(self): m = Mock() cache = LruCache(1) cache.set("key", "value", callbacks=[m]) self.assertFalse(m.called) cache.set("key", "value") self.assertFalse(m.called) cache.set("key", "value2") self.assertEquals(m.call_count, 1) cache.set("key", "value") self.assertEquals(m.call_count, 
1) def test_pop(self): m = Mock() cache = LruCache(1) cache.set("key", "value", callbacks=[m]) self.assertFalse(m.called) cache.pop("key") self.assertEquals(m.call_count, 1) cache.set("key", "value") self.assertEquals(m.call_count, 1) cache.pop("key") self.assertEquals(m.call_count, 1) def test_del_multi(self): m1 = Mock() m2 = Mock() m3 = Mock() m4 = Mock() cache = LruCache(4, 2, cache_type=TreeCache) cache.set(("a", "1"), "value", callbacks=[m1]) cache.set(("a", "2"), "value", callbacks=[m2]) cache.set(("b", "1"), "value", callbacks=[m3]) cache.set(("b", "2"), "value", callbacks=[m4]) self.assertEquals(m1.call_count, 0) self.assertEquals(m2.call_count, 0) self.assertEquals(m3.call_count, 0) self.assertEquals(m4.call_count, 0) cache.del_multi(("a",)) self.assertEquals(m1.call_count, 1) self.assertEquals(m2.call_count, 1) self.assertEquals(m3.call_count, 0) self.assertEquals(m4.call_count, 0) def test_clear(self): m1 = Mock() m2 = Mock() cache = LruCache(5) cache.set("key1", "value", callbacks=[m1]) cache.set("key2", "value", callbacks=[m2]) self.assertEquals(m1.call_count, 0) self.assertEquals(m2.call_count, 0) cache.clear() self.assertEquals(m1.call_count, 1) self.assertEquals(m2.call_count, 1) def test_eviction(self): m1 = Mock(name="m1") m2 = Mock(name="m2") m3 = Mock(name="m3") cache = LruCache(2) cache.set("key1", "value", callbacks=[m1]) cache.set("key2", "value", callbacks=[m2]) self.assertEquals(m1.call_count, 0) self.assertEquals(m2.call_count, 0) self.assertEquals(m3.call_count, 0) cache.set("key3", "value", callbacks=[m3]) self.assertEquals(m1.call_count, 1) self.assertEquals(m2.call_count, 0) self.assertEquals(m3.call_count, 0) cache.set("key3", "value") self.assertEquals(m1.call_count, 1) self.assertEquals(m2.call_count, 0) self.assertEquals(m3.call_count, 0) cache.get("key2") self.assertEquals(m1.call_count, 1) self.assertEquals(m2.call_count, 0) self.assertEquals(m3.call_count, 0) cache.set("key1", "value", callbacks=[m1]) 
self.assertEquals(m1.call_count, 1) self.assertEquals(m2.call_count, 0) self.assertEquals(m3.call_count, 1) class LruCacheSizedTestCase(unittest.TestCase): def test_evict(self): cache = LruCache(5, size_callback=len) cache["key1"] = [0] cache["key2"] = [1, 2] cache["key3"] = [3] cache["key4"] = [4] self.assertEquals(cache["key1"], [0]) self.assertEquals(cache["key2"], [1, 2]) self.assertEquals(cache["key3"], [3]) self.assertEquals(cache["key4"], [4]) self.assertEquals(len(cache), 5) cache["key5"] = [5, 6] self.assertEquals(len(cache), 4) self.assertEquals(cache.get("key1"), None) self.assertEquals(cache.get("key2"), None) self.assertEquals(cache["key3"], [3]) self.assertEquals(cache["key4"], [4]) self.assertEquals(cache["key5"], [5, 6])
TribeMedia/synapse
tests/util/test_lrucache.py
Python
apache-2.0
7,584
0
# 1、`if __name__ == "__main__":` ''' __name__是指示当前py文件调用方式的方法。 如果它等于"__main__"就表示是直接执行,如果不是,则用来被别的文件调用。 一般写在文件的最后。 查看format.py和wordsCount.py的布局 ''' # 2、函数 ''' 查看format.py中的formatLines函数 def xxx(): # 函数体 ''' # 3、if条件语句 ''' if xx: # xxx elif xxx: # xxx elif xxx: # xxx else: # xxx `else` 表示剩下的所有情况,该分支需要放置到最后,可以没有该分支 `elif` 可以有多个,也可以只有一个,也可以没有 ''' # 4、列表 ''' 列表是中括号包裹,并以逗号分隔的一系列值的集合,举例: ''' numList = [3, 4, 5, 6, 7] strList = ['he ', 'is ', 'a ', 'dog'] dList = [[3,4], [5,6]] # 列表组成的类表 # 4、for循环 ''' 对列表numList遍历,并打印出所有的值 ''' for i in numList: print i # 5、文件读写 infile = open(r'd:\\a.txt', 'r') # 'r' 表示读取文件;infile代表打开的文件 lines = infile.readlines() # 读取文件所有行,并保存以行为单位保存在列表(list类型)lines中 infile.close() # 关闭文件 outfile = open(r'd:\\outfile.txt', 'w') # 'r' 表示读取文件;outfile代表将要写内容的文件 outfile.write('hello world' + '\n') # 向文件中写入内容 outfile.close() # 关闭文件 # 6、练习 ''' 对文档words.txt,分别统计以小写字母a、b、c开头的单词的个数, 在wordsCount.py的基础上开发 注:这里涉及到读文件,读出来的内容是一个字符串的数组,然后统计需要通过遍历方式进行 '''
inkfountain/learn-py-a-little
lesson_file/lesson.py
Python
gpl-2.0
1,662
0.011044
# Created By: Virgil Dupras # Created On: 2006/05/02 # Copyright 2015 Hardcoded Software (http://www.hardcoded.net) # # This software is licensed under the "GPLv3" License as described in the "LICENSE" file, # which should be included with this package. The terms are also available at # http://www.gnu.org/licenses/gpl-3.0.html import io from xml.etree import ElementTree as ET from pytest import raises from hscommon.testutil import eq_ from ..ignore import * def test_empty(): il = IgnoreList() eq_(0,len(il)) assert not il.AreIgnored('foo','bar') def test_simple(): il = IgnoreList() il.Ignore('foo','bar') assert il.AreIgnored('foo','bar') assert il.AreIgnored('bar','foo') assert not il.AreIgnored('foo','bleh') assert not il.AreIgnored('bleh','bar') eq_(1,len(il)) def test_multiple(): il = IgnoreList() il.Ignore('foo','bar') il.Ignore('foo','bleh') il.Ignore('bleh','bar') il.Ignore('aybabtu','bleh') assert il.AreIgnored('foo','bar') assert il.AreIgnored('bar','foo') assert il.AreIgnored('foo','bleh') assert il.AreIgnored('bleh','bar') assert not il.AreIgnored('aybabtu','bar') eq_(4,len(il)) def test_clear(): il = IgnoreList() il.Ignore('foo','bar') il.Clear() assert not il.AreIgnored('foo','bar') assert not il.AreIgnored('bar','foo') eq_(0,len(il)) def test_add_same_twice(): il = IgnoreList() il.Ignore('foo','bar') il.Ignore('bar','foo') eq_(1,len(il)) def test_save_to_xml(): il = IgnoreList() il.Ignore('foo','bar') il.Ignore('foo','bleh') il.Ignore('bleh','bar') f = io.BytesIO() il.save_to_xml(f) f.seek(0) doc = ET.parse(f) root = doc.getroot() eq_(root.tag, 'ignore_list') eq_(len(root), 2) eq_(len([c for c in root if c.tag == 'file']), 2) f1, f2 = root[:] subchildren = [c for c in f1 if c.tag == 'file'] + [c for c in f2 if c.tag == 'file'] eq_(len(subchildren), 3) def test_SaveThenLoad(): il = IgnoreList() il.Ignore('foo', 'bar') il.Ignore('foo', 'bleh') il.Ignore('bleh', 'bar') il.Ignore('\u00e9', 'bar') f = io.BytesIO() il.save_to_xml(f) f.seek(0) il = 
IgnoreList() il.load_from_xml(f) eq_(4,len(il)) assert il.AreIgnored('\u00e9','bar') def test_LoadXML_with_empty_file_tags(): f = io.BytesIO() f.write(b'<?xml version="1.0" encoding="utf-8"?><ignore_list><file><file/></file></ignore_list>') f.seek(0) il = IgnoreList() il.load_from_xml(f) eq_(0,len(il)) def test_AreIgnore_works_when_a_child_is_a_key_somewhere_else(): il = IgnoreList() il.Ignore('foo','bar') il.Ignore('bar','baz') assert il.AreIgnored('bar','foo') def test_no_dupes_when_a_child_is_a_key_somewhere_else(): il = IgnoreList() il.Ignore('foo','bar') il.Ignore('bar','baz') il.Ignore('bar','foo') eq_(2,len(il)) def test_iterate(): #It must be possible to iterate through ignore list il = IgnoreList() expected = [('foo','bar'),('bar','baz'),('foo','baz')] for i in expected: il.Ignore(i[0],i[1]) for i in il: expected.remove(i) #No exception should be raised assert not expected #expected should be empty def test_filter(): il = IgnoreList() il.Ignore('foo','bar') il.Ignore('bar','baz') il.Ignore('foo','baz') il.Filter(lambda f,s: f == 'bar') eq_(1,len(il)) assert not il.AreIgnored('foo','bar') assert il.AreIgnored('bar','baz') def test_save_with_non_ascii_items(): il = IgnoreList() il.Ignore('\xac', '\xbf') f = io.BytesIO() try: il.save_to_xml(f) except Exception as e: raise AssertionError(str(e)) def test_len(): il = IgnoreList() eq_(0,len(il)) il.Ignore('foo','bar') eq_(1,len(il)) def test_nonzero(): il = IgnoreList() assert not il il.Ignore('foo','bar') assert il def test_remove(): il = IgnoreList() il.Ignore('foo', 'bar') il.Ignore('foo', 'baz') il.remove('bar', 'foo') eq_(len(il), 1) assert not il.AreIgnored('foo', 'bar') def test_remove_non_existant(): il = IgnoreList() il.Ignore('foo', 'bar') il.Ignore('foo', 'baz') with raises(ValueError): il.remove('foo', 'bleh')
stuckj/dupeguru
core/tests/ignore_test.py
Python
gpl-3.0
4,306
0.019508
import time import RPi.GPIO as GPIO from flask import Flask, render_template # GPIO and Sensors ============================================================ # Objects to represent sensors used to get water level class WaterLevelSensor: # how high the sensor is above the top of the fish tank offset = 0 def __init__(self, echo, trig): self.echo_pin = echo self.trig_pin = trig GPIO.setup(self.trig_pin, GPIO.OUT, initial=0) GPIO.setup(self.echo_pin, GPIO.IN) # gets the time it took for the sound to return, in microseconds def pulse_in(self): GPIO.output(self.trig_pin, 1) time.sleep(0.05) GPIO.output(self.trig_pin, 0) start = time.clock() GPIO.wait_for_edge(self.echo_pin, GPIO.RISING) return time.clock() - start # returns how far away the water is from the top of the tank in centimeters def read_water_level(self): # the speed of sound is ~343 m/s val = self.pulse_in() * 0.000001715 # ((1 / 1,000,000) * 343) / 2,000 return val - ofset # Webpage ===================================================================== app = Flask(__name__) # Posts new readings to the webpage @route('/') def display_info(): reading = 0 render_template('info.html', height=reading) if __name__ == '__main__': GPIO.setmode(GPIO.BCM) app.run('127.0.0.1', 8000)
tvictor20/tvictor-advprog
aquaponics/app.py
Python
gpl-3.0
1,404
0.003561
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ This module defines how cells are stored as tunacell's objects """ from __future__ import print_function import numpy as np import warnings import treelib as tlib from tunacell.base.observable import Observable, FunctionalObservable from tunacell.base.datatools import (Coordinates, compute_rates, extrapolate_endpoints, derivative, logderivative, ExtrapolationError) class CellError(Exception): pass class CellChildsError(CellError): pass class CellParentError(CellError): pass class CellDivisionError(CellError): pass class Cell(tlib.Node): """General class to handle cell data structure. Inherits from treelib.Node class to facilitate tree building. Parameters ---------- identifier : str cell identifier container : :class:`Container` instance container to which cell belongs Attributes ---------- container : :class:`Container` instance container to chich cell belongs childs : list of :class:`Cell` instances daughter cells of current cell parent : :class:`Cell` instance mother cell of current cell birth_time : float (default None) time of cell birth (needs to be computed) division_time : float (default None) time of cell division (needs to be computed) Methods ------- set_division_events() computes birth/division times when possible build(obs) builds timeseries, uses one of the following methods depending on obs build_timelapse(obs) builds and stores timeseries associated to obs, in 'dynamics' mode build_cyclized(obs) builds and stores cell-cycle value associated to obs, not in 'dynamics' mode """ def __init__(self, identifier=None, container=None): tlib.Node.__init__(self, identifier=identifier) self._childs = [] self._parent = None self._birth_time = None self._division_time = None self._sdata = {} # dictionary to contain computed data self._protected_against_build = set() # set of obs not to re-build self.container = container # point to Container instance # cells are built from a specific container instance # 
container can be a given field of view, a channel, a microcolony, ... return # We add few definitions to be able to chain between Cell instances @property def childs(self): "Get list of child instances." return self._childs @childs.setter def childs(self, value): if value is None: self._childs = [] elif isinstance(value, list): for item in value: self.childs = item elif isinstance(value, Cell): self._childs.append(value) else: raise CellChildsError @property def parent(self): "Get parent instance." return self._parent @parent.setter def parent(self, pcell): if pcell is None: self._parent = None elif isinstance(pcell, Cell): self._parent = pcell else: raise CellParentError @property def birth_time(self): "Get cell cycle start time. See below for Setter." return self._birth_time @birth_time.setter def birth_time(self, value): "Set cell cycle start time. See above for Getter." self._birth_time = value @property def division_time(self): "Get cell cycle end time. See below for Setter." return self._division_time @division_time.setter def division_time(self, value): "Set cell cycle end time. See above for Getter." if self.birth_time is not None: if value < self.birth_time: raise CellDivisionError self._division_time = value def set_division_event(self): "method to call when parent is identified" previous_frame = None if (self.parent is not None) and (self.parent.data is not None): previous_frame = self.parent.data['time'][-1] first_frame = None if self.data is not None: first_frame = self.data['time'][0] if previous_frame is not None and first_frame is not None: div_time = (previous_frame + first_frame)/2. # halfway self.birth_time = div_time self.parent.division_time = div_time return def __repr__(self): cid = str(self.identifier) if self.parent: pid = str(self.parent.identifier) else: pid = '-' if self.childs: ch = ','.join(['{}'.format(c.identifier) for c in self.childs]) else: ch = '-' return cid+';p:'+pid+';ch:'+ch def info(self): dic = {} dic['a. 
Identifier'] = '{}'.format(self.identifier) pid = 'None' if self.parent: pid = '{}'.format(self.parent.identifier) dic['b. Parent id'] = pid chids = 'None' if self.childs: chids = ', '.join(['{}'.format(ch.identifier) for ch in self.childs]) dic['c. Childs'] = chids dic['d. Birth time'] = '{}'.format(self.birth_time) dic['e. Division time'] = '{}'.format(self.division_time) if self.data is not None: dic['f. N_frames'] = '{}'.format(len(self.data)) return dic def protect_against_build(self, obs): """Protect current cell against building obs array/value""" self._protected_against_build.add(obs) return def build(self, obs): """Builds timeseries""" if obs in self._protected_against_build: return if isinstance(obs, FunctionalObservable): # first build every single Observable for item in obs.observables: self.build(item) arrays = [self._sdata[item.label] for item in obs.observables] self._sdata[obs.label] = obs.f(*arrays) elif isinstance(obs, Observable): if obs.mode == 'dynamics': self.build_timelapse(obs) else: self.compute_cyclized(obs) else: raise TypeError('obs must be of type Observable or FunctionalObservable') def build_timelapse(self, obs): """Builds timeseries corresponding to observable of mode 'dynamics'. Result is an array of same length as time array, stored in a dictionary _sdata, which keys are obs.label. When using sliding windows, estimate in a given cell actualize data in its parent cell, if and only if it has not been actualized before (check disjoint time intervals). Parameters ---------- obs : Observable instance mode must be 'dynamics' Note ----- Some observables carry the 'local_fit' option True. In this case, local fits over shifting time-windows are performed. If one would keep only a given cell's data, then the constraints on shifting time-window would let some 'empty' times, at which no evaluation can be performed. This is solved by getting data from the cell's parent cell's data. 
This operation computes time-window fiited data in the cell's parent cycle. Two precautions must then be taken: 1. a given cell's data must be used only once for evaluating parent cell's data, 2. when data has been used from one daughter cell, concatenate the current cell's evaluated data to it. .. warning:: For some computations, the time interval between consecutive acquisitions is needed. If it's defined in the container or the experiment metadata, this parameter will be imported; otherwise if there are at least 2 consecutive values, it will be inferred from data (at the risk of making mistakes if there are too many missing values) """ label = str(obs.label) raw = obs.raw coords = Coordinates(self.data['time'], self.data[raw]) if self.parent is not None and len(self.parent.data) > 0: anteriors = Coordinates(self.parent.data['time'], self.parent.data[raw]) else: anteriors = Coordinates(np.array([], dtype=float), np.array([], dtype=float)) # if empty, return empty array of appropriate type if len(self.data) == 0: # there is no data, but it has some dtype return Coordinates(np.array([], dtype=float), np.array([], dtype=float)) dt = self.container.period if dt is None: # automatically finds dt if len(self.data) > 1: arr = self.data['time'] time_increments = arr[1:] - arr[:-1] dt = np.round(np.amin(np.abs(time_increments)), decimals=2) # case : no local fit, use data, or finite differences if not obs.local_fit: if obs.differentiate: if obs.scale == 'linear': new = derivative(coords) elif obs.scale == 'log': new = logderivative(coords) else: new = coords self._sdata[label] = new.y # case : local estimates using compute_rates else: r, f, ar, af, xx, yy = compute_rates(coords.x, coords.y, x_break=self.birth_time, anterior_x=anteriors.x, anterior_y=anteriors.y, scale=obs.scale, time_window=obs.time_window, dt=dt, join_points=obs.join_points) if obs.differentiate: to_cell = r to_parent = ar if len(ar) != len(anteriors.x): print('This is awkward') else: to_cell = f 
to_parent = af self._sdata[label] = to_cell if self.parent is not None and (not np.all(np.isnan(to_parent))): if label not in self.parent._sdata.keys(): self.parent._sdata[label] = to_parent else: existing = self.parent._sdata[label] # if existing is nan, try to put addedum values self.parent._sdata[label] = np.where(np.isnan(existing), to_parent, existing) return def compute_cyclized(self, obs): """Computes observable when mode is different from 'dynamics'. Parameters ---------- obs : Observable instance mode must be different from 'dynamics' Raises ------ ValueError when Observable mode is 'dynamics' Note ---- To compute a cell-cycle observable (e.g. birth growth rate), it is necessary to know the value of the timelapse counterpart (e.g. growth rate here). The timelapse observable may work by joining values at divisions, and hence a single call to Cell.build_timelapse() will result in a different result array than when it has beenalso called in a daughter cell (potentially affecting values toward the end of current cell cycle). Hence, in that circumstances when continuity is used to join timeseries at divisions, enhancing results with fitting over sliding windows, it is the user's task to compute first the timelapse observable over the entire lineage, and only then, evaluate cell-cycle values. This is why the function below tries first to read an already present array from timelapse counterpart, and only if it fails will it compute it using only this current cell data. 
""" scale = obs.scale npts = obs.join_points label = obs.label if obs.mode == 'dynamics': raise ValueError('Called build_cyclized for dynamics mode') # associate timelapse counterpart cobs = obs.as_timelapse() clabel = cobs.label time = self.data['time'] # if it has been computed already, the clabel key exists in sdata try: array = self._sdata[clabel] # otherwise compute the timelapse counterpart except KeyError: self.build_timelapse(cobs) array = self._sdata[clabel] # get value try: if obs.mode == 'birth': value = extrapolate_endpoints(time, array, self.birth_time, scale=scale, join_points=npts) elif obs.mode == 'division': value = extrapolate_endpoints(time, array, self.division_time, scale=scale, join_points=npts) elif 'net-increase' in obs.mode: dval = extrapolate_endpoints(time, array, self.division_time, scale=scale, join_points=npts) bval = extrapolate_endpoints(time, array, self.birth_time, scale=scale, join_points=npts) if obs.mode == 'net-increase-additive': value = dval - bval elif obs.mode == 'net-increase-multiplicative': value = dval/bval elif obs.mode == 'average': value = np.nanmean(array) elif obs.mode == 'rate': if len(array) < 2: value = np.nan # not enough values to estimate rate if obs.scale == 'log': array = np.log(array) value, intercept = np.polyfit(time, array, 1) except ExtrapolationError as err: # msg = '{}'.format(err) # warnings.warn(msg) value = np.nan # missing information self._sdata[label] = value return def _disjoint_time_sets(ts1, ts2): if len(ts1) == 0 or len(ts2) == 0: return True min1, min2 = map(np.nanmin, [ts1, ts2]) max1, max2 = map(np.nanmax, [ts1, ts2]) return max1 < min2 or max2 < min1 def filiate_from_bpointer(cells): """Build in place parent/childs attributes in a set of filiated cells Parameters ---------- cells : list of Cell instances """ for cell in cells: childs = [] for cc in cells: if cc.bpointer == cell.identifier: childs.append(cc) cc.parent = cell cc.set_division_event() cell.childs = childs
LeBarbouze/tunacell
tunacell/base/cell.py
Python
mit
15,188
0.00079
from coinpy.model.scripts.opcodes import OP_2DIV, OP_2MUL, OP_AND, OP_CAT,\ OP_DIV, OP_INVERT, OP_LSHIFT, OP_LEFT, OP_MOD, OP_OR, OP_RIGHT, OP_RSHIFT,\ OP_SUBSTR, OP_XOR, OP_MUL DISABLED_OPCODES=[OP_CAT, OP_SUBSTR, OP_LEFT, OP_RIGHT, OP_INVERT, OP_AND, OP_OR, OP_XOR, OP_2MUL, OP_2DIV, OP_MUL, OP_DIV, OP_MOD, OP_LSHIFT, OP_RSHIFT]
sirk390/coinpy
coinpy-lib/src/coinpy/lib/vm/opcode_impl/disabled.py
Python
lgpl-3.0
341
0.008798
############################################################################## # # Copyright (C) Zenoss, Inc. 2009, 2011, all rights reserved. # # This content is made available according to terms specified in # License.zenoss under the directory where your Zenoss product is installed. # ############################################################################## __doc__ = """zenbatchload zenbatchload loads a list of devices read from a file. """ import sys import re from traceback import format_exc import socket import Globals from ZODB.POSException import ConflictError from ZODB.transact import transact from zope.component import getUtility from zope.event import notify from zExceptions import BadRequest from ZPublisher.Converters import type_converters from Products.ZenModel.interfaces import IDeviceLoader from Products.ZenUtils.ZCmdBase import ZCmdBase from Products.ZenModel.Device import Device from Products.ZenRelations.ZenPropertyManager import iszprop from Products.ZenModel.ZenModelBase import iscustprop from Products.ZenEvents.ZenEventClasses import Change_Add from Products.Zuul.catalog.events import IndexingEvent from Products.ZenUtils.Utils import unused # We import DateTime so that we can set properties of type DateTime in the batchload from DateTime import DateTime unused(DateTime) from zenoss.protocols.protobufs.zep_pb2 import SEVERITY_INFO, SEVERITY_ERROR METHODS_TO_SETTINGS = { 'setManageIp': 'manageIp', 'setPerformanceMonitor': 'performanceMonitor', 'setTitle': 'title', 'setHWTag': 'tag', 'setHWSerialNumber': 'serialNumber', 'setProdState': 'productionState', 'setPriority': 'priority', 'setGroups': 'groupPaths', 'setSystems': 'systemPaths', # these don't have methods but were added for completeness 'setRackSlot': 'rackSlot', 'setComments': 'comments', # TODO: setHWProduct and setOSProduct (they take multiple parameters) } class BatchDeviceLoader(ZCmdBase): """ Base class wrapping around dmd.DeviceLoader """ sample_configs = """# # Example 
zenbatchloader device file # # This file is formatted with one entry per line, like this: # # /Devices/device_class_name Python-expression # hostname Python-expression # # For organizers (ie the /Devices path), the Python-expression # is used to define defaults to be used for devices listed # after the organizer. The defaults that can be specified are: # # * loader arguments (use the --show_options flag to show these) # # * zProperties (from a device, use the 'Configuration Properties' # menu item to see the available ones.) # # NOTE: new zProperties *cannot* be created through this file # # * cProperties (from a device, use the 'Custom Properties' # menu item to see the available ones.) # # NOTE: new cProperties *cannot* be created through this file # # The Python-expression is used to create a dictionary of settings. # device_settings = eval( 'dict(' + python-expression + ')' ) # # Setting locations /Locations/Canada address="Canada" /Locations/Canada/Alberta address="Alberta, Canada" /Locations/Canada/Alberta/Calgary address="Calgary, Alberta, Canada" # If no organizer is specified at the beginning of the file, # defaults to the /Devices/Discovered device class. device0 comments="A simple device" # All settings must be seperated by a comma. device1 comments="A simple device", zSnmpCommunity='blue', zSnmpVer='v1' # Notes for this file: # * Oraganizer names *must* start with '/' # /Devices/Server/Linux zSnmpPort=1543 # Python strings can use either ' or " -- there's no difference. # As a special case, it is also possible to specify the IP address linux_device1 setManageIp='10.10.10.77', zSnmpCommunity='blue', zSnmpVer="v2c" # A '\' at the end of the line allows you to place more # expressions on a new line. Don't forget the comma... linux_device2 zLinks="<a href='http://example.org'>Support site</a>", \ zTelnetEnable=True, \ zTelnetPromptTimeout=15.3 # A new organizer drops all previous settings, and allows # for new ones to be used. Settings do not span files. 
/Devices/Server/Windows zWinUser="administrator", zWinPassword='fred' # Bind templates windows_device1 zDeviceTemplates=[ 'Device', 'myTemplate' ] # Override the default from the organizer setting. windows_device2 zWinUser="administrator", zWinPassword='thomas', setProdState=500 # Apply other settings to the device settingsDevice setManageIp='10.10.10.77', setLocation="123 Elm Street", \ setSystems=['/mySystems'], setPerformanceMonitor='remoteCollector1', \ setHWSerialNumber="abc123456789", setGroups=['/myGroup'], \ setHWProduct=('myproductName','manufacturer'), setOSProduct=('OS Name','manufacturer') # If the device or device class contains a space, then it must be quoted (either ' or ") "/Server/Windows/WMI/Active Directory/2008" # Now, what if we have a device that isn't really a device, and requires # a special loader? # The 'loader' setting requires a registered utility, and 'loader_arg_keys' is # a list from which any other settings will be passed into the loader callable. # # Here is a commmented-out example of how a VMware endpoint might be added: # #/Devices/VMware loader='vmware', loader_arg_keys=['host', 'username', 'password', 'useSsl', 'id'] #esxwin2 id='esxwin2', host='esxwin2.zenoss.loc', username='testuser', password='password', useSsl=True # Apply custom schema properties (c-properties) to a device windows_device7 cDateTest='2010/02/28' # # The following are wrapper methods that specifically set values on a device: # # setManageIp # setPerformanceMonitor # setTitle # setHWTag # setHWSerialNumber # setProdState # setPriority # setGroups # setSystems # setRackSlot # setComments # """ def __init__(self, *args, **kwargs): ZCmdBase.__init__(self, *args, **kwargs) self.defaults = {} self.loader = self.dmd.DeviceLoader.loadDevice self.fqdn = socket.getfqdn() self.baseEvent = dict( device=self.fqdn, component='', agent='zenbatchload', monitor='localhost', manager=self.fqdn, severity=SEVERITY_ERROR, # Note: Change_Add events get sent to history by the event 
class' Zen property eventClass=Change_Add, ) # Create the list of options we want people to know about self.loader_args = dict.fromkeys( self.loader.func_code.co_varnames ) unsupportable_args = [ 'REQUEST', 'device', 'self', 'xmlrpc', 'e', 'handler', ] for opt in unsupportable_args: if opt in self.loader_args: del self.loader_args[opt] def loadDeviceList(self, args=None): """ Read through all of the files listed as arguments and return a list of device entries. @parameter args: list of filenames (uses self.args is this is None) @type args: list of strings @return: list of device specifications @rtype: list of dictionaries """ if args is None: args = self.args device_list = [] for filename in args: if filename.strip() != '': try: data = open(filename,'r').readlines() except IOError: msg = "Unable to open the file '%s'" % filename self.reportException(msg) continue temp_dev_list = self.parseDevices(data) if temp_dev_list: device_list += temp_dev_list return device_list def applyZProps(self, device, device_specs): """ Apply zProperty settings (if any) to the device. @parameter device: device to modify @type device: DMD device object @parameter device_specs: device creation dictionary @type device_specs: dictionary """ self.log.debug( "Applying zProperties..." ) # Returns a list of (key, value) pairs. # Convert it to a dictionary. 
dev_zprops = dict( device.zenPropertyItems() ) for zprop, value in device_specs.items(): self.log.debug( "Evaluating zProperty <%s -> %s> on %s" % (zprop, value, device.id) ) if not iszprop(zprop): self.log.debug( "Evaluating zProperty <%s -> %s> on %s: not iszprop()" % (zprop, value, device.id) ) continue if zprop in dev_zprops: try: self.log.debug( "Setting zProperty <%s -> %s> on %s (currently set to %s)" % ( zprop, value, device.id, getattr(device, zprop, 'notset')) ) device.setZenProperty(zprop, value) except BadRequest: self.log.warn( "Object %s zproperty %s is invalid or duplicate" % ( device.titleOrId(), zprop) ) except Exception, ex: self.log.warn( "Object %s zproperty %s not set (%s)" % ( device.titleOrId(), zprop, ex) ) self.log.debug( "Set zProperty <%s -> %s> on %s (now set to %s)" % ( zprop, value, device.id, getattr(device, zprop, 'notset')) ) else: self.log.warn( "The zproperty %s doesn't exist in %s" % ( zprop, device_specs.get('deviceName', device.id))) def applyCustProps(self, device, device_specs): """ Custom schema properties """ self.log.debug( "Applying custom schema properties..." 
) dev_cprops = device.custPropertyMap() for cprop, value in device_specs.items(): if not iscustprop(cprop): continue matchProps = [prop for prop in dev_cprops if prop['id'] == cprop] if matchProps: ctype = matchProps[0]['type'] if ctype == 'password': ctype = 'string' if ctype in type_converters and value: value = type_converters[ctype](value) device.setZenProperty( cprop, value) else: self.log.warn( "The cproperty %s doesn't exist in %s" % ( cprop, device_specs.get('deviceName', device.id))) def addAllLGSOrganizers(self, device_specs): location = device_specs.get('setLocation') if location: self.addLGSOrganizer('Locations', (location,) ) systems = device_specs.get('setSystems') if systems: if not isinstance(systems, list) and not isinstance(systems, tuple): systems = (systems,) self.addLGSOrganizer('Systems', systems) groups = device_specs.get('setGroups') if groups: if not isinstance(groups, list) and not isinstance(groups, tuple): groups = (groups,) self.addLGSOrganizer('Groups', groups) def addLGSOrganizer(self, lgsType, paths=[]): """ Add any new locations, groups or organizers """ prefix = '/zport/dmd/' + lgsType base = getattr(self.dmd, lgsType) if hasattr(base, 'sync'): base.sync() existing = [x.getPrimaryUrlPath().replace(prefix, '') \ for x in base.getSubOrganizers()] for path in paths: if path in existing: continue try: base.manage_addOrganizer(path) except BadRequest: pass @transact def addOrganizer(self, device_specs): """ Add any organizers as required, and apply zproperties to them. """ path = device_specs.get('devicePath') baseOrg = path.split('/', 2)[1] base = getattr(self.dmd, baseOrg, None) if base is None: self.log.error("The base of path %s (%s) does not exist -- skipping", baseOrg, path) return try: org = base.getDmdObj(path) except KeyError: try: self.log.info("Creating organizer %s", path) @transact def inner(): base.manage_addOrganizer(path) inner() org = base.getDmdObj(path) except IOError: self.log.error("Unable to create organizer! 
Is Rabbit up and configured correctly?") sys.exit(1) self.applyZProps(org, device_specs) self.applyCustProps(org, device_specs) self.applyOtherProps(org, device_specs) def applyOtherProps(self, device, device_specs): """ Apply non-zProperty settings (if any) to the device. @parameter device: device to modify @type device: DMD device object @parameter device_specs: device creation dictionary @type device_specs: dictionary """ self.log.debug( "Applying other properties..." ) internalVars = [ 'deviceName', 'devicePath', 'comments', 'loader', 'loader_arg_keys', ] internalVars.extend(METHODS_TO_SETTINGS.itervalues()) @transact def setNamedProp(org, name, description): setattr(org, name, description) for functor, value in device_specs.items(): if iszprop(functor) or iscustprop(functor) or functor in internalVars: continue # Special case for organizers which can take a description if functor in ('description', 'address'): if hasattr(device, functor): setNamedProp(device, functor, value) continue try: self.log.debug("For %s, calling device.%s(%s)", device.id, functor, value) func = getattr(device, functor, None) if func is None or not callable(func): self.log.warn("The function '%s' for device %s is not found.", functor, device.id) elif isinstance(value, (list, tuple)): # The function either expects a list or arguments try: # arguments func(*value) except TypeError: # Try as a list func(value) else: func(value) except ConflictError: raise except Exception: msg = "Device %s device.%s(%s) failed" % (device.id, functor, value) self.reportException(msg, device.id) def runLoader(self, loader, device_specs): """ It's up to the loader now to figure out what's going on. 
@parameter loader: device loader @type loader: callable @parameter device_specs: device entries @type device_specs: dictionary """ argKeys = device_specs.get('loader_arg_keys', []) loader_args = {} for key in argKeys: if key in device_specs: loader_args[key] = device_specs[key] result = loader().load_device(self.dmd, **loader_args) # If the loader returns back a device object, carry # on processing if isinstance(result, Device): return result return None def processDevices(self, device_list): """ Read the input and process the devices * create the device entry * set zproperties * set custom schema properties * model the device @parameter device_list: list of device entries @type device_list: list of dictionaries @return: status of device loading @rtype: dictionary """ def transactional(f): return f if self.options.nocommit else transact(f) processed = {'processed':0, 'errors':0} @transactional def _process(device_specs): # Get the latest bits self.dmd.zport._p_jar.sync() loaderName = device_specs.get('loader') if loaderName is not None: try: orgName = device_specs['devicePath'] organizer = self.dmd.getObjByPath('dmd' + orgName) deviceLoader = getUtility(IDeviceLoader, loaderName, organizer) devobj = self.runLoader(deviceLoader, device_specs) except ConflictError: raise except Exception: devName = device_specs.get('device_specs', 'Unkown Device') msg = "Ignoring device loader issue for %s" % devName self.reportException(msg, devName, specs=str(device_specs)) processed['errors'] += 1 return else: devobj = self.getDevice(device_specs) deviceLoader = None if devobj is None: if deviceLoader is not None: processed['processed'] += 1 else: self.addAllLGSOrganizers(device_specs) self.applyZProps(devobj, device_specs) self.applyCustProps(devobj, device_specs) self.applyOtherProps(devobj, device_specs) if not self.options.nocommit and isinstance(devobj, Device): notify(IndexingEvent(devobj)) return devobj @transactional def _snmp_community(device_specs, devobj): # Discover 
the SNMP community if it isn't explicitly set. if 'zSnmpCommunity' not in device_specs: self.log.debug('Discovering SNMP version and community') devobj.manage_snmpCommunity() @transactional def _model(devobj): try: devobj.collectDevice(setlog=self.options.showModelOutput) except ConflictError: raise except Exception, ex: msg = "Modeling error for %s" % devobj.id self.reportException(msg, devobj.id, exception=str(ex)) processed['errors'] += 1 processed['processed'] += 1 for device_specs in device_list: devobj = _process(device_specs) # We need to commit in order to model, so don't bother # trying to model unless we can do both if devobj and not self.options.nocommit and not self.options.nomodel: _snmp_community(device_specs, devobj) _model(devobj) processed['total'] = len(device_list) self.reportResults(processed) return processed def reportException(self, msg, devName='', **kwargs): """ Report exceptions back to the the event console """ self.log.exception(msg) if not self.options.nocommit: evt = self.baseEvent.copy() evt.update(dict( summary=msg, traceback=format_exc() )) evt.update(kwargs) if devName: evt['device'] = devName self.dmd.ZenEventManager.sendEvent(evt) def reportResults(self, processed): """ Report the success + total counts from loading devices. """ msg = "Modeled %d of %d devices, with %d errors" % ( processed['processed'], processed['total'], processed['errors'] ) self.log.info(msg) if not self.options.nocommit: evt = self.baseEvent.copy() evt.update(dict( severity=SEVERITY_INFO, summary=msg, modeled=processed['processed'], errors=processed['errors'], total=processed['total'], )) self.dmd.ZenEventManager.sendEvent(evt) def notifyNewDeviceCreated(self, deviceName): """ Report that we added a new device. 
""" if not self.options.nocommit: evt = self.baseEvent.copy() evt.update(dict( severity=SEVERITY_INFO, summary= "Added new device %s" % deviceName )) self.dmd.ZenEventManager.sendEvent(evt) def getDevice(self, device_specs): """ Find or create the specified device @parameter device_specs: device creation dictionary @type device_specs: dictionary @return: device or None @rtype: DMD device object """ if 'deviceName' not in device_specs: return None name = device_specs['deviceName'] devobj = self.dmd.Devices.findDevice(name) if devobj is not None: self.log.info("Found existing device %s" % name) return devobj specs = {} for key in self.loader_args: if key in device_specs: specs[key] = device_specs[key] try: self.log.info("Creating device %s" % name) # Do NOT model at this time specs['discoverProto'] = 'none' self.loader(**specs) devobj = self.dmd.Devices.findDevice(name) if devobj is None: self.log.error("Unable to find newly created device %s -- skipping" \ % name) else: self.notifyNewDeviceCreated(name) except Exception: msg = "Unable to load %s -- skipping" % name self.reportException(msg, name) return devobj def buildOptions(self): """ Add our command-line options to the basics """ ZCmdBase.buildOptions(self) self.parser.add_option('--show_options', dest="show_options", default=False, action="store_true", help="Show the various options understood by the loader") self.parser.add_option('--sample_configs', dest="sample_configs", default=False, action="store_true", help="Show an example configuration file.") self.parser.add_option('--showModelOutput', dest="showModelOutput", default=True, action="store_false", help="Show modelling activity") self.parser.add_option('--nocommit', dest="nocommit", default=False, action="store_true", help="Don't commit changes to the ZODB. Use for verifying config file.") self.parser.add_option('--nomodel', dest="nomodel", default=False, action="store_true", help="Don't model the remote devices. 
Must be able to commit changes.") def parseDevices(self, data): """ From the list of strings in rawDevices, construct a list of device dictionaries, ready to load into Zenoss. @parameter data: list of strings representing device entries @type data: list of strings @return: list of parsed device entries @rtype: list of dictionaries """ if not data: return [] comment = re.compile(r'^\s*#.*') defaults = {'devicePath':"/Discovered" } finalList = [] i = 0 while i < len(data): line = data[i] line = re.sub(comment, '', line).strip() if line == '': i += 1 continue # Check for line continuation character '\' while line[-1] == '\\' and i < len(data): i += 1 line = line[:-1] + data[i] line = re.sub(comment, '', line).strip() if line[0] == '/' or line[1] == '/': # Found an organizer defaults = self.parseDeviceEntry(line, {}) if defaults is None: defaults = {'devicePath':"/Discovered" } else: defaults['devicePath'] = defaults['deviceName'] del defaults['deviceName'] self.addOrganizer(defaults) else: configs = self.parseDeviceEntry(line, defaults) if configs: finalList.append(configs) i += 1 return finalList def parseDeviceEntry(self, line, defaults): """ Build a dictionary of properties from one line's input @parameter line: string containing one device's info @type line: string @parameter defaults: dictionary of default settings @type defaults: dictionary @return: parsed device entry @rtype: dictionary """ options = [] # Note: organizers and device names can have spaces in them if line[0] in ["'", '"']: delim = line[0] eom = line.find(delim, 1) if eom == -1: self.log.error("While reading name, unable to parse" \ " the entry for %s -- skipping", line ) return None name = line[1:eom] options = line[eom+1:] else: options = line.split(None, 1) name = options.pop(0) if options: options = options.pop(0) configs = defaults.copy() configs['deviceName'] = name if options: try: # Add a newline to allow for trailing comments evalString = 'dict(' + options + '\n)' optionsDict = 
eval(evalString) # ZEN-202: Set values directly rather than calling methods afterwards. for method,setting in METHODS_TO_SETTINGS.iteritems(): if method in optionsDict: optionsDict[setting] = optionsDict.pop(method) configs.update(optionsDict) except Exception: self.log.error( "Unable to parse the entry for %s -- skipping" % name ) self.log.error( "Raw string: %s" % options ) return None return configs if __name__=='__main__': batchLoader = BatchDeviceLoader() if batchLoader.options.show_options: print "Options = %s" % sorted( batchLoader.loader_args.keys() ) help(batchLoader.loader) sys.exit(0) if batchLoader.options.sample_configs: print batchLoader.sample_configs sys.exit(0) device_list = batchLoader.loadDeviceList() if not device_list: batchLoader.log.warn("No device entries found to load.") sys.exit(1) batchLoader.processDevices(device_list) sys.exit(0)
zenoss/ZenPacks.zenoss.Puppet
ZenPacks/zenoss/Puppet/BatchDeviceLoader.py
Python
gpl-2.0
26,792
0.003844
#coding:utf-8 from django.shortcuts import render # Create your views here. from django.http import HttpResponse # 引入我们创建的表单类 from models import SearchForm,SearchRepoForm,ConnectForm import requests import json from chgithub import GetSearchInfo,SearchRepo,SocialConnect,SearchConnect,nonSocialConnect def index(request): return render(request, 'index.html') def add(request, a, b): c = int(a) + int(b) return HttpResponse(str(c)) def home(request): return render(request, 'index.html') def form(request): if request.method == 'POST': # 当提交表单时 form = SearchForm(request.POST) # form 包含提交的数据 if form.is_valid(): # 如果提交的数据合法 location = form.cleaned_data['location'] language = form.cleaned_data['language'] Dict = {'filename':location+language} if GetSearchInfo(location,language): return render(request,'search_result.html',{'Dict':json.dumps(Dict)}) else: return HttpResponse(str("查找结果不存在,请重新输入!")) else: # 当正常访问时 form = SearchForm() return render(request, 'search.html', {'form': form}) def repo(request): if request.method == 'POST': # 当提交表单时 form = SearchRepoForm(request.POST) # form 包含提交的数据 if form.is_valid(): # 如果提交的数据合法 stars = form.cleaned_data['stars'] language = form.cleaned_data['language'] Dict = {'filename':language+stars} if SearchRepo(stars,language): return render(request,'repo_result.html',{'Dict':json.dumps(Dict)}) else: return HttpResponse(str("查找结果不存在,请重新输入!")) else: # 当正常访问时 form = SearchRepoForm() return render(request, 'repo.html', {'form': form}) def connect(request): if request.method == 'POST': # 当提交表单时 form = ConnectForm(request.POST) # form 包含提交的数据 if form.is_valid(): # 如果提交的数据合法 user = form.cleaned_data['user'] repo = form.cleaned_data['repo'] Dict = {'filename':user+repo} if SocialConnect(user,repo): return render(request,'connect_result.html',{'Dict':json.dumps(Dict)}) else: return HttpResponse(str("查找结果不存在,请重新输入!")) else: # 当正常访问时 form = ConnectForm() return render(request, 'connect.html', {'form': form}) def search(request): searchKey = 
request.GET['searchKey'] Dict = {'filename': searchKey.strip()} if searchKey.strip()=='': return HttpResponse(str("请输入查找关键字!")) else: if(SearchConnect(searchKey.strip())): return render(request, 'search_key_result.html',{'Dict':json.dumps(Dict)}) else: return HttpResponse(str('请重新查找!')) def nonconnect(request): if request.method == 'POST': # 当提交表单时 form = ConnectForm(request.POST) # form 包含提交的数据 if form.is_valid(): # 如果提交的数据合法 user = form.cleaned_data['user'] repo = form.cleaned_data['repo'] Dict = {'filename':user+repo} if nonSocialConnect(user,repo): return render(request,'connect_result.html',{'Dict':json.dumps(Dict)}) else: return HttpResponse(str("查找结果不存在,请重新输入!")) else: # 当正常访问时 form = ConnectForm() return render(request, 'connect.html', {'form': form})
ch710798472/GithubRecommended
RecGithub/views.py
Python
mit
3,774
0.014168
# Copyright Iris contributors # # This file is part of Iris and is released under the LGPL license. # See COPYING and COPYING.LESSER in the root of the repository for full # licensing details. """Unit tests for the `iris.fileformats.abf.ABFField` class.""" # Import iris.tests first so that some things can be initialised before # importing anything else. import iris.tests as tests # isort:skip from unittest import mock from iris.fileformats.abf import ABFField class MethodCounter: def __init__(self, method_name): self.method_name = method_name self.count = 0 def __enter__(self): self.orig_method = getattr(ABFField, self.method_name) def new_method(*args, **kwargs): self.count += 1 self.orig_method(*args, **kwargs) setattr(ABFField, self.method_name, new_method) return self def __exit__(self, exc_type, exc_value, traceback): setattr(ABFField, self.method_name, self.orig_method) return False class Test_data(tests.IrisTest): def test_single_read(self): path = "0000000000000000jan00000" field = ABFField(path) with mock.patch("iris.fileformats.abf.np.fromfile") as fromfile: with MethodCounter("__getattr__") as getattr: with MethodCounter("_read") as read: field.data fromfile.assert_called_once_with(path, dtype=">u1") self.assertEqual(getattr.count, 1) self.assertEqual(read.count, 1) if __name__ == "__main__": tests.main()
SciTools/iris
lib/iris/tests/unit/fileformats/abf/test_ABFField.py
Python
lgpl-3.0
1,558
0
#!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/client/mailer.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # disable this warning for the email.mime.* modules that have to be imported # pylint: disable=unused-import import codecs import collections import csv import datetime import email.encoders as encoders import email.mime as mime import email.mime.base import email.mime.image import email.mime.multipart import email.mime.text import email.utils import logging import mimetypes import os import smtplib import socket import threading import time import urllib.parse from king_phisher import errors from king_phisher import ics from king_phisher import ipaddress from king_phisher import its from king_phisher import templates from king_phisher import utilities from king_phisher.client import gui_utilities from king_phisher.client.dialogs import ssh_host_key from king_phisher.constants import ConnectionErrorReason from king_phisher.ssh_forward import SSHTCPForwarder from gi.repository import GLib import paramiko import smoke_zephyr.utilities __all__ = ( 'guess_smtp_server_address', 'MailSenderThread', 'render_message_template' ) template_environment = templates.MessageTemplateEnvironment() MessageAttachments = collections.namedtuple('MessageAttachments', ('files', 'images')) """ A named tuple for holding both image and file attachments for a message. .. py:attribute:: files A tuple of :py:class:`~.mime.MIMEBase` instances representing the messsages attachments. .. py:attribute:: images A tuple of :py:class:`~.mime.MIMEImage` instances representing the images in the message. """ MIME_TEXT_PLAIN = 'This message requires an HTML aware email agent to be properly viewed.\r\n\r\n' """The static string to place in MIME message as a text/plain part. 
This is shown by email clients that do not support HTML.""" def _iterate_targets_file(target_file, config=None): target_file_h = open(target_file, 'rU') csv_reader = csv.DictReader(target_file_h, ('first_name', 'last_name', 'email_address', 'department')) uid_charset = None if config is None else config['mailer.message_uid.charset'] for line_no, raw_target in enumerate(csv_reader, 1): if None in raw_target: # remove the additional fields del raw_target[None] if its.py_v2: # this will intentionally cause a UnicodeDecodeError to be raised as is the behaviour in python 3.x # when csv.DictReader is initialized raw_target = dict((k, (v if v is None else v.decode('utf-8'))) for k, v in raw_target.items()) if uid_charset is not None: raw_target['uid'] = utilities.make_message_uid( upper=uid_charset['upper'], lower=uid_charset['lower'], digits=uid_charset['digits'] ) target = MessageTarget(line=line_no, **raw_target) # the caller needs to catch and process the missing fields appropriately yield target target_file_h.close() def count_targets_file(target_file): """ Count the number of valid targets that the specified file contains. This skips lines which are missing fields or where the email address is invalid. :param str target_file: The path the the target CSV file on disk. :return: The number of valid targets. :rtype: int """ count = 0 for target in _iterate_targets_file(target_file): if target.missing_fields: continue if not utilities.is_valid_email_address(target.email_address): continue count += 1 return count def get_invite_start_from_config(config): """ Get the start time for an invite from the configuration. This takes into account whether the invite is for all day or starts at a specific time. :param dict config: The King Phisher client configuration. :return: The timestamp of when the invite is to start. 
:rtype: :py:class:`datetime.datetime` """ if config['mailer.calendar_invite_all_day']: start_time = datetime.datetime.combine( config['mailer.calendar_invite_date'], datetime.time(0, 0) ) else: start_time = datetime.datetime.combine( config['mailer.calendar_invite_date'], datetime.time( int(config['mailer.calendar_invite_start_hour']), int(config['mailer.calendar_invite_start_minute']) ) ) return start_time @smoke_zephyr.utilities.Cache('3m') def guess_smtp_server_address(host, forward_host=None): """ Guess the IP address of the SMTP server that will be connected to given the SMTP host information and an optional SSH forwarding host. If a hostname is in use it will be resolved to an IP address, either IPv4 or IPv6 and in that order. If a hostname resolves to multiple IP addresses, None will be returned. This function is intended to guess the SMTP servers IP address given the client configuration so it can be used for SPF record checks. :param str host: The SMTP server that is being connected to. :param str forward_host: An optional host that is being used to tunnel the connection. :return: The IP address of the SMTP server. :rtype: None, :py:class:`ipaddress.IPv4Address`, :py:class:`ipaddress.IPv6Address` """ host = host.rsplit(':', 1)[0] if ipaddress.is_valid(host): ip = ipaddress.ip_address(host) if not ip.is_loopback: return ip else: info = None for family in (socket.AF_INET, socket.AF_INET6): try: info = socket.getaddrinfo(host, 1, family) except socket.gaierror: continue info = set(list([r[4][0] for r in info])) if len(info) != 1: return break if info: ip = ipaddress.ip_address(info.pop()) if not ip.is_loopback: return ip if forward_host: return guess_smtp_server_address(forward_host) return def render_message_template(template, config, target=None, analyze=False): """ Take a message from a template and format it to be sent by replacing variables and processing other template directives. 
If the *target* parameter is not set, a placeholder will be created and the message will be formatted to be previewed. :param str template: The message template. :param dict config: The King Phisher client configuration. :param target: The messages intended target information. :type target: :py:class:`.MessageTarget` :param bool analyze: Set the template environment to analyze mode. :return: The formatted message. :rtype: str """ if target is None: target = MessageTargetPlaceholder(uid=config['server_config'].get('server.secret_id')) template_environment.set_mode(template_environment.MODE_PREVIEW) if analyze: template_environment.set_mode(template_environment.MODE_ANALYZE) template = template_environment.from_string(template) template_vars = {} template_vars['campaign'] = dict( id=str(config['campaign_id']), name=config['campaign_name'] ) template_vars['client'] = dict( first_name=target.first_name, last_name=target.last_name, email_address=target.email_address, department=target.department, company_name=config.get('mailer.company_name'), message_id=target.uid ) template_vars['sender'] = dict( email=config.get('mailer.source_email'), friendly_alias=config.get('mailer.source_email_alias'), reply_to=config.get('mailer.reply_to_email') ) template_vars['uid'] = target.uid message_type = config.get('mailer.message_type', 'email') template_vars['message_type'] = message_type if message_type == 'calendar_invite': template_vars['calendar_invite'] = dict( all_day=config.get('mailer.calendar_invite_all_day'), location=config.get('mailer.calendar_invite_location'), start=get_invite_start_from_config(config), summary=config.get('mailer.calendar_invite_summary') ) template_vars['message'] = dict( attachment=config.get('mailer.attachment_file'), importance=config.get('mailer.importance'), recipient=dict( field=config.get('mailer.target_field', 'to'), to=(target.email_address if config.get('mailer.target_field') == 'to' else config.get('mailer.recipient_email_to', '')), 
cc=(target.email_address if config.get('mailer.target_field') == 'cc' else config.get('mailer.recipient_email_cc', '')), bcc=(target.email_address if config.get('mailer.target_field') == 'bcc' else '') ), sensitivity=config.get('mailer.sensitivity'), subject=config.get('mailer.subject'), template=config.get('mailer.html_file'), type=message_type ) webserver_url = config.get('mailer.webserver_url', '') webserver_url = urllib.parse.urlparse(webserver_url) tracking_image = config['server_config']['server.tracking_image'] template_vars['webserver'] = webserver_url.netloc tracking_url = urllib.parse.urlunparse((webserver_url.scheme, webserver_url.netloc, tracking_image, '', 'id=' + target.uid, '')) webserver_url = urllib.parse.urlunparse((webserver_url.scheme, webserver_url.netloc, webserver_url.path, '', '', '')) template_vars['tracking_dot_image_tag'] = "<img src=\"{0}\" style=\"display:none\" />".format(tracking_url) template_vars_url = {} template_vars_url['rickroll'] = 'http://www.youtube.com/watch?v=oHg5SJYRHA0' template_vars_url['webserver'] = webserver_url + '?id=' + target.uid template_vars_url['webserver_raw'] = webserver_url template_vars_url['tracking_dot'] = tracking_url template_vars['url'] = template_vars_url template_vars.update(template_environment.standard_variables) return template.render(template_vars) def rfc2282_timestamp(dt=None, utc=False): """ Convert a :py:class:`datetime.datetime` instance into an :rfc:`2282` compliant timestamp suitable for use in MIME-encoded messages. :param dt: A time to use for the timestamp otherwise the current time is used. :type dt: :py:class:`datetime.datetime` :param utc: Whether to return the timestamp as a UTC offset or from the local timezone. :return: The timestamp. 
:rtype: str """ dt = dt or datetime.datetime.utcnow() # email.utils.formatdate wants the time to be in the local timezone dt = utilities.datetime_utc_to_local(dt) return email.utils.formatdate(time.mktime(dt.timetuple()), not utc) class MessageTarget(object): """ A simple class for holding information regarding a messages intended recipient. """ required_fields = ('first_name', 'last_name', 'email_address') __slots__ = 'department', 'email_address', 'first_name', 'last_name', 'line', 'uid' def __init__(self, first_name, last_name, email_address, uid=None, department=None, line=None): self.first_name = first_name """The target recipient's first name.""" self.last_name = last_name """The target recipient's last name.""" self.email_address = utilities.nonempty_string(email_address) """The target recipient's email address.""" self.uid = uid """The unique identifier that is going to be used for this target.""" if self.uid is None: self.uid = utilities.make_message_uid() self.department = utilities.nonempty_string(department) """The target recipient's department name.""" self.line = line """The line number in the file from which this target was loaded.""" def __repr__(self): return "<{0} first_name={1!r} last_name={2!r} email_address={3!r} >".format(self.__class__.__name__, self.first_name, self.last_name, self.email_address) @property def missing_fields(self): return tuple(field for field in self.required_fields if getattr(self, field) is None) class MessageTargetPlaceholder(MessageTarget): """ A default :py:class:`~.MessageTarget` for use as a placeholder value while rendering, performing tests, etc. """ def __init__(self, uid=None): super(MessageTargetPlaceholder, self).__init__('Alice', 'Liddle', 'aliddle@wonderland.com', uid=uid, department='Visitors') class TopMIMEMultipart(mime.multipart.MIMEMultipart): """ A :py:class:`.mime.multipart.MIMEMultipart` subclass for representing the top / outer most part of a MIME multipart message. 
This adds additional default headers to the message. """ def __init__(self, mime_type, config, target): """ :param str mime_type: The type of this part such as related or alternative. :param dict config: The King Phisher client configuration. :param target: The target information for the messages intended recipient. :type target: :py:class:`.MessageTarget` """ mime.multipart.MIMEMultipart.__init__(self, mime_type, charset='utf-8') self['Subject'] = render_message_template(config['mailer.subject'], config, target) if config.get('mailer.reply_to_email'): self.add_header('reply-to', config['mailer.reply_to_email']) if config.get('mailer.source_email_alias'): self['From'] = "\"{0}\" <{1}>".format(config['mailer.source_email_alias'], config['mailer.source_email']) else: self['From'] = config['mailer.source_email'] self['Date'] = rfc2282_timestamp() self.preamble = 'This is a multi-part message in MIME format.' class MIMEText(mime.text.MIMEText): def __init__(self, text, subtype, charset='utf-8'): super(MIMEText, self).__init__(text, subtype, charset) @property def payload_string(self): return self.get_payload_string() @payload_string.setter def payload_string(self, text): self.set_payload_string(text) def get_payload_string(self): payload = self.get_payload(decode=True) if payload: charset = self.get_charset() payload = payload.decode(charset.input_charset) return payload def set_payload_string(self, payload, charset=None): if 'Content-Transfer-Encoding' in self: del self['Content-Transfer-Encoding'] return self.set_payload(payload, charset=charset or self.get_charset()) class MailSenderThread(threading.Thread): """ The King Phisher threaded email message sender. This object manages the sending of emails for campaigns and supports pausing the sending of messages which can later be resumed by unpausing. This object reports its information to the GUI through an optional :py:class:`.MailSenderSendTab` instance, these two objects are very interdependent. 
""" def __init__(self, application, target_file, rpc, tab=None): """ :param application: The GTK application that the thread is associated with. :type application: :py:class:`.KingPhisherClientApplication` :param str target_file: The CSV formatted file to read message targets from. :param tab: The GUI tab to report information to. :type tab: :py:class:`.MailSenderSendTab` :param rpc: The client's connected RPC instance. :type rpc: :py:class:`.KingPhisherRPCClient` """ super(MailSenderThread, self).__init__() self.daemon = True self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__) self.application = application self.config = self.application.config self.target_file = target_file """The name of the target file in CSV format.""" self.tab = tab """The optional :py:class:`.MailSenderSendTab` instance for reporting status messages to the GUI.""" self.rpc = rpc self._ssh_forwarder = None self.smtp_connection = None """The :py:class:`smtplib.SMTP` connection instance.""" self.smtp_server = smoke_zephyr.utilities.parse_server(self.config['smtp_server'], 25) self.running = threading.Event() """A :py:class:`threading.Event` object indicating if emails are being sent.""" self.paused = threading.Event() """A :py:class:`threading.Event` object indicating if the email sending operation is or should be paused.""" self.should_stop = threading.Event() self.max_messages_per_minute = float(self.config.get('smtp_max_send_rate', 0.0)) self.mail_options = [] def tab_notify_sent(self, emails_done, emails_total): """ Notify the tab that messages have been sent. :param int emails_done: The number of emails that have been sent. :param int emails_total: The total number of emails that are going to be sent. """ if isinstance(self.tab, gui_utilities.GladeGObject): GLib.idle_add(lambda x: self.tab.notify_sent(*x), (emails_done, emails_total)) def tab_notify_status(self, message): """ Handle a status message regarding the message sending operation. 
:param str message: The notification message. """ self.logger.info(message.lower()) if isinstance(self.tab, gui_utilities.GladeGObject): GLib.idle_add(self.tab.notify_status, message + '\n') def tab_notify_stopped(self): """ Notify the tab that the message sending operation has stopped. """ if isinstance(self.tab, gui_utilities.GladeGObject): GLib.idle_add(self.tab.notify_stopped) def server_ssh_connect(self): """ Connect to the remote SMTP server over SSH and configure port forwarding with :py:class:`.SSHTCPForwarder` for tunneling SMTP traffic. :return: The connection status as one of the :py:class:`.ConnectionErrorReason` constants. """ server = smoke_zephyr.utilities.parse_server(self.config['ssh_server'], 22) username = self.config['ssh_username'] password = self.config['ssh_password'] remote_server = smoke_zephyr.utilities.parse_server(self.config['smtp_server'], 25) try: self._ssh_forwarder = SSHTCPForwarder( server, username, password, remote_server, private_key=self.config.get('ssh_preferred_key'), missing_host_key_policy=ssh_host_key.MissingHostKeyPolicy(self.application) ) self._ssh_forwarder.start() except errors.KingPhisherAbortError as error: self.logger.info("ssh connection aborted ({0})".format(error.message)) except paramiko.AuthenticationException: self.logger.warning('failed to authenticate to the remote ssh server') return ConnectionErrorReason.ERROR_AUTHENTICATION_FAILED except paramiko.SSHException as error: self.logger.warning("failed with: {0!r}".format(error)) except socket.timeout: self.logger.warning('the connection to the ssh server timed out') except Exception: self.logger.warning('failed to connect to the remote ssh server', exc_info=True) else: self.smtp_server = self._ssh_forwarder.local_server return ConnectionErrorReason.SUCCESS return ConnectionErrorReason.ERROR_UNKNOWN def server_smtp_connect(self): """ Connect and optionally authenticate to the configured SMTP server. 
:return: The connection status as one of the :py:class:`.ConnectionErrorReason` constants. """ if self.config.get('smtp_ssl_enable', False): SmtpClass = smtplib.SMTP_SSL else: SmtpClass = smtplib.SMTP self.logger.debug('opening a new connection to the SMTP server') try: self.smtp_connection = SmtpClass(*self.smtp_server, timeout=15) self.smtp_connection.ehlo() except smtplib.SMTPException: self.logger.warning('received an SMTPException while connecting to the SMTP server', exc_info=True) return ConnectionErrorReason.ERROR_UNKNOWN except socket.error: self.logger.warning('received a socket.error while connecting to the SMTP server') return ConnectionErrorReason.ERROR_CONNECTION if not self.config.get('smtp_ssl_enable', False) and 'starttls' in self.smtp_connection.esmtp_features: self.logger.debug('target SMTP server supports the STARTTLS extension') try: self.smtp_connection.starttls() self.smtp_connection.ehlo() except smtplib.SMTPException: self.logger.warning('received an SMTPException while negotiating STARTTLS with the SMTP server', exc_info=True) return ConnectionErrorReason.ERROR_UNKNOWN except socket.error: self.logger.warning('received a socket.error while negotiating STARTTLS with the SMTP server') return ConnectionErrorReason.ERROR_CONNECTION username = self.config.get('smtp_username', '') if username: password = self.config.get('smtp_password', '') try: self.smtp_connection.login(username, password) except smtplib.SMTPException as error: self.logger.warning('received an {0} while authenticating to the SMTP server'.format(error.__class__.__name__)) self.smtp_connection.quit() return ConnectionErrorReason.ERROR_AUTHENTICATION_FAILED if self.smtp_connection.has_extn('SMTPUTF8'): self.logger.debug('target SMTP server supports the SMTPUTF8 extension') self.mail_options.append('SMTPUTF8') return ConnectionErrorReason.SUCCESS def server_smtp_disconnect(self): """Clean up and close the connection to the remote SMTP server.""" if self.smtp_connection: 
self.logger.debug('closing the connection to the SMTP server') try: self.smtp_connection.quit() except smtplib.SMTPServerDisconnected: pass self.smtp_connection = None self.tab_notify_status('Disconnected from the SMTP server') def server_smtp_reconnect(self): """ Disconnect from the remote SMTP server and then attempt to open a new connection to it. :return: The reconnection status. :rtype: bool """ if self.smtp_connection: try: self.smtp_connection.quit() except smtplib.SMTPServerDisconnected: pass self.smtp_connection = None while self.server_smtp_connect() != ConnectionErrorReason.SUCCESS: self.tab_notify_status('Failed to reconnect to the SMTP server') if not self.process_pause(True): return False return True def count_targets(self): """ Count the number of targets that will be sent messages. :return: The number of targets that will be sent messages. :rtype: int """ return sum(1 for _ in self.iterate_targets(counting=True)) def iterate_targets(self, counting=False): """ Iterate over each of the targets as defined within the configuration. If *counting* is ``False``, messages will not be displayed to the end user through the notification tab. :param bool counting: Whether or not to iterate strictly for counting purposes. :return: Each message target. 
:rtype: :py:class:`~.MessageTarget` """ mailer_tab = self.application.main_tabs['mailer'] target_type = self.config['mailer.target_type'] if target_type == 'single': target_name = self.config['mailer.target_name'].split(' ') while len(target_name) < 2: target_name.append('') uid_charset = self.config['mailer.message_uid.charset'] target = MessageTarget( first_name=target_name[0].strip(), last_name=target_name[1].strip(), email_address=self.config['mailer.target_email_address'].strip(), uid=utilities.make_message_uid( upper=uid_charset['upper'], lower=uid_charset['lower'], digits=uid_charset['digits'] ) ) if not counting: mailer_tab.emit('target-create', target) yield target elif target_type == 'file': for target in _iterate_targets_file(self.target_file, config=self.config): missing_fields = target.missing_fields if missing_fields: if counting: msg = "Target CSV line {0} skipped due to missing field{1}".format(target.line, ('' if len(missing_fields) == 1 else 's')) msg += ':' + ', '.join(field.replace('_', ' ') for field in missing_fields) self.tab_notify_status(msg) continue if not utilities.is_valid_email_address(target.email_address): self.logger.warning("skipping line {0} in target csv file due to invalid email address: {1}".format(target.line, target.email_address)) continue if not counting: mailer_tab.emit('target-create', target) yield target else: self.logger.error("the configured target type '{0}' is unsupported".format(target_type)) def run(self): """The entry point of the thread.""" self.logger.debug("mailer routine running in tid: 0x{0:x}".format(threading.current_thread().ident)) self.running.set() self.should_stop.clear() self.paused.clear() try: self._prepare_env() emails_done = self._send_messages() except UnicodeDecodeError as error: self.logger.error("a unicode error occurred, {0} at position: {1}-{2}".format(error.reason, error.start, error.end)) self.tab_notify_status("A unicode error occurred, {0} at position: {1}-{2}".format(error.reason, 
error.start, error.end)) except Exception: self.logger.error('an error occurred while sending messages', exc_info=True) self.tab_notify_status('An error occurred while sending messages.') else: self.tab_notify_status("Finished sending, successfully sent {0:,} messages".format(emails_done)) self.server_smtp_disconnect() if self._ssh_forwarder: self._ssh_forwarder.stop() self._ssh_forwarder = None self.tab_notify_status('Disconnected from the SSH server') self.tab_notify_stopped() return def process_pause(self, set_pause=False): """ Pause sending emails if a pause request has been set. :param bool set_pause: Whether to request a pause before processing it. :return: Whether or not the sending operation was cancelled during the pause. :rtype: bool """ if set_pause: if isinstance(self.tab, gui_utilities.GladeGObject): gui_utilities.glib_idle_add_wait(lambda: self.tab.pause_button.set_property('active', True)) else: self.pause() if self.paused.is_set(): self.tab_notify_status('Paused sending emails, waiting to resume') self.running.wait() self.paused.clear() if self.should_stop.is_set(): self.tab_notify_status('Sending emails cancelled') return False self.tab_notify_status('Resuming sending emails') self.max_messages_per_minute = float(self.config.get('smtp_max_send_rate', 0.0)) return True def create_message(self, target=None): if target is None: target = MessageTargetPlaceholder(uid=self.config['server_config'].get('server.secret_id')) attachments = self.get_mime_attachments() message = getattr(self, 'create_message_' + self.config['mailer.message_type'])(target, attachments) # set the Message-ID header, per RFC-2822 using the target UID and the sender domain mime_msg_id = '<' + target.uid if '@' in self.config['mailer.source_email']: mime_msg_id += '@' + self.config['mailer.source_email'].split('@', 1)[1] mime_msg_id += '>' message['Message-ID'] = mime_msg_id mailer_tab = self.application.main_tabs['mailer'] mailer_tab.emit('message-create', target, message) return 
message def create_message_calendar_invite(self, target, attachments): """ Create a MIME calendar invite to be sent from a set of parameters. :param target: The information for the messages intended recipient. :type target: :py:class:`.MessageTarget` :param str uid: The message's unique identifier. :param attachments: The attachments to add to the created message. :type attachments: :py:class:`Attachments` :return: The new MIME message. :rtype: :py:class:`email.mime.multipart.MIMEMultipart` """ top_msg = TopMIMEMultipart('mixed', self.config, target) top_msg['To'] = target.email_address related_msg = mime.multipart.MIMEMultipart('related') top_msg.attach(related_msg) alt_msg = mime.multipart.MIMEMultipart('alternative') related_msg.attach(alt_msg) part = mime.base.MIMEBase('text', 'plain', charset='utf-8') part.set_payload(MIME_TEXT_PLAIN) encoders.encode_base64(part) alt_msg.attach(part) with codecs.open(self.config['mailer.html_file'], 'r', encoding='utf-8') as file_h: msg_template = file_h.read() formatted_msg = render_message_template(msg_template, self.config, target=target) part = MIMEText(formatted_msg, 'html') alt_msg.attach(part) start_time = get_invite_start_from_config(self.config) if self.config['mailer.calendar_invite_all_day']: duration = ics.DurationAllDay() else: duration = int(self.config['mailer.calendar_invite_duration']) * 60 ical = ics.Calendar( self.config['mailer.source_email'], start_time, self.config.get('mailer.calendar_invite_summary'), duration=duration, location=self.config.get('mailer.calendar_invite_location') ) ical.add_attendee(target.email_address, rsvp=self.config.get('mailer.calendar_request_rsvp', False)) part = mime.base.MIMEBase('text', 'calendar', charset='utf-8', method='REQUEST') part.set_payload(ical.to_ical(encoding='utf-8')) encoders.encode_base64(part) alt_msg.attach(part) for attach in attachments.images: related_msg.attach(attach) for attach in attachments.files: top_msg.attach(attach) return top_msg def 
create_message_email(self, target, attachments): """ Create a MIME email to be sent from a set of parameters. :param target: The information for the messages intended recipient. :type target: :py:class:`.MessageTarget` :param str uid: The message's unique identifier. :param attachments: The attachments to add to the created message. :type attachments: :py:class:`MessageAttachments` :return: The new MIME message. :rtype: :py:class:`email.mime.multipart.MIMEMultipart` """ msg = TopMIMEMultipart('related', self.config, target) target_field = self.config.get('mailer.target_field', 'to').lower() for header in ('To', 'CC', 'BCC'): if header.lower() == target_field: msg[header] = '<' + target.email_address + '>' continue value = self.config.get('mailer.recipient_email_' + header.lower()) if value: msg[header] = '<' + value + '>' importance = self.config.get('mailer.importance', 'Normal') if importance != 'Normal': msg['Importance'] = importance sensitivity = self.config.get('mailer.sensitivity', 'Normal') if sensitivity != 'Normal': msg['Sensitivity'] = sensitivity msg_alt = mime.multipart.MIMEMultipart('alternative') msg.attach(msg_alt) with codecs.open(self.config['mailer.html_file'], 'r', encoding='utf-8') as file_h: msg_template = file_h.read() formatted_msg = render_message_template(msg_template, self.config, target=target) # RFC-1341 page 35 states friendliest part must be attached first msg_body = MIMEText(MIME_TEXT_PLAIN, 'plain') msg_alt.attach(msg_body) msg_body = MIMEText(formatted_msg, 'html') msg_alt.attach(msg_body) msg_alt.set_default_type('html') # process attachments for attach in attachments.files: msg.attach(attach) for attach in attachments.images: msg.attach(attach) return msg def get_mime_attachments(self): """ Return a :py:class:`.MessageAttachments` object containing both the images and raw files to be included in sent messages. :return: A namedtuple of both files and images in their MIME containers. 
:rtype: :py:class:`.MessageAttachments` """ files = [] # allow the attachment_file.post_processing to be attached instead of # attachment_file so attachment_file can be used as an input for # arbitrary operations to modify without over writing the original attachment_file = self.config.get('mailer.attachment_file.post_processing') delete_attachment_file = False if attachment_file is not None: if not isinstance(attachment_file, str): raise TypeError('config option mailer.attachment_file.post_processing is not a readable file') if not os.path.isfile(attachment_file) and os.access(attachment_file, os.R_OK): raise ValueError('config option mailer.attachment_file.post_processing is not a readable file') self.config['mailer.attachment_file.post_processing'] = None delete_attachment_file = True else: attachment_file = self.config.get('mailer.attachment_file') if attachment_file: attachfile = mime.base.MIMEBase(*mimetypes.guess_type(attachment_file)) attachfile.set_payload(open(attachment_file, 'rb').read()) encoders.encode_base64(attachfile) attachfile.add_header('Content-Disposition', "attachment; filename=\"{0}\"".format(os.path.basename(attachment_file))) files.append(attachfile) if delete_attachment_file and os.access(attachment_file, os.W_OK): os.remove(attachment_file) images = [] for attachment_file, attachment_name in template_environment.attachment_images.items(): attachfile = mime.image.MIMEImage(open(attachment_file, 'rb').read()) attachfile.add_header('Content-ID', "<{0}>".format(attachment_name)) attachfile.add_header('Content-Disposition', "inline; filename=\"{0}\"".format(attachment_name)) images.append(attachfile) return MessageAttachments(tuple(files), tuple(images)) def _prepare_env(self): with codecs.open(self.config['mailer.html_file'], 'r', encoding='utf-8') as file_h: msg_template = file_h.read() render_message_template(msg_template, self.config, analyze=True) template_environment.set_mode(template_environment.MODE_SEND) def _send_messages(self): 
emails_done = 0 mailer_tab = self.application.main_tabs['mailer'] max_messages_per_connection = self.config.get('mailer.max_messages_per_connection', 5) emails_total = "{0:,}".format(self.count_targets()) sending_line = "Sending email {{0: >{0},}} of {1} with UID: {{1}} to {{2}}".format(len(emails_total), emails_total) emails_total = int(emails_total.replace(',', '')) for target in self.iterate_targets(): iteration_time = time.time() if self.should_stop.is_set(): self.tab_notify_status('Sending emails cancelled') break if not self.process_pause(): break if emails_done > 0 and max_messages_per_connection > 0 and (emails_done % max_messages_per_connection == 0): self.server_smtp_reconnect() emails_done += 1 if not all(mailer_tab.emit('target-send', target)): self.logger.info("target-send signal subscriber vetoed target: {0!r}".format(target)) continue self.tab_notify_status(sending_line.format(emails_done, target.uid, target.email_address)) message = self.create_message(target=target) if not all(mailer_tab.emit('message-send', target, message)): self.logger.info("message-send signal subscriber vetoed message to target: {0!r}".format(target)) continue self.rpc( 'campaign/message/new/deferred', self.config['campaign_id'], target.uid, target.email_address, target.first_name, target.last_name, target.department ) if not self._try_send_message(target.email_address, message): self.rpc('db/table/delete', 'messages', target.uid) break self.rpc('db/table/set', 'messages', target.uid, ('sent',), (datetime.datetime.utcnow(),)) self.tab_notify_sent(emails_done, emails_total) self.application.emit('message-sent', target.uid, target.email_address) if self.max_messages_per_minute: iteration_time = (time.time() - iteration_time) self._sleep((60.0 / float(self.max_messages_per_minute)) - iteration_time) return emails_done def _sleep(self, duration): while duration > 0: sleep_chunk = min(duration, 0.5) time.sleep(sleep_chunk) if self.should_stop.is_set(): break duration -= sleep_chunk 
return self.should_stop.is_set() def _try_send_message(self, *args, **kwargs): message_sent = False while not message_sent and not self.should_stop.is_set(): for i in range(0, 3): try: self.send_message(*args, **kwargs) message_sent = True break except smtplib.SMTPServerDisconnected: self.logger.warning('failed to send message, the server has been disconnected') self.tab_notify_status('Failed to send message, the server has been disconnected') self.tab_notify_status('Sleeping for 5 seconds before attempting to reconnect') if self._sleep(5): break self.smtp_connection = None self.server_smtp_reconnect() except smtplib.SMTPException as error: self.tab_notify_status("Failed to send message (exception: {0})".format(error.__class__.__name__)) self.logger.warning("failed to send message (exception: smtplib.{0})".format(error.__class__.__name__)) self._sleep((i + 1) ** 2) if not message_sent: self.server_smtp_disconnect() if not self.process_pause(True): return False self.server_smtp_reconnect() return True def send_message(self, target_email, msg): """ Send an email using the connected SMTP server. :param str target_email: The email address to send the message to. :param msg: The formatted message to be sent. :type msg: :py:class:`.mime.multipart.MIMEMultipart` """ source_email = self.config['mailer.source_email_smtp'] self.smtp_connection.sendmail(source_email, target_email, msg.as_string(), self.mail_options) def pause(self): """ Sets the :py:attr:`~.MailSenderThread.running` and :py:attr:`~.MailSenderThread.paused` flags correctly to indicate that the object is paused. """ self.running.clear() self.paused.set() def unpause(self): """ Sets the :py:attr:`~.MailSenderThread.running` and :py:attr:`~.MailSenderThread.paused` flags correctly to indicate that the object is no longer paused. """ self.running.set() def stop(self): """ Requests that the email sending operation stop. It can not be resumed from the same position. 
This function blocks until the stop request has been processed and the thread exits. """ self.should_stop.set() self.unpause() if self.is_alive(): self.join() def missing_files(self): """ Return a list of all missing or unreadable files which are referenced by the message template. :return: The list of unusable files. :rtype: list """ missing = [] attachment = self.config.get('mailer.attachment_file') if attachment and not os.access(attachment, os.R_OK): missing.append(attachment) msg_template = self.config['mailer.html_file'] if not os.access(msg_template, os.R_OK): missing.append(msg_template) return missing self._prepare_env() for attachment in template_environment.attachment_images.keys(): if not os.access(attachment, os.R_OK): missing.append(attachment) return missing
guitarmanj/king-phisher
king_phisher/client/mailer.py
Python
bsd-3-clause
38,476
0.023365
#!/usr/bin/env python # Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""A class to start/stop the apache http server used by layout tests.""" import logging import os import re import sys from webkitpy.layout_tests.servers import http_server_base _log = logging.getLogger(__name__) class LayoutTestApacheHttpd(http_server_base.HttpServerBase): def __init__(self, port_obj, output_dir, additional_dirs=None, number_of_servers=None): """Args: port_obj: handle to the platform-specific routines output_dir: the absolute path to the layout test result directory """ http_server_base.HttpServerBase.__init__(self, port_obj, number_of_servers) # We use the name "httpd" instead of "apache" to make our paths (e.g. the pid file: /tmp/WebKit/httpd.pid) # match old-run-webkit-tests: https://bugs.webkit.org/show_bug.cgi?id=63956 self._name = 'httpd' self._mappings = [{'port': 8000}, {'port': 8080}, {'port': 8081}, {'port': 8443, 'sslcert': True}] self._output_dir = output_dir self._filesystem.maybe_make_directory(output_dir) self._pid_file = self._filesystem.join(self._runtime_path, '%s.pid' % self._name) test_dir = self._port_obj.layout_tests_dir() js_test_resources_dir = self._filesystem.join(test_dir, "fast", "js", "resources") media_resources_dir = self._filesystem.join(test_dir, "media") mime_types_path = self._filesystem.join(test_dir, "http", "conf", "mime.types") cert_file = self._filesystem.join(test_dir, "http", "conf", "webkit-httpd.pem") access_log = self._filesystem.join(output_dir, "access_log.txt") error_log = self._filesystem.join(output_dir, "error_log.txt") document_root = self._filesystem.join(test_dir, "http", "tests") # FIXME: We shouldn't be calling a protected method of _port_obj! 
executable = self._port_obj._path_to_apache() start_cmd = [executable, '-f', "\"%s\"" % self._get_apache_config_file_path(test_dir, output_dir), '-C', "\'DocumentRoot \"%s\"\'" % document_root, '-c', "\'Alias /js-test-resources \"%s\"'" % js_test_resources_dir, '-c', "\'Alias /media-resources \"%s\"'" % media_resources_dir, '-C', "\'Listen %s\'" % "127.0.0.1:8000", '-C', "\'Listen %s\'" % "127.0.0.1:8081", '-c', "\'TypesConfig \"%s\"\'" % mime_types_path, '-c', "\'CustomLog \"%s\" common\'" % access_log, '-c', "\'ErrorLog \"%s\"\'" % error_log, '-C', "\'User \"%s\"\'" % os.environ.get("USERNAME", os.environ.get("USER", "")), '-c', "\'PidFile %s'" % self._pid_file, '-k', "start"] if additional_dirs: for alias, path in additional_dirs.iteritems(): start_cmd += ['-c', "\'Alias %s \"%s\"\'" % (alias, path), # Disable CGI handler for additional dirs. '-c', "\'<Location %s>\'" % alias, '-c', "\'RemoveHandler .cgi .pl\'", '-c', "\'</Location>\'"] if self._number_of_servers: start_cmd += ['-c', "\'StartServers %d\'" % self._number_of_servers, '-c', "\'MinSpareServers %d\'" % self._number_of_servers, '-c', "\'MaxSpareServers %d\'" % self._number_of_servers] stop_cmd = [executable, '-f', "\"%s\"" % self._get_apache_config_file_path(test_dir, output_dir), '-c', "\'PidFile %s'" % self._pid_file, '-k', "stop"] start_cmd.extend(['-c', "\'SSLCertificateFile %s\'" % cert_file]) # Join the string here so that Cygwin/Windows and Mac/Linux # can use the same code. Otherwise, we could remove the single # quotes above and keep cmd as a sequence. # FIXME: It's unclear if this is still needed. self._start_cmd = " ".join(start_cmd) self._stop_cmd = " ".join(stop_cmd) def _get_apache_config_file_path(self, test_dir, output_dir): """Returns the path to the apache config file to use. Args: test_dir: absolute path to the LayoutTests directory. output_dir: absolute path to the layout test results directory. 
""" httpd_config = self._port_obj._path_to_apache_config_file() httpd_config_copy = os.path.join(output_dir, "httpd.conf") httpd_conf = self._filesystem.read_text_file(httpd_config) # FIXME: Why do we need to copy the config file since we're not modifying it? self._filesystem.write_text_file(httpd_config_copy, httpd_conf) return httpd_config_copy def _spawn_process(self): _log.debug('Starting %s server, cmd="%s"' % (self._name, str(self._start_cmd))) retval, err = self._run(self._start_cmd) if retval or len(err): raise http_server_base.ServerError('Failed to start %s: %s' % (self._name, err)) # For some reason apache isn't guaranteed to have created the pid file before # the process exits, so we wait a little while longer. if not self._wait_for_action(lambda: self._filesystem.exists(self._pid_file)): raise http_server_base.ServerError('Failed to start %s: no pid file found' % self._name) return int(self._filesystem.read_text_file(self._pid_file)) def _stop_running_server(self): # If apache was forcefully killed, the pid file will not have been deleted, so check # that the process specified by the pid_file no longer exists before deleting the file. if self._pid and not self._executive.check_running_pid(self._pid): self._filesystem.remove(self._pid_file) return retval, err = self._run(self._stop_cmd) if retval or len(err): raise http_server_base.ServerError('Failed to stop %s: %s' % (self._name, err)) # For some reason apache isn't guaranteed to have actually stopped after # the stop command returns, so we wait a little while longer for the # pid file to be removed. if not self._wait_for_action(lambda: not self._filesystem.exists(self._pid_file)): raise http_server_base.ServerError('Failed to stop %s: pid file still exists' % self._name) def _run(self, cmd): # Use shell=True because we join the arguments into a string for # the sake of Window/Cygwin and it needs quoting that breaks # shell=False. # FIXME: We should not need to be joining shell arguments into strings. 
# shell=True is a trail of tears. # Note: Not thread safe: http://bugs.python.org/issue2320 process = self._executive.popen(cmd, shell=True, stderr=self._executive.PIPE) process.wait() retval = process.returncode err = process.stderr.read() return (retval, err)
youfoh/webkit-efl
Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py
Python
lgpl-2.1
8,511
0.005522
import vtk
import numpy as np
import matplotlib.pyplot as plt


def vtkmatrix_to_numpy(matrix):
    # Copy a vtkMatrix4x4 element-by-element into a 4x4 numpy array.
    m = np.ones((4, 4))
    for i in range(4):
        for j in range(4):
            m[i, j] = matrix.GetElement(i, j)
    return m

"""
Get transformation from viewpoint coordinates to real-world coordinates. (tmat)
"""

# vtk rendering objects
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# create cube and add it to renderer
# (not needed except to validate positioning of camera)
cube = vtk.vtkCubeSource()
cube.SetCenter(0.0, 0.0, 3.0)
cubeMapper = vtk.vtkPolyDataMapper()
cubeMapper.SetInputConnection(cube.GetOutputPort())
cubeActor = vtk.vtkActor()
cubeActor.SetMapper(cubeMapper)
ren.AddActor(cubeActor)

# set the intrinsic parameters
# (Kinect-like: 640x480, 60 degree view angle, near/far clip at 0.8/4.0 m)
renWin.SetSize((640, 480))
cam = ren.GetActiveCamera()
cam.SetViewAngle(60.0)
cam.SetClippingRange(0.8, 4.0)
# keep the clipping range fixed so tmat stays constant between renders
iren.GetInteractorStyle().SetAutoAdjustCameraClippingRange(0)

# have it positioned at the origin and looking down the z axis
cam.SetPosition(0.0, 0.0, 0.0)
cam.SetFocalPoint(0.0, 0.0, 1.0)

iren.Initialize()
iren.Render()

# inverse of the composite projection matrix maps normalized view
# coordinates back to world coordinates
vtktmat = cam.GetCompositeProjectionTransformMatrix(
    ren.GetTiledAspectRatio(),
    0.0, 1.0)
vtktmat.Invert()
tmat = vtkmatrix_to_numpy(vtktmat)

"""
Plot
"""

plt.figure(frameon=False, dpi=100)

nvalues = 100
noise = 0.002

# vpc - view point coordinates
# wc - world coordinates
# rows are homogeneous (x, y, z, w); only z is swept from 0 to 1
vpc = np.zeros((4, nvalues))
vpc[2, :] = np.linspace(0, 1, nvalues)
vpc[3, :] = np.ones((1, vpc.shape[1]))
wc = np.dot(tmat, vpc)
wc = wc / wc[3]  # homogeneous divide
wz = wc[2, :]
plt.plot(vpc[2, :],
         wz,
         '-o',
         color='b', markersize=2, markerfacecolor='g')

# nvpc, nwc - same as vpc, wc but with noise
nvpc = vpc.copy()
nvpc[2, :] += noise
nwc = np.dot(tmat, nvpc)
nwc = nwc / nwc[3]
nwz = nwc[2, :]
# plt.plot(vpc[2, :],
#          nwz,
#          color='r')

# nvpc, nwc - same as vpc, wc but with noise
nvpc = vpc.copy()
nvpc[2, :] -= noise
nwc = np.dot(tmat, nvpc)
nwc = nwc / nwc[3]
nwz = nwc[2, :]
# plt.plot(vpc[2, :],
#          nwz,
#          color='r')

"""
Plot display properties
"""

plt.title('View to Sensor Coordinates Along Z Axis')
plt.xlabel('View Coordinates Z (normalized units)')
plt.ylabel('Sensor Coordinates Z (m)')
plt.grid(True)
ax = plt.gca()
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label]):
    item.set_fontsize(18)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(12)
# plt.savefig('plot_depth.png')
plt.show()

"""
Plot
"""

# wc
plt.figure(frameon=False, dpi=100)
# compare the propagated noise against the Khoshelham empirical model
# (0.5 * 2.85e-5 * z_cm^2); wz is in meters so multiply by 100
plt.plot(wz,
         (wz-nwz)*100,
         '-o',
         color='b', markersize=2, markerfacecolor='g',
         label='MABDI')
plt.plot(wz,
         (0.5*2.85e-5*pow(wz*100, 2)),
         color='r',
         label='Khoshelham Noise Model')

"""
Plot display properties
"""

plt.title('Standard Deviation of Noise Along Sensor Z')
plt.xlabel('Distance to Actual Point (m)')
plt.ylabel('Standard Deviation of Error (cm)')
plt.grid(True)
ax = plt.gca()
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label]):
    item.set_fontsize(18)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(12)
plt.legend(loc='upper left')
# plt.savefig('plot_depth.png')
plt.show()

"""
val1 = 0.6
val2 = 0.8
noise = 0.001
vp = np.array([(0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
               (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
               (val1 - noise, val1, val1 + noise, val2 - noise, val2, val2 + noise),
               (1.0, 1.0, 1.0, 1.0, 1.0, 1.0)])
wp = np.dot(tmat, vp)
wp = wp / wp[3]

vpz = vp[2, :]
wpz = wp[2, :]

plt.plot(vpz, wpz,
         'o',
         color='b', markersize=9, markerfacecolor='r')

string = 'with noise = {:.3f}\n' \
         ' x y \n' \
         '({:.4f}, {:.4f})\n' \
         '({:.4f}, {:.4f})\n' \
         '({:.4f}, {:.4f})\n' \
         'diff = {:.2f} (cm)'.format(noise,
                                     vpz[0], wpz[0],
                                     vpz[1], wpz[1],
                                     vpz[2], wpz[2],
                                     abs(wpz[2] - wpz[0]) * 100)
bbox = {'edgecolor': 'black', 'facecolor': 'white', 'pad': 10}
plt.text(0.305, 1.72, string, bbox=bbox)

string = 'with noise = {:.3f}\n' \
         ' x y \n' \
         '({:.4f}, {:.4f})\n' \
         '({:.4f}, {:.4f})\n' \
         '({:.4f}, {:.4f})\n' \
         'diff = {:.2f} (cm)'.format(noise,
                                     vpz[3], wpz[3],
                                     vpz[4], wpz[4],
                                     vpz[5], wpz[5],
                                     abs(wpz[5] - wpz[3]) * 100)
plt.text(0.835, 1.20, string, bbox=bbox)
"""
lucasplus/MABDI
scripts/Plot_Depth_Image_To_Z.py
Python
bsd-3-clause
4,777
0.000837
#!/usr/bin/env python import sys import struct import string class QcowHeaderExtension: def __init__(self, magic, length, data): self.magic = magic self.length = length self.data = data @classmethod def create(cls, magic, data): return QcowHeaderExtension(magic, len(data), data) class QcowHeader: uint32_t = 'I' uint64_t = 'Q' fields = [ # Version 2 header fields [ uint32_t, '%#x', 'magic' ], [ uint32_t, '%d', 'version' ], [ uint64_t, '%#x', 'backing_file_offset' ], [ uint32_t, '%#x', 'backing_file_size' ], [ uint32_t, '%d', 'cluster_bits' ], [ uint64_t, '%d', 'size' ], [ uint32_t, '%d', 'crypt_method' ], [ uint32_t, '%d', 'l1_size' ], [ uint64_t, '%#x', 'l1_table_offset' ], [ uint64_t, '%#x', 'refcount_table_offset' ], [ uint32_t, '%d', 'refcount_table_clusters' ], [ uint32_t, '%d', 'nb_snapshots' ], [ uint64_t, '%#x', 'snapshot_offset' ], # Version 3 header fields [ uint64_t, '%#x', 'incompatible_features' ], [ uint64_t, '%#x', 'compatible_features' ], [ uint64_t, '%#x', 'autoclear_features' ], [ uint32_t, '%d', 'refcount_order' ], [ uint32_t, '%d', 'header_length' ], ]; fmt = '>' + ''.join(field[0] for field in fields) def __init__(self, fd): buf_size = struct.calcsize(QcowHeader.fmt) fd.seek(0) buf = fd.read(buf_size) header = struct.unpack(QcowHeader.fmt, buf) self.__dict__ = dict((field[2], header[i]) for i, field in enumerate(QcowHeader.fields)) self.set_defaults() self.cluster_size = 1 << self.cluster_bits fd.seek(self.header_length) self.load_extensions(fd) if self.backing_file_offset: fd.seek(self.backing_file_offset) self.backing_file = fd.read(self.backing_file_size) else: self.backing_file = None def set_defaults(self): if self.version == 2: self.incompatible_features = 0 self.compatible_features = 0 self.autoclear_features = 0 self.refcount_order = 4 self.header_length = 72 def load_extensions(self, fd): self.extensions = [] if self.backing_file_offset != 0: end = min(self.cluster_size, self.backing_file_offset) else: end = self.cluster_size while 
fd.tell() < end: (magic, length) = struct.unpack('>II', fd.read(8)) if magic == 0: break else: padded = (length + 7) & ~7 data = fd.read(padded) self.extensions.append(QcowHeaderExtension(magic, length, data)) def update_extensions(self, fd): fd.seek(self.header_length) extensions = self.extensions extensions.append(QcowHeaderExtension(0, 0, "")) for ex in extensions: buf = struct.pack('>II', ex.magic, ex.length) fd.write(buf) fd.write(ex.data) if self.backing_file != None: self.backing_file_offset = fd.tell() fd.write(self.backing_file) if fd.tell() > self.cluster_size: raise Exception("I think I just broke the image...") def update(self, fd): header_bytes = self.header_length self.update_extensions(fd) fd.seek(0) header = tuple(self.__dict__[f] for t, p, f in QcowHeader.fields) buf = struct.pack(QcowHeader.fmt, *header) buf = buf[0:header_bytes-1] fd.write(buf) def dump(self): for f in QcowHeader.fields: print "%-25s" % f[2], f[1] % self.__dict__[f[2]] print "" def dump_extensions(self): for ex in self.extensions: data = ex.data[:ex.length] if all(c in string.printable for c in data): data = "'%s'" % data else: data = "<binary>" print "Header extension:" print "%-25s %#x" % ("magic", ex.magic) print "%-25s %d" % ("length", ex.length) print "%-25s %s" % ("data", data) print "" def cmd_dump_header(fd): h = QcowHeader(fd) h.dump() h.dump_extensions() def cmd_set_header(fd, name, value): try: value = int(value, 0) except: print "'%s' is not a valid number" % value sys.exit(1) fields = (field[2] for field in QcowHeader.fields) if not name in fields: print "'%s' is not a known header field" % name sys.exit(1) h = QcowHeader(fd) h.__dict__[name] = value h.update(fd) def cmd_add_header_ext(fd, magic, data): try: magic = int(magic, 0) except: print "'%s' is not a valid magic number" % magic sys.exit(1) h = QcowHeader(fd) h.extensions.append(QcowHeaderExtension.create(magic, data)) h.update(fd) def cmd_del_header_ext(fd, magic): try: magic = int(magic, 0) except: print 
"'%s' is not a valid magic number" % magic sys.exit(1) h = QcowHeader(fd) found = False for ex in h.extensions: if ex.magic == magic: found = True h.extensions.remove(ex) if not found: print "No such header extension" return h.update(fd) def cmd_set_feature_bit(fd, group, bit): try: bit = int(bit, 0) if bit < 0 or bit >= 64: raise ValueError except: print "'%s' is not a valid bit number in range [0, 64)" % bit sys.exit(1) h = QcowHeader(fd) if group == 'incompatible': h.incompatible_features |= 1 << bit elif group == 'compatible': h.compatible_features |= 1 << bit elif group == 'autoclear': h.autoclear_features |= 1 << bit else: print "'%s' is not a valid group, try 'incompatible', 'compatible', or 'autoclear'" % group sys.exit(1) h.update(fd) cmds = [ [ 'dump-header', cmd_dump_header, 0, 'Dump image header and header extensions' ], [ 'set-header', cmd_set_header, 2, 'Set a field in the header'], [ 'add-header-ext', cmd_add_header_ext, 2, 'Add a header extension' ], [ 'del-header-ext', cmd_del_header_ext, 1, 'Delete a header extension' ], [ 'set-feature-bit', cmd_set_feature_bit, 2, 'Set a feature bit'], ] def main(filename, cmd, args): fd = open(filename, "r+b") try: for name, handler, num_args, desc in cmds: if name != cmd: continue elif len(args) != num_args: usage() return else: handler(fd, *args) return print "Unknown command '%s'" % cmd finally: fd.close() def usage(): print "Usage: %s <file> <cmd> [<arg>, ...]" % sys.argv[0] print "" print "Supported commands:" for name, handler, num_args, desc in cmds: print " %-20s - %s" % (name, desc) if __name__ == '__main__': if len(sys.argv) < 3: usage() sys.exit(1) main(sys.argv[1], sys.argv[2], sys.argv[3:])
nypdmax/NUMA
tools/qemu-xen/tests/qemu-iotests/qcow2.py
Python
gpl-2.0
7,287
0.009332
# Natural Language Toolkit: Interface to Weka Classsifiers # # Copyright (C) 2001-2008 University of Pennsylvania # Author: Edward Loper <edloper@gradient.cis.upenn.edu> # URL: <http://nltk.sf.net> # For license information, see LICENSE.TXT # # $Id: naivebayes.py 2063 2004-07-17 21:02:24Z edloper $ import time, tempfile, os, os.path, subprocess, re from api import * from nltk.probability import * from nltk.internals import java, config_java """ Classifiers that make use of the external 'Weka' package. """ _weka_classpath = None _weka_search = ['.', '/usr/share/weka', '/usr/local/share/weka', '/usr/lib/weka', '/usr/local/lib/weka',] def config_weka(classpath=None): global _weka_classpath # Make sure java's configured first. config_java() if classpath is not None: _weka_classpath = classpath if _weka_classpath is None: searchpath = _weka_search if 'WEKAHOME' in os.environ: searchpath.insert(0, os.environ['WEKAHOME']) for path in searchpath: if os.path.exists(os.path.join(path, 'weka.jar')): _weka_classpath = os.path.join(path, 'weka.jar') print '[Found Weka: %s]' % _weka_classpath if _weka_classpath is None: raise LookupError('Unable to find weka.jar! Use config_weka() ' 'or set the WEKAHOME environment variable. ' 'For more information about Weka, please see ' 'http://www.cs.waikato.ac.nz/ml/weka/') class WekaClassifier(ClassifierI): def __init__(self, formatter, model_filename): self._formatter = formatter self._model = model_filename def batch_prob_classify(self, featuresets): return self._batch_classify(featuresets, ['-p', '0', '-distribution']) def batch_classify(self, featuresets): return self._batch_classify(featuresets, ['-p', '0']) def _batch_classify(self, featuresets, options): # Make sure we can find java & weka. config_weka() temp_dir = tempfile.mkdtemp() try: # Write the test data file. test_filename = os.path.join(temp_dir, 'test.arff') self._formatter.write(test_filename, featuresets) # Call weka to classify the data. 
cmd = ['weka.classifiers.bayes.NaiveBayes', '-l', self._model, '-T', test_filename] + options (stdout, stderr) = java(cmd, classpath=_weka_classpath, stdout=subprocess.PIPE) # Parse weka's output. return self.parse_weka_output(stdout.split('\n')) finally: for f in os.listdir(temp_dir): os.remove(os.path.join(temp_dir, f)) os.rmdir(temp_dir) def parse_weka_distribution(self, s): probs = [float(v) for v in re.split('[*,]+', s) if v.strip()] probs = dict(zip(self._formatter.labels(), probs)) return DictionaryProbDist(probs) def parse_weka_output(self, lines): if lines[0].split() == ['inst#', 'actual', 'predicted', 'error', 'prediction']: return [line.split()[2].split(':')[1] for line in lines[1:] if line.strip()] elif lines[0].split() == ['inst#', 'actual', 'predicted', 'error', 'distribution']: return [self.parse_weka_distribution(line.split()[-1]) for line in lines[1:] if line.strip()] else: for line in lines[:10]: print line raise ValueError('Unhandled output format -- your version ' 'of weka may not be supported.\n' ' Header: %s' % lines[0]) @staticmethod def train(model_filename, featuresets, quiet=True): # Make sure we can find java & weka. config_weka() # Build an ARFF formatter. formatter = ARFF_Formatter.from_train(featuresets) temp_dir = tempfile.mkdtemp() try: # Write the training data file. train_filename = os.path.join(temp_dir, 'train.arff') formatter.write(train_filename, featuresets) # Train the weka model. cmd = ['weka.classifiers.bayes.NaiveBayes', '-d', model_filename, '-t', train_filename] if quiet: stdout = subprocess.PIPE else: stdout = None java(cmd, classpath=_weka_classpath, stdout=stdout) # Return the new classifier. return WekaClassifier(formatter, model_filename) finally: for f in os.listdir(temp_dir): os.remove(os.path.join(temp_dir, f)) os.rmdir(temp_dir) class ARFF_Formatter: """ Converts featuresets and labeled featuresets to ARFF-formatted strings, appropriate for input into Weka. 
""" def __init__(self, labels, features): """ @param labels: A list of all labels that can be generated. @param features: A list of feature specifications, where each feature specification is a tuple (fname, ftype); and ftype is an ARFF type string such as NUMERIC or STRING. """ self._labels = labels self._features = features def format(self, tokens): return self.header_section() + self.data_section(tokens) def labels(self): return list(self._labels) def write(self, filename, tokens): f = open(filename, 'w') f.write(self.format(tokens)) f.close() @staticmethod def from_train(tokens): # Find the set of all attested labels. labels = set(label for (tok,label) in tokens) # Determine the types of all features. features = {} for tok, label in tokens: for (fname, fval) in tok.items(): if issubclass(type(fval), bool): ftype = '{True, False}' elif issubclass(type(fval), (int, float, long, bool)): ftype = 'NUMERIC' elif issubclass(type(fval), basestring): ftype = 'STRING' elif fval is None: continue # can't tell the type. else: raise ValueError('Unsupported value type %r' % ftype) if features.get(fname, ftype) != ftype: raise ValueError('Inconsistent type for %s' % fname) features[fname] = ftype features = sorted(features.items()) return ARFF_Formatter(labels, features) def header_section(self): # Header comment. s = ('% Weka ARFF file\n' + '% Generated automatically by NLTK\n' + '%% %s\n\n' % time.ctime()) # Relation name s += '@RELATION rel\n\n' # Input attribute specifications for fname, ftype in self._features: s += '@ATTRIBUTE %-30r %s\n' % (fname, ftype) # Label attribute specification s += '@ATTRIBUTE %-30r {%s}\n' % ('-label-', ','.join(self._labels)) return s def data_section(self, tokens, labeled=None): """ @param labeled: Indicates whether the given tokens are labeled or not. If C{None}, then the tokens will be assumed to be labeled if the first token's value is a tuple or list. """ # Check if the tokens are labeled or unlabeled. 
If unlabeled, # then use 'None' if labeled is None: labeled = tokens and isinstance(tokens[0], (tuple, list)) if not labeled: tokens = [(tok, None) for tok in tokens] # Data section s = '\n@DATA\n' for (tok, label) in tokens: for fname, ftype in self._features: s += '%s,' % self._fmt_arff_val(tok.get(fname)) s += '%s\n' % self._fmt_arff_val(label) return s def _fmt_arff_val(self, fval): if fval is None: return '?' elif isinstance(fval, (bool, int, long)): return '%s' % fval elif isinstance(fval, float): return '%r' % fval else: return '%r' % fval if __name__ == '__main__': from nltk.classify.util import names_demo,binary_names_demo_features def make_classifier(featuresets): return WekaClassifier.train('/tmp/name.model', featuresets) classifier = names_demo(make_classifier,binary_names_demo_features)
hectormartinez/rougexstem
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk/classify/weka.py
Python
apache-2.0
8,796
0.004093
import sys from services.spawn import MobileTemplate from services.spawn import WeaponTemplate from resources.datatables import WeaponType from resources.datatables import Difficulty from resources.datatables import Options from java.util import Vector def addTemplate(core): mobileTemplate = MobileTemplate() mobileTemplate.setCreatureName('sickly_decay_mite_queen') mobileTemplate.setLevel(19) mobileTemplate.setDifficulty(Difficulty.NORMAL) mobileTemplate.setMinSpawnDistance(4) mobileTemplate.setMaxSpawnDistance(8) mobileTemplate.setDeathblow(False) mobileTemplate.setScale(1) mobileTemplate.setMeatType("Insect Meat") mobileTemplate.setMeatAmount(15) mobileTemplate.setSocialGroup("decay mite") mobileTemplate.setAssistRange(0) mobileTemplate.setStalker(False) mobileTemplate.setOptionsBitmask(Options.ATTACKABLE) templates = Vector() templates.add('object/mobile/shared_bark_mite_hue.iff') mobileTemplate.setTemplates(templates) weaponTemplates = Vector() weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic') weaponTemplates.add(weapontemplate) mobileTemplate.setWeaponTemplateVector(weaponTemplates) attacks = Vector() attacks.add('bm_bite_4') attacks.add('bm_bolster_armor_4') attacks.add('bm_enfeeble_4') mobileTemplate.setDefaultAttack('creatureRangedAttack') mobileTemplate.setAttacks(attacks) core.spawnService.addMobileTemplate('sickly_decay_mite_queen', mobileTemplate) return
agry/NGECore2
scripts/mobiles/talus/sickly_decay_mite_queen.py
Python
lgpl-3.0
1,557
0.026975
# coding: utf-8 from django.core.management.base import BaseCommand from ...fetch.fetchers import VerifyFetcher class Command(BaseCommand): """Updates the stored data about the Twitter user for one or all Accounts. For one account: ./manage.py fetch_accounts --account=philgyford For all accounts: ./manage.py fetch_accounts """ help = "Fetches and updates data about Accounts' Twitter Users" def add_arguments(self, parser): parser.add_argument( "--account", action="store", default=False, help="Only fetch for one Twitter account.", ) def handle(self, *args, **options): # We might be fetching for a specific account or all (None). account = options["account"] if options["account"] else None results = VerifyFetcher(screen_name=account).fetch() # results should be a list of dicts, either: # { 'account': 'thescreenname', # 'success': True # } # or: # { 'account': 'thescreenname', # 'success': False, # 'messages': ["This screen_name doesn't exist"] # } if options.get("verbosity", 1) > 0: for result in results: if result["success"]: self.stdout.write("Fetched @%s" % result["account"]) else: self.stderr.write( "Could not fetch @%s: %s" % (result["account"], result["messages"][0]) )
philgyford/django-ditto
ditto/twitter/management/commands/fetch_twitter_accounts.py
Python
mit
1,562
0
import frappe def execute(): frappe.reload_doc("selling", "doctype", "sales_order") docs = frappe.get_all("Sales Order", { "advance_paid": ["!=", 0] }, "name") for doc in docs: frappe.db.set_value("Sales Order", doc.name, "advance_received", 1, update_modified=False)
neilLasrado/erpnext
erpnext/patches/v13_0/update_advance_received_in_sales_order.py
Python
gpl-3.0
300
0.01
""" Sphinx plugins for Django documentation. """ import docutils.nodes import docutils.transforms import sphinx import sphinx.addnodes import sphinx.directives import sphinx.environment import sphinx.roles from docutils import nodes def setup(app): app.add_crossref_type( directivename = "setting", rolename = "setting", indextemplate = "pair: %s; setting", ) app.add_crossref_type( directivename = "templatetag", rolename = "ttag", indextemplate = "pair: %s; template tag", ) app.add_crossref_type( directivename = "templatefilter", rolename = "tfilter", indextemplate = "pair: %s; template filter", ) app.add_crossref_type( directivename = "fieldlookup", rolename = "lookup", indextemplate = "pair: %s, field lookup type", ) app.add_description_unit( directivename = "django-admin", rolename = "djadmin", indextemplate = "pair: %s; django-admin command", parse_node = parse_django_admin_node, ) app.add_description_unit( directivename = "django-admin-option", rolename = "djadminopt", indextemplate = "pair: %s; django-admin command-line option", parse_node = lambda env, sig, signode: \ sphinx.directives.parse_option_desc(signode, sig), ) app.add_config_value('django_next_version', '0.0', True) app.add_directive('versionadded', parse_version_directive, 1, (1, 1, 1)) app.add_directive('versionchanged', parse_version_directive, 1, (1, 1, 1)) app.add_transform(SuppressBlockquotes) def parse_version_directive(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): env = state.document.settings.env is_nextversion = env.config.django_next_version == arguments[0] ret = [] node = sphinx.addnodes.versionmodified() ret.append(node) if not is_nextversion: if len(arguments) == 1: linktext = 'Please, see the release notes <releases-%s>' % ( arguments[0]) xrefs = sphinx.roles.xfileref_role('ref', linktext, linktext, lineno, state) node.extend(xrefs[0]) node['version'] = arguments[0] else: node['version'] = "Development version" node['type'] = name if 
len(arguments) == 2: inodes, messages = state.inline_text(arguments[1], lineno+1) node.extend(inodes) if content: state.nested_parse(content, content_offset, node) ret = ret + messages env.note_versionchange(node['type'], node['version'], node, lineno) return ret class SuppressBlockquotes(docutils.transforms.Transform): """ Remove the default blockquotes that encase indented list, tables, etc. """ default_priority = 300 suppress_blockquote_child_nodes = ( docutils.nodes.bullet_list, docutils.nodes.enumerated_list, docutils.nodes.definition_list, docutils.nodes.literal_block, docutils.nodes.doctest_block, docutils.nodes.line_block, docutils.nodes.table, ) def apply(self): for node in self.document.traverse(docutils.nodes.block_quote): if len(node.children) == 1 and \ isinstance(node.children[0], self.suppress_blockquote_child_nodes): node.replace_self(node.children[0]) def parse_django_admin_node(env, sig, signode): command = sig.split(' ')[0] env._django_curr_admin_command = command title = "django-admin.py %s" % sig signode += sphinx.addnodes.desc_name(title, title) return sig
Yelp/pyes
docs/_ext/djangodocs.py
Python
bsd-3-clause
3,769
0.011409
from dolfin import * import numpy as np import pandas as pd n = 6 Dim = np.zeros((n,1)) ErrorL2 = np.zeros((n,1)) ErrorH1 = np.zeros((n,1)) OrderL2 = np.zeros((n,1)) OrderH1 = np.zeros((n,1)) # parameters['reorder_dofs_serial'] = False for x in range(1,n+1): parameters['form_compiler']['quadrature_degree'] = -1 mesh = UnitSquareMesh(2**x,2**x) V = VectorFunctionSpace(mesh, "CG", 2) class u_in(Expression): def __init__(self): self.p = 1 def eval_cell(self, values, x, ufc_cell): values[0] = x[0]*x[0]*x[0] values[1] = x[1]*x[1]*x[1] def value_shape(self): return (2,) class F_in(Expression): def __init__(self): self.p = 1 def eval_cell(self, values, x, ufc_cell): values[0] = -6*x[0] values[1] = -6*x[1] def value_shape(self): return (2,) u0 = u_in() F = F_in() u = TrialFunction(V) v = TestFunction(V) a = inner(grad(u), grad(v))*dx L = inner(F, v)*dx def boundary(x, on_boundary): return on_boundary bc = DirichletBC(V, u0, boundary) u = Function(V) solve(a == L, u, bcs=bc, solver_parameters={"linear_solver": "lu"}, form_compiler_parameters={"optimize": True}) parameters['form_compiler']['quadrature_degree'] = 8 Vexact = VectorFunctionSpace(mesh, "CG", 4) ue = interpolate(u0, Vexact) e = ue - u Dim[x-1] = V.dim() ErrorL2[x-1] = sqrt(abs(assemble(inner(e,e)*dx))) ErrorH1[x-1] = sqrt(abs(assemble(inner(grad(e),grad(e))*dx))) if (x > 1): OrderL2[x-1] = abs(np.log2(ErrorL2[x-1]/ErrorL2[x-2])) OrderH1[x-1] = abs(np.log2(ErrorH1[x-1]/ErrorH1[x-2])) TableTitles = ["DoF","L2-erro","L2-order","H1-error","H1-order"] TableValues = np.concatenate((Dim,ErrorL2,OrderL2,ErrorH1,OrderH1),axis=1) Table = pd.DataFrame(TableValues, columns = TableTitles) pd.set_option('precision',3) print Table
wathen/PhD
MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/Hartman2D/Laplacian.py
Python
mit
1,981
0.014134
#!/usr/bin/env python # coding=utf-8 import threading import time class timer(threading.Thread): #The timer class is derived from the class threading.Thread def __init__(self, num, interval): threading.Thread.__init__(self) self.thread_num = num self.interval = interval self.thread_stop = False def run(self): #Overwrite run() method, put what you want the thread do here while not self.thread_stop: print 'Thread Object(%d), Time:%s/n' %(self.thread_num, time.ctime()) time.sleep(self.interval) def stop(self): self.thread_stop = True def test(): thread1 = timer(1, 1) thread2 = timer(2, 2) thread1.start() thread2.start() time.sleep(10) thread1.stop() thread2.stop() return if __name__ == '__main__': test()
zhaochl/python-utils
utils/thread/time_thread.py
Python
apache-2.0
846
0.01773
# My computer was failing to recognize wifi networks after being woken up from sleep so this uses the network manager command # line tool to force my computer to recognize the network I type in to the terminal. import subprocess network_name = raw_input("What is the name of your network? ") subprocess.check_call(['nmcli', 'c', 'up', 'id', network_name])
caryben/Ubuntu-bug-fixes
hidden_network_workaround.py
Python
mit
357
0.005602
"""Tests for the system_log component."""
fbradyirl/home-assistant
tests/components/system_log/__init__.py
Python
apache-2.0
42
0
import binascii import sys class ProtocolTreeNode(object): def __init__(self, tag, attributes = None, children = None, data = None): self.tag = tag self.attributes = attributes or {} self.children = children or [] self.data = data assert type(self.children) is list, "Children must be a list, got %s" % type(self.children) def __eq__(self, protocolTreeNode): """ :param protocolTreeNode: ProtocolTreeNode :return: bool """ # if protocolTreeNode.__class__ == ProtocolTreeNode\ and self.tag == protocolTreeNode.tag\ and self.data == protocolTreeNode.data\ and self.attributes == protocolTreeNode.attributes\ and len(self.getAllChildren()) == len(protocolTreeNode.getAllChildren()): found = False for c in self.getAllChildren(): for c2 in protocolTreeNode.getAllChildren(): if c == c2: found = True break if not found: return False found = False for c in protocolTreeNode.getAllChildren(): for c2 in self.getAllChildren(): if c == c2: found = True break if not found: return False return True return False def __hash__(self): return hash(self.tag) ^ hash(tuple(self.attributes.items())) ^ hash(self.data) def toString(self): out = "<"+self.tag if self.attributes is not None: for key,val in self.attributes.items(): out+= " "+key+'="'+val+'"' out+= ">\n" if self.data is not None: if type(self.data) is bytearray: try: out += "%s" % self.data.decode() except UnicodeDecodeError: out += binascii.hexlify(self.data) else: try: out += "%s" % self.data except UnicodeDecodeError: try: out += "%s" % self.data.decode() except UnicodeDecodeError: out += binascii.hexlify(self.data) if type(self.data) is str and sys.version_info >= (3,0): out += "\nHEX3:%s\n" % binascii.hexlify(self.data.encode('latin-1')) else: out += "\nHEX:%s\n" % binascii.hexlify(self.data) for c in self.children: try: out += c.toString() except UnicodeDecodeError: out += "[ENCODED DATA]\n" out+= "</"+self.tag+">\n" return out def __str__(self): return self.toString() def getData(self): return self.data def setData(self, data): 
self.data = data @staticmethod def tagEquals(node,string): return node is not None and node.tag is not None and node.tag == string @staticmethod def require(node,string): if not ProtocolTreeNode.tagEquals(node,string): raise Exception("failed require. string: "+string); def __getitem__(self, key): return self.getAttributeValue(key) def __setitem__(self, key, val): self.setAttribute(key, val) def __delitem__(self, key): self.removeAttribute(key) def getChild(self,identifier): if type(identifier) == int: if len(self.children) > identifier: return self.children[identifier] else: return None for c in self.children: if identifier == c.tag: return c return None def hasChildren(self): return len(self.children) > 0 def addChild(self, childNode): self.children.append(childNode) def addChildren(self, children): for c in children: self.addChild(c) def getAttributeValue(self,string): try: return self.attributes[string] except KeyError: return None def removeAttribute(self, key): if key in self.attributes: del self.attributes[key] def setAttribute(self, key, value): self.attributes[key] = value def getAllChildren(self,tag = None): ret = [] if tag is None: return self.children for c in self.children: if tag == c.tag: ret.append(c) return ret
metis-ai/yowsup
yowsup/structs/protocoltreenode.py
Python
gpl-3.0
4,746
0.00906
import tests.periodicities.period_test as per per.buildModel((7 , 'M' , 200));
antoinecarme/pyaf
tests/periodicities/Month/Cycle_Month_200_M_7.py
Python
bsd-3-clause
81
0.049383
from django.conf.urls import patterns, include, url from django.contrib import admin from rest_framework import viewsets, routers from voting_app.models import Topic from voting_app.views import Vote from voting_app.serializer import TopicSerializer admin.autodiscover() # ViewSets define the view behavior. class TopicViewSet(viewsets.ModelViewSet): model = Topic serializer_class = TopicSerializer queryset = Topic.objects.all().filter(hide=False) router = routers.DefaultRouter() router.register(r'topics', TopicViewSet) urlpatterns = patterns('', url(r'^$', 'voting_app.views.index', name='index'), url(r'^', include(router.urls)), url(r'^vote/$', Vote.as_view()), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), url(r'^admin/', include(admin.site.urls)), )
gc3-uzh-ch/django-simple-poll
voting/urls.py
Python
agpl-3.0
833
0.003601
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from django.utils.translation import ugettext_lazy as _t, ugettext as _ from desktop.lib.conf import Config, coerce_bool, coerce_csv BASEDIR = os.path.dirname(os.path.abspath(__file__)) USERNAME_SOURCES = ('attributes', 'nameid') def dict_list_map(value): if isinstance(value, str): d = {} for k, v in json.loads(value).iteritems(): d[k] = (v,) return d elif isinstance(value, dict): return value return None XMLSEC_BINARY = Config( key="xmlsec_binary", default="/usr/local/bin/xmlsec1", type=str, help=_t("Xmlsec1 binary path. This program should be executable by the user running Hue.")) ENTITY_ID = Config( key="entity_id", default="<base_url>/saml2/metadata/", type=str, help=_t("Entity ID for Hue acting as service provider. 
Can also accept a pattern where '<base_url>' will be replaced with server URL base.")) CREATE_USERS_ON_LOGIN = Config( key="create_users_on_login", default=True, type=coerce_bool, help=_t("Create users from IdP on login.")) ATTRIBUTE_MAP_DIR = Config( key="attribute_map_dir", default=os.path.abspath( os.path.join(BASEDIR, '..', '..', 'attribute-maps') ), type=str, private=True, help=_t("Attribute map directory contains files that map SAML attributes to pysaml2 attributes.")) ALLOW_UNSOLICITED = Config( key="allow_unsolicited", default=True, type=coerce_bool, private=True, help=_t("Allow responses that are initiated by the IdP.")) REQUIRED_ATTRIBUTES = Config( key="required_attributes", default=['uid'], type=coerce_csv, help=_t("Required attributes to ask for from IdP.")) OPTIONAL_ATTRIBUTES = Config( key="optional_attributes", default=[], type=coerce_csv, help=_t("Optional attributes to ask for from IdP.")) METADATA_FILE = Config( key="metadata_file", default=os.path.abspath( os.path.join(BASEDIR, '..', '..', 'examples', 'idp.xml') ), type=str, help=_t("IdP metadata in the form of a file. This is generally an XML file containing metadata that the Identity Provider generates.")) KEY_FILE = Config( key="key_file", default="", type=str, help=_t("key_file is the name of a PEM formatted file that contains the private key of the Hue service. This is presently used both to encrypt/sign assertions and as client key in a HTTPS session.")) CERT_FILE = Config( key="cert_file", default="", type=str, help=_t("This is the public part of the service private/public key pair. 
cert_file must be a PEM formatted certificate chain file.")) USER_ATTRIBUTE_MAPPING = Config( key="user_attribute_mapping", default={'uid': ('username', )}, type=dict_list_map, help=_t("A mapping from attributes in the response from the IdP to django user attributes.")) AUTHN_REQUESTS_SIGNED = Config( key="authn_requests_signed", default=False, type=coerce_bool, help=_t("Have Hue initiated authn requests be signed and provide a certificate.")) LOGOUT_REQUESTS_SIGNED = Config( key="logout_requests_signed", default=False, type=coerce_bool, help=_t("Have Hue initiated logout requests be signed and provide a certificate.")) USERNAME_SOURCE = Config( key="username_source", default="attributes", type=str, help=_t("Username can be sourced from 'attributes' or 'nameid'")) LOGOUT_ENABLED = Config( key="logout_enabled", default=True, type=coerce_bool, help=_t("Performs the logout or not.")) def config_validator(user): res = [] if USERNAME_SOURCE.get() not in USERNAME_SOURCES: res.append(("libsaml.username_source", _("username_source not configured properly. SAML integration may not work."))) return res
2013Commons/hue
desktop/libs/libsaml/src/libsaml/conf.py
Python
apache-2.0
4,427
0.005195
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-05-05 20:10 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('opconsole', '0026_auto_20170504_2048'), ] operations = [ migrations.AddField( model_name='device', name='name', field=models.CharField(default=b'unnamed', max_length=255), ), ]
baalkor/timetracking
opconsole/migrations/0027_device_name.py
Python
apache-2.0
468
0
""" Module to translate various names (unicode, LaTeX & other text) for characters to encodings in the Symbol font standard encodings. Also, provide grace markup strings for them. It recognizes unicode names for the greek alphabet and most of the useful symbols in the Symbol font. Marcus Mendenhall, Vanderbilt University, 2006 symbol_mapping.py,v 1.2 2009/04/03 00:32:07 mendenhall Exp """ #a tuple of tuple of the position of the character in the standard Symbol encoding, and all aliases _symbols=[ (0xa0, u"\u2202", "euro"), (0xa1, u"\u03d2", "upshook"), (0xa2, u"\u02b9", "prime"), (0xa3, u"\u2264", "leq", "lessequal"), (0xa4, u"\u2044", "fraction", "fractionslash"), (0xa5, u"\u221E", "infinity", "infty"), (0xa6, u"\u0192", "f", "function", "fhook"), (0xa7, u"\u2663", "club"), (0xa8, u"\u2666", "diamond"), (0xa9, u"\u2665", "heart"), (0xaa, u"\u2660", "spade"), (0xab, u"\u2194", "leftrightarrow", "lrarrow"), (0xac, u"\u2190", "leftarrow", "larrow"), (0xad, u"\u2191", "uparrow"), (0xae, u"\u2192", "rightarrow", "rarrow"), (0xaf, u"\u2193", "downarrow"), (0xb0, u"\u00b0", "degree"), (0xb1, u"\u00b1", "plusminus"), (0xb2, u"\u02ba", "primeprime", "doubleprime", "prime2"), (0xb3, u"\u2265", "geq", "greaterequal"), (0xb4, u"\u00d7", "times"), (0xb5, u"\u221d", "proportional", "propto"), (0xb6, u"\u2202", "partial"), (0xb7, u"\u2022", "cdot", "bullet"), (0xb8, u"\u00f7", "divide"), (0xb9, u"\u2260", "notequal", "neq"), (0xba, u"\u2261", "equiv", "equivalence" ), (0xbb, u"\u2248", "approx", "almostequal"), (0xbc, u"\u2026", "ellipsis", "3dots"), (0xbd, u"\u007c", "vertical", "solidus"), (0xbe, u"\u23af", "horizontal", "longbar"), (0xbf, u"\u21b5", "downleftarrow"), (0xc0, u"\u2135", "aleph", "alef"), (0xc1, u"\u2111", "script-letter-I"), (0xc2, u"\u211c", "script-letter-R"), (0xc3, u"\u2118", "script-letter-P"), (0xc4, u"\u2297", "circled-times"), (0xc5, u"\u2295", "circled-plus"), (0xc6, u"\u2205", "emptyset"), (0xc7, u"\u2229", "intersection"), (0xc8, u"\u222a", 
"union"), (0xc9, u"\u2283", "superset"), (0xca, u"\u2287", "superset-or-equal"), (0xcb, u"\u2284", "not-subset"), (0xcc, u"\u2282", "subset"), (0xcd, u"\u2286", "subset-or-equal"), (0xce, u"\u2208", "element"), (0xcf, u"\u2209", "not-element"), (0xd0, u"\u2220", "angle"), (0xd1, u"\u2207", "del", "nabla", "gradient"), (0xd2, u"\uf8e8", "registered-serif"), (0xd3, u"\uf8e9", "copyright-serif"), (0xd4, u"\uf8ea", "trademark-serif"), (0xd5, u"\u220f", "product"), (0xd6, u"\u221a", "sqrt", "radical", "root"), (0xd7, u"\u22c5", "cdot", "center-dot", "dot-operator"), (0xd8, u"\u00ac", "not"), (0xd9, u"\u2227", "logical-and", "conjunction"), (0xda, u"\u2228", "logical-or", "disjunction", "alternation"), (0xdb, u"\u21d4", "left-right-double-arrow", "iff"), (0xdc, u"\u21d0", "left-double-arrow"), (0xdd, u"\u21d1", "up-double-arrow"), (0xde, u"\u21d2", "right-double-arrow", "implies"), (0xdf, u"\u21d3", "down-double-arrow"), (0xe0, u"\u25ca", "lozenge"), (0xe1, u"\u3008", "left-angle-bracket", "langle"), (0xe2, u"\u00ae", "registered-sans"), (0xe3, u"\u00a9", "copyright-sans"), (0xe4, u"\u2122", "trademark-sans"), (0xe5, u"\u2211", "sum"), (0xf2, u"\u2228", "integral"), ] #insert unicodes for official greek letters, so a pure unicode string with these already properly encoded will translate correctly _greekorder='abgdezhqiklmnxoprvstufcyw' _greekuppernames=['Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon', 'Zeta', 'Eta', 'Theta', 'Iota', 'Kappa', 'Lambda', 'Mu', 'Nu', 'Xi', 'Omicron', 'Pi', 'Rho', 'finalsigma', 'Sigma', 'Tau', 'Upsilon', 'Phi', 'Chi', 'Psi', 'Omega'] #note thet @ sign is in the place of the 'final sigma' character in the standard greek alphabet mapping for the symbol font _ugrcaps="".join([unichr(x) for x in range(0x391, 0x391+len(_greekorder))]) _symbols += zip([ord(x) for x in _greekorder.upper()], _ugrcaps) _symbols += zip([ord(x) for x in _greekorder.upper()], _greekuppernames) _ugrlower="".join([unichr(x) for x in range(0x3b1, 0x3b1+len(_greekorder))]) 
_symbols += zip([ord(x) for x in _greekorder], _ugrlower) _greeklowernames=[x.lower() for x in _greekuppernames] _greeklowernames[_greeklowernames.index('finalsigma')]='altpi' _symbols += zip([ord(x) for x in _greekorder], [x.lower() for x in _greeklowernames]) gracedict={} for tt in _symbols: if tt[0] > 0x20 and tt[0] < 0x7f: vstr=r"\x"+chr(tt[0])+r"\f{}" else: vstr=r"\x\#{%02x}\f{}" % tt[0] for tag in tt[1:]: gracedict[tag]=vstr _normalascii="".join([chr(i) for i in range(32,127)]) _normalucode=unicode(_normalascii) def remove_redundant_changes(gracestring): """collapse out consecutive font-switching commands so that \xabc\f{}\xdef\f{} becomes \xabcdef\f{}""" while(1): xs=gracestring.find(r"\f{}\x") if xs<0: break if xs >=0: gracestring=gracestring[:xs]+gracestring[xs+6:] return gracestring def translate_unicode_to_grace(ucstring): """take a string consisting of unicode characters for a mixture of normal characters and characters which map to glyphs in Symbol and create a Grace markup string from it""" outstr="" for uc in ucstring: if uc in _normalucode: outstr+=str(uc) #better exist in ascii else: outstr+=gracedict.get(uc,"?") return remove_redundant_changes(outstr) def format_python_to_grace(pystring): """take a string with %(alpha)s%(Upsilon)s type coding, and make a Grace markup string from it""" return remove_redundant_changes(pystring % gracedict) if __name__=="__main__": # a ur"foo" string is raw unicode, iin which only \u is interpreted, so it is good for grace escapes print translate_unicode_to_grace(ur"Hello\xQ\f{}\u0391\u03b1\u2227\u22c5\u03c8\u03a8\u03c9\u03a9") import sys import time import os import GracePlot class myGrace(GracePlot.GracePlot): def write_string(self, text="", font=0, x=0.5, y=0.5, size=1.0, just=0, color=1, coordinates="world", angle=0.0): strg="""with string string on string loctype %(coordinates)s string %(x)g, %(y)g string color %(color)d string rot %(angle)f string font %(font)d string just %(just)d string char size %(size)f 
string def "%(text)s" """ % locals() self.write(strg) c=GracePlot.colors stylecolors=[c.green, c.blue, c.red, c.orange, c.magenta, c.black] s1, s2, s3, s4, s5, s6 =[ GracePlot.Symbol(symbol=GracePlot.symbols.circle, fillcolor=sc, size=0.3, linestyle=GracePlot.lines.none) for sc in stylecolors ] l1, l2, l3, l4, l5, l6=[ GracePlot.Line(type=GracePlot.lines.solid, color=sc, linewidth=2.0) for sc in stylecolors] noline=GracePlot.Line(type=GracePlot.lines.none) graceSession=myGrace(width=11, height=8) g=graceSession[0] g.xlimit(-1,16) g.ylimit(-1,22) for row in range(16): for col in range(16): row*16+col graceSession.write_string(text=r"\x\#{%02x}"%(row*16+col), x=col, y=row, just=2, color=1, size=1.5) alphabet="".join(map(lambda x: "%("+x+")s", _greeklowernames)) +"%(aleph)s %(trademark-serif)s %(trademark-sans)s" print alphabet graceSession.write_string(text=format_python_to_grace(alphabet), x=0, y=17, just=0, color=1, size=1.5) alphabet="".join(map(lambda x: "%("+x+")s", _greekuppernames)) print alphabet graceSession.write_string(text=format_python_to_grace(alphabet), x=0, y=18, just=0, color=1, size=1.5) alphabet=u"\N{GREEK CAPITAL LETTER PSI} \N{NOT SIGN} Goodbye \N{CIRCLED TIMES}" graceSession.write_string(text=translate_unicode_to_grace(alphabet), x=0, y=19, just=0, color=1, size=1.5) graceSession.redraw(True)
bt3gl/Plotting-in-Linux
grace/src/symbol_mapping.py
Python
mit
8,291
0.014353
# -*- coding: utf-8 -*- """QGIS Unit tests for edit widgets. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Matthias Kuhn' __date__ = '20/05/2015' __copyright__ = 'Copyright 2015, The QGIS Project' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import qgis import os from qgis.core import QgsFeature, QgsGeometry, QgsPoint, QgsVectorLayer, NULL from qgis.gui import QgsEditorWidgetRegistry from PyQt4 import QtCore from qgis.testing import (start_app, unittest ) from utilities import unitTestDataPath start_app() class TestQgsTextEditWidget(unittest.TestCase): @classmethod def setUpClass(cls): QgsEditorWidgetRegistry.initEditors() def createLayerWithOnePoint(self): self.layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "addfeat", "memory") pr = self.layer.dataProvider() f = QgsFeature() f.setAttributes(["test", 123]) f.setGeometry(QgsGeometry.fromPoint(QgsPoint(100, 200))) assert pr.addFeatures([f]) assert self.layer.pendingFeatureCount() == 1 return self.layer def doAttributeTest(self, idx, expected): reg = QgsEditorWidgetRegistry.instance() configWdg = reg.createConfigWidget('TextEdit', self.layer, idx, None) config = configWdg.config() editwidget = reg.create('TextEdit', self.layer, idx, config, None, None) editwidget.setValue('value') assert editwidget.value() == expected[0] editwidget.setValue(123) assert editwidget.value() == expected[1] editwidget.setValue(None) assert editwidget.value() == expected[2] editwidget.setValue(NULL) assert editwidget.value() == expected[3] def test_SetValue(self): self.createLayerWithOnePoint() self.doAttributeTest(0, ['value', '123', NULL, NULL]) self.doAttributeTest(1, [NULL, 123, NULL, NULL]) if __name__ == '__main__': unittest.main()
NINAnor/QGIS
tests/src/python/test_qgseditwidgets.py
Python
gpl-2.0
2,315
0.000864
# This is the example of main program file which imports entities, # connects to the database, drops/creates specified tables # and populate some data to the database from pony.orm import * # or just import db_session, etc. import all_entities # This command make sure that all entities are imported from base_entities import db # Will bind this database from db_settings import current_settings # binding params db.bind(*current_settings['args'], **current_settings['kwargs']) from db_utils import connect from db_loading import populate_database if __name__ == '__main__': sql_debug(True) connect(db, drop_and_create='ALL') # drop_and_create=['Topic', 'Comment']) populate_database()
kozlovsky/ponymodules
main.py
Python
mit
707
0.004243
# -*- coding: utf-8 -*- # # exercise 4: variables and names # cars = 100 space_in_a_car = 4.0 drivers = 30 passengers = 90 cars_not_driven = cars - drivers cars_driven = drivers carpool_capacity = cars_driven * space_in_a_car average_passengers_per_car = passengers / cars_driven print "There are", cars, "cars avaliable." print "There are only", drivers, "drivers avaliable." print "There will be", cars_not_driven, "empty cars today." print "We can transport", carpool_capacity, "prople today." print "We have", passengers, "to carpool today" print "We need to put about", average_passengers_per_car, "in each car."
zstang/learning-python-the-hard-way
ex4.py
Python
mit
621
0.00161
# ---------------------------------------------------------------------------- # pyglet # Copyright (c) 2006-2008 Alex Holkner # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the name of pyglet nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- # png.py - PNG encoder in pure Python # Copyright (C) 2006 Johann C. 
Rocholl <johann@browsershots.org> # <ah> Modifications for pyglet by Alex Holkner <alex.holkner@gmail.com> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # Contributors (alphabetical): # Nicko van Someren <nicko@nicko.org> # # Changelog (recent first): # 2006-06-17 Nicko: Reworked into a class, faster interlacing. # 2006-06-17 Johann: Very simple prototype PNG decoder. # 2006-06-17 Nicko: Test suite with various image generators. # 2006-06-17 Nicko: Alpha-channel, grey-scale, 16-bit/plane support. # 2006-06-15 Johann: Scanline iterator interface for large input files. # 2006-06-09 Johann: Very simple prototype PNG encoder. """ Pure Python PNG Reader/Writer This is an implementation of a subset of the PNG specification at http://www.w3.org/TR/2003/REC-PNG-20031110 in pure Python. It reads and writes PNG files with 8/16/24/32/48/64 bits per pixel (greyscale, RGB, RGBA, with 8 or 16 bits per layer), with a number of options. 
For help, type "import png; help(png)" in your python interpreter. This file can also be used as a command-line utility to convert PNM files to PNG. The interface is similar to that of the pnmtopng program from the netpbm package. Type "python png.py --help" at the shell prompt for usage and a list of options. """ __revision__ = '$Rev$' __date__ = '$Date$' __author__ = '$Author$' import sys import zlib import struct import math from array import array from pyglet.compat import asbytes _adam7 = ((0, 0, 8, 8), (4, 0, 8, 8), (0, 4, 4, 8), (2, 0, 4, 4), (0, 2, 2, 4), (1, 0, 2, 2), (0, 1, 1, 2)) def interleave_planes(ipixels, apixels, ipsize, apsize): """ Interleave color planes, e.g. RGB + A = RGBA. Return an array of pixels consisting of the ipsize bytes of data from each pixel in ipixels followed by the apsize bytes of data from each pixel in apixels, for an image of size width x height. """ itotal = len(ipixels) atotal = len(apixels) newtotal = itotal + atotal newpsize = ipsize + apsize # Set up the output buffer out = array('B') # It's annoying that there is no cheap way to set the array size :-( out.extend(ipixels) out.extend(apixels) # Interleave in the pixel data for i in range(ipsize): out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize] for i in range(apsize): out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize] return out class Error(Exception): pass class Writer: """ PNG encoder in pure Python. """ def __init__(self, width, height, transparent=None, background=None, gamma=None, greyscale=False, has_alpha=False, bytes_per_sample=1, compression=None, interlaced=False, chunk_limit=2**20): """ Create a PNG encoder object. 
Arguments: width, height - size of the image in pixels transparent - create a tRNS chunk background - create a bKGD chunk gamma - create a gAMA chunk greyscale - input data is greyscale, not RGB has_alpha - input data has alpha channel (RGBA) bytes_per_sample - 8-bit or 16-bit input data compression - zlib compression level (1-9) chunk_limit - write multiple IDAT chunks to save memory If specified, the transparent and background parameters must be a tuple with three integer values for red, green, blue, or a simple integer (or singleton tuple) for a greyscale image. If specified, the gamma parameter must be a float value. """ if width <= 0 or height <= 0: raise ValueError("width and height must be greater than zero") if has_alpha and transparent is not None: raise ValueError( "transparent color not allowed with alpha channel") if bytes_per_sample < 1 or bytes_per_sample > 2: raise ValueError("bytes per sample must be 1 or 2") if transparent is not None: if greyscale: if type(transparent) is not int: raise ValueError( "transparent color for greyscale must be integer") else: if not (len(transparent) == 3 and type(transparent[0]) is int and type(transparent[1]) is int and type(transparent[2]) is int): raise ValueError( "transparent color must be a triple of integers") if background is not None: if greyscale: if type(background) is not int: raise ValueError( "background color for greyscale must be integer") else: if not (len(background) == 3 and type(background[0]) is int and type(background[1]) is int and type(background[2]) is int): raise ValueError( "background color must be a triple of integers") self.width = width self.height = height self.transparent = transparent self.background = background self.gamma = gamma self.greyscale = greyscale self.has_alpha = has_alpha self.bytes_per_sample = bytes_per_sample self.compression = compression self.chunk_limit = chunk_limit self.interlaced = interlaced if self.greyscale: self.color_depth = 1 if self.has_alpha: 
self.color_type = 4 self.psize = self.bytes_per_sample * 2 else: self.color_type = 0 self.psize = self.bytes_per_sample else: self.color_depth = 3 if self.has_alpha: self.color_type = 6 self.psize = self.bytes_per_sample * 4 else: self.color_type = 2 self.psize = self.bytes_per_sample * 3 def write_chunk(self, outfile, tag, data): """ Write a PNG chunk to the output file, including length and checksum. """ # http://www.w3.org/TR/PNG/#5Chunk-layout outfile.write(struct.pack("!I", len(data))) outfile.write(tag) outfile.write(data) checksum = zlib.crc32(tag) checksum = zlib.crc32(data, checksum) # <ah> Avoid DeprecationWarning: struct integer overflow masking # with Python2.5/Windows. checksum = checksum & 0xffffffff outfile.write(struct.pack("!I", checksum)) def write(self, outfile, scanlines): """ Write a PNG image to the output file. """ # http://www.w3.org/TR/PNG/#5PNG-file-signature outfile.write(struct.pack("8B", 137, 80, 78, 71, 13, 10, 26, 10)) # http://www.w3.org/TR/PNG/#11IHDR if self.interlaced: interlaced = 1 else: interlaced = 0 self.write_chunk(outfile, 'IHDR', struct.pack("!2I5B", self.width, self.height, self.bytes_per_sample * 8, self.color_type, 0, 0, interlaced)) # http://www.w3.org/TR/PNG/#11tRNS if self.transparent is not None: if self.greyscale: self.write_chunk(outfile, 'tRNS', struct.pack("!1H", *self.transparent)) else: self.write_chunk(outfile, 'tRNS', struct.pack("!3H", *self.transparent)) # http://www.w3.org/TR/PNG/#11bKGD if self.background is not None: if self.greyscale: self.write_chunk(outfile, 'bKGD', struct.pack("!1H", *self.background)) else: self.write_chunk(outfile, 'bKGD', struct.pack("!3H", *self.background)) # http://www.w3.org/TR/PNG/#11gAMA if self.gamma is not None: self.write_chunk(outfile, 'gAMA', struct.pack("!L", int(self.gamma * 100000))) # http://www.w3.org/TR/PNG/#11IDAT if self.compression is not None: compressor = zlib.compressobj(self.compression) else: compressor = zlib.compressobj() data = array('B') for scanline 
in scanlines: data.append(0) data.extend(scanline) if len(data) > self.chunk_limit: compressed = compressor.compress(data.tostring()) if len(compressed): # print >> sys.stderr, len(data), len(compressed) self.write_chunk(outfile, 'IDAT', compressed) data = array('B') if len(data): compressed = compressor.compress(data.tostring()) else: compressed = '' flushed = compressor.flush() if len(compressed) or len(flushed): # print >> sys.stderr, len(data), len(compressed), len(flushed) self.write_chunk(outfile, 'IDAT', compressed + flushed) # http://www.w3.org/TR/PNG/#11IEND self.write_chunk(outfile, 'IEND', '') def write_array(self, outfile, pixels): """ Encode a pixel array to PNG and write output file. """ if self.interlaced: self.write(outfile, self.array_scanlines_interlace(pixels)) else: self.write(outfile, self.array_scanlines(pixels)) def convert_ppm(self, ppmfile, outfile): """ Convert a PPM file containing raw pixel data into a PNG file with the parameters set in the writer object. """ if self.interlaced: pixels = array('B') pixels.fromfile(ppmfile, self.bytes_per_sample * self.color_depth * self.width * self.height) self.write(outfile, self.array_scanlines_interlace(pixels)) else: self.write(outfile, self.file_scanlines(ppmfile)) def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile): """ Convert a PPM and PGM file containing raw pixel data into a PNG outfile with the parameters set in the writer object. """ pixels = array('B') pixels.fromfile(ppmfile, self.bytes_per_sample * self.color_depth * self.width * self.height) apixels = array('B') apixels.fromfile(pgmfile, self.bytes_per_sample * self.width * self.height) pixels = interleave_planes(pixels, apixels, self.bytes_per_sample * self.color_depth, self.bytes_per_sample) if self.interlaced: self.write(outfile, self.array_scanlines_interlace(pixels)) else: self.write(outfile, self.array_scanlines(pixels)) def file_scanlines(self, infile): """ Generator for scanlines from an input file. 
""" row_bytes = self.psize * self.width for y in range(self.height): scanline = array('B') scanline.fromfile(infile, row_bytes) yield scanline def array_scanlines(self, pixels): """ Generator for scanlines from an array. """ row_bytes = self.width * self.psize stop = 0 for y in range(self.height): start = stop stop = start + row_bytes yield pixels[start:stop] def old_array_scanlines_interlace(self, pixels): """ Generator for interlaced scanlines from an array. http://www.w3.org/TR/PNG/#8InterlaceMethods """ row_bytes = self.psize * self.width for xstart, ystart, xstep, ystep in _adam7: for y in range(ystart, self.height, ystep): if xstart < self.width: if xstep == 1: offset = y*row_bytes yield pixels[offset:offset+row_bytes] else: row = array('B') offset = y*row_bytes + xstart* self.psize skip = self.psize * xstep for x in range(xstart, self.width, xstep): row.extend(pixels[offset:offset + self.psize]) offset += skip yield row def array_scanlines_interlace(self, pixels): """ Generator for interlaced scanlines from an array. http://www.w3.org/TR/PNG/#8InterlaceMethods """ row_bytes = self.psize * self.width for xstart, ystart, xstep, ystep in _adam7: for y in range(ystart, self.height, ystep): if xstart >= self.width: continue if xstep == 1: offset = y * row_bytes yield pixels[offset:offset+row_bytes] else: row = array('B') # Note we want the ceiling of (self.width - xstart) / xtep row_len = self.psize * ( (self.width - xstart + xstep - 1) / xstep) # There's no easier way to set the length of an array row.extend(pixels[0:row_len]) offset = y * row_bytes + xstart * self.psize end_offset = (y+1) * row_bytes skip = self.psize * xstep for i in range(self.psize): row[i:row_len:self.psize] = \ pixels[offset+i:end_offset:skip] yield row class _readable: """ A simple file-like interface for strings and arrays. 
""" def __init__(self, buf): self.buf = buf self.offset = 0 def read(self, n): r = self.buf[offset:offset+n] if isinstance(r, array): r = r.tostring() self.offset += n return r class Reader: """ PNG decoder in pure Python. """ def __init__(self, _guess=None, **kw): """ Create a PNG decoder object. The constructor expects exactly one keyword argument. If you supply a positional argument instead, it will guess the input type. You can choose among the following arguments: filename - name of PNG input file file - object with a read() method pixels - array or string with PNG data """ if ((_guess is not None and len(kw) != 0) or (_guess is None and len(kw) != 1)): raise TypeError("Reader() takes exactly 1 argument") if _guess is not None: if isinstance(_guess, array): kw["pixels"] = _guess elif isinstance(_guess, str): kw["filename"] = _guess elif isinstance(_guess, file): kw["file"] = _guess if "filename" in kw: self.file = file(kw["filename"]) elif "file" in kw: self.file = kw["file"] elif "pixels" in kw: self.file = _readable(kw["pixels"]) else: raise TypeError("expecting filename, file or pixels array") def read_chunk(self): """ Read a PNG chunk from the input file, return tag name and data. """ # http://www.w3.org/TR/PNG/#5Chunk-layout try: data_bytes, tag = struct.unpack('!I4s', self.file.read(8)) except struct.error: raise ValueError('Chunk too short for header') data = self.file.read(data_bytes) if len(data) != data_bytes: raise ValueError('Chunk %s too short for required %i data octets' % (tag, data_bytes)) checksum = self.file.read(4) if len(checksum) != 4: raise ValueError('Chunk %s too short for checksum', tag) verify = zlib.crc32(tag) verify = zlib.crc32(data, verify) # Whether the output from zlib.crc32 is signed or not varies # according to hideous implementation details, see # http://bugs.python.org/issue1202 . # We coerce it to be positive here (in a way which works on # Python 2.3 and older). 
verify &= 2**32 - 1 verify = struct.pack('!I', verify) if checksum != verify: # print repr(checksum) (a,) = struct.unpack('!I', checksum) (b,) = struct.unpack('!I', verify) raise ValueError("Checksum error in %s chunk: 0x%X != 0x%X" % (tag, a, b)) return tag, data def _reconstruct_sub(self, offset, xstep, ystep): """ Reverse sub filter. """ pixels = self.pixels a_offset = offset offset += self.psize * xstep if xstep == 1: for index in range(self.psize, self.row_bytes): x = pixels[offset] a = pixels[a_offset] pixels[offset] = (x + a) & 0xff offset += 1 a_offset += 1 else: byte_step = self.psize * xstep for index in range(byte_step, self.row_bytes, byte_step): for i in range(self.psize): x = pixels[offset + i] a = pixels[a_offset + i] pixels[offset + i] = (x + a) & 0xff offset += self.psize * xstep a_offset += self.psize * xstep def _reconstruct_up(self, offset, xstep, ystep): """ Reverse up filter. """ pixels = self.pixels b_offset = offset - (self.row_bytes * ystep) if xstep == 1: for index in range(self.row_bytes): x = pixels[offset] b = pixels[b_offset] pixels[offset] = (x + b) & 0xff offset += 1 b_offset += 1 else: for index in range(0, self.row_bytes, xstep * self.psize): for i in range(self.psize): x = pixels[offset + i] b = pixels[b_offset + i] pixels[offset + i] = (x + b) & 0xff offset += self.psize * xstep b_offset += self.psize * xstep def _reconstruct_average(self, offset, xstep, ystep): """ Reverse average filter. 
""" pixels = self.pixels a_offset = offset - (self.psize * xstep) b_offset = offset - (self.row_bytes * ystep) if xstep == 1: for index in range(self.row_bytes): x = pixels[offset] if index < self.psize: a = 0 else: a = pixels[a_offset] if b_offset < 0: b = 0 else: b = pixels[b_offset] pixels[offset] = (x + ((a + b) >> 1)) & 0xff offset += 1 a_offset += 1 b_offset += 1 else: for index in range(0, self.row_bytes, self.psize * xstep): for i in range(self.psize): x = pixels[offset+i] if index < self.psize: a = 0 else: a = pixels[a_offset + i] if b_offset < 0: b = 0 else: b = pixels[b_offset + i] pixels[offset + i] = (x + ((a + b) >> 1)) & 0xff offset += self.psize * xstep a_offset += self.psize * xstep b_offset += self.psize * xstep def _reconstruct_paeth(self, offset, xstep, ystep): """ Reverse Paeth filter. """ pixels = self.pixels a_offset = offset - (self.psize * xstep) b_offset = offset - (self.row_bytes * ystep) c_offset = b_offset - (self.psize * xstep) # There's enough inside this loop that it's probably not worth # optimising for xstep == 1 for index in range(0, self.row_bytes, self.psize * xstep): for i in range(self.psize): x = pixels[offset+i] if index < self.psize: a = c = 0 b = pixels[b_offset+i] else: a = pixels[a_offset+i] b = pixels[b_offset+i] c = pixels[c_offset+i] p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) if pa <= pb and pa <= pc: pr = a elif pb <= pc: pr = b else: pr = c pixels[offset+i] = (x + pr) & 0xff offset += self.psize * xstep a_offset += self.psize * xstep b_offset += self.psize * xstep c_offset += self.psize * xstep # N.B. PNG files with 'up', 'average' or 'paeth' filters on the # first line of a pass are legal. The code above for 'average' # deals with this case explicitly. For up we map to the null # filter and for paeth we map to the sub filter. 
def reconstruct_line(self, filter_type, first_line, offset, xstep, ystep): # print >> sys.stderr, "Filter type %s, first_line=%s" % ( # filter_type, first_line) filter_type += (first_line << 8) if filter_type == 1 or filter_type == 0x101 or filter_type == 0x104: self._reconstruct_sub(offset, xstep, ystep) elif filter_type == 2: self._reconstruct_up(offset, xstep, ystep) elif filter_type == 3 or filter_type == 0x103: self._reconstruct_average(offset, xstep, ystep) elif filter_type == 4: self._reconstruct_paeth(offset, xstep, ystep) return def deinterlace(self, scanlines): # print >> sys.stderr, ("Reading interlaced, w=%s, r=%s, planes=%s," + # " bpp=%s") % (self.width, self.height, self.planes, self.bps) a = array('B') self.pixels = a # Make the array big enough temp = scanlines[0:self.width*self.height*self.psize] a.extend(temp) source_offset = 0 for xstart, ystart, xstep, ystep in _adam7: # print >> sys.stderr, "Adam7: start=%s,%s step=%s,%s" % ( # xstart, ystart, xstep, ystep) filter_first_line = 1 for y in range(ystart, self.height, ystep): if xstart >= self.width: continue filter_type = scanlines[source_offset] source_offset += 1 if xstep == 1: offset = y * self.row_bytes a[offset:offset+self.row_bytes] = \ scanlines[source_offset:source_offset + self.row_bytes] source_offset += self.row_bytes else: # Note we want the ceiling of (width - xstart) / xtep row_len = self.psize * ( (self.width - xstart + xstep - 1) / xstep) offset = y * self.row_bytes + xstart * self.psize end_offset = (y+1) * self.row_bytes skip = self.psize * xstep for i in range(self.psize): a[offset+i:end_offset:skip] = \ scanlines[source_offset + i: source_offset + row_len: self.psize] source_offset += row_len if filter_type: self.reconstruct_line(filter_type, filter_first_line, offset, xstep, ystep) filter_first_line = 0 return a def read_flat(self, scanlines): a = array('B') self.pixels = a offset = 0 source_offset = 0 filter_first_line = 1 for y in range(self.height): filter_type = 
scanlines[source_offset] source_offset += 1 a.extend(scanlines[source_offset: source_offset + self.row_bytes]) if filter_type: self.reconstruct_line(filter_type, filter_first_line, offset, 1, 1) filter_first_line = 0 offset += self.row_bytes source_offset += self.row_bytes return a def read(self): """ Read a simple PNG file, return width, height, pixels and image metadata This function is a very early prototype with limited flexibility and excessive use of memory. """ signature = self.file.read(8) if (signature != struct.pack("8B", 137, 80, 78, 71, 13, 10, 26, 10)): raise Error("PNG file has invalid header") compressed = [] image_metadata = {} while True: try: tag, data = self.read_chunk() except ValueError, e: raise Error('Chunk error: ' + e.args[0]) # print >> sys.stderr, tag, len(data) if tag == asbytes('IHDR'): # http://www.w3.org/TR/PNG/#11IHDR (width, height, bits_per_sample, color_type, compression_method, filter_method, interlaced) = struct.unpack("!2I5B", data) bps = bits_per_sample // 8 if bps == 0: raise Error("unsupported pixel depth") if bps > 2 or bits_per_sample != (bps * 8): raise Error("invalid pixel depth") if color_type == 0: greyscale = True has_alpha = False planes = 1 elif color_type == 2: greyscale = False has_alpha = False planes = 3 elif color_type == 4: greyscale = True has_alpha = True planes = 2 elif color_type == 6: greyscale = False has_alpha = True planes = 4 else: raise Error("unknown PNG colour type %s" % color_type) if compression_method != 0: raise Error("unknown compression method") if filter_method != 0: raise Error("unknown filter method") self.bps = bps self.planes = planes self.psize = bps * planes self.width = width self.height = height self.row_bytes = width * self.psize elif tag == asbytes('IDAT'): # http://www.w3.org/TR/PNG/#11IDAT compressed.append(data) elif tag == asbytes('bKGD'): if greyscale: image_metadata["background"] = struct.unpack("!1H", data) else: image_metadata["background"] = struct.unpack("!3H", data) elif 
tag == asbytes('tRNS'): if greyscale: image_metadata["transparent"] = struct.unpack("!1H", data) else: image_metadata["transparent"] = struct.unpack("!3H", data) elif tag == asbytes('gAMA'): image_metadata["gamma"] = ( struct.unpack("!L", data)[0]) / 100000.0 elif tag == asbytes('IEND'): # http://www.w3.org/TR/PNG/#11IEND break scanlines = array('B', zlib.decompress(asbytes('').join(compressed))) if interlaced: pixels = self.deinterlace(scanlines) else: pixels = self.read_flat(scanlines) image_metadata["greyscale"] = greyscale image_metadata["has_alpha"] = has_alpha image_metadata["bytes_per_sample"] = bps image_metadata["interlaced"] = interlaced return width, height, pixels, image_metadata def test_suite(options): """ Run regression test and write PNG file to stdout. """ # Below is a big stack of test image generators def test_gradient_horizontal_lr(x, y): return x def test_gradient_horizontal_rl(x, y): return 1-x def test_gradient_vertical_tb(x, y): return y def test_gradient_vertical_bt(x, y): return 1-y def test_radial_tl(x, y): return max(1-math.sqrt(x*x+y*y), 0.0) def test_radial_center(x, y): return test_radial_tl(x-0.5, y-0.5) def test_radial_tr(x, y): return test_radial_tl(1-x, y) def test_radial_bl(x, y): return test_radial_tl(x, 1-y) def test_radial_br(x, y): return test_radial_tl(1-x, 1-y) def test_stripe(x, n): return 1.0*(int(x*n) & 1) def test_stripe_h_2(x, y): return test_stripe(x, 2) def test_stripe_h_4(x, y): return test_stripe(x, 4) def test_stripe_h_10(x, y): return test_stripe(x, 10) def test_stripe_v_2(x, y): return test_stripe(y, 2) def test_stripe_v_4(x, y): return test_stripe(y, 4) def test_stripe_v_10(x, y): return test_stripe(y, 10) def test_stripe_lr_10(x, y): return test_stripe(x+y, 10) def test_stripe_rl_10(x, y): return test_stripe(x-y, 10) def test_checker(x, y, n): return 1.0*((int(x*n) & 1) ^ (int(y*n) & 1)) def test_checker_8(x, y): return test_checker(x, y, 8) def test_checker_15(x, y): return test_checker(x, y, 15) def 
test_zero(x, y): return 0 def test_one(x, y): return 1 test_patterns = { "GLR": test_gradient_horizontal_lr, "GRL": test_gradient_horizontal_rl, "GTB": test_gradient_vertical_tb, "GBT": test_gradient_vertical_bt, "RTL": test_radial_tl, "RTR": test_radial_tr, "RBL": test_radial_bl, "RBR": test_radial_br, "RCTR": test_radial_center, "HS2": test_stripe_h_2, "HS4": test_stripe_h_4, "HS10": test_stripe_h_10, "VS2": test_stripe_v_2, "VS4": test_stripe_v_4, "VS10": test_stripe_v_10, "LRS": test_stripe_lr_10, "RLS": test_stripe_rl_10, "CK8": test_checker_8, "CK15": test_checker_15, "ZERO": test_zero, "ONE": test_one, } def test_pattern(width, height, depth, pattern): a = array('B') fw = float(width) fh = float(height) pfun = test_patterns[pattern] if depth == 1: for y in range(height): for x in range(width): a.append(int(pfun(float(x)/fw, float(y)/fh) * 255)) elif depth == 2: for y in range(height): for x in range(width): v = int(pfun(float(x)/fw, float(y)/fh) * 65535) a.append(v >> 8) a.append(v & 0xff) return a def test_rgba(size=256, depth=1, red="GTB", green="GLR", blue="RTL", alpha=None): r = test_pattern(size, size, depth, red) g = test_pattern(size, size, depth, green) b = test_pattern(size, size, depth, blue) if alpha: a = test_pattern(size, size, depth, alpha) i = interleave_planes(r, g, depth, depth) i = interleave_planes(i, b, 2 * depth, depth) if alpha: i = interleave_planes(i, a, 3 * depth, depth) return i # The body of test_suite() size = 256 if options.test_size: size = options.test_size depth = 1 if options.test_deep: depth = 2 kwargs = {} if options.test_red: kwargs["red"] = options.test_red if options.test_green: kwargs["green"] = options.test_green if options.test_blue: kwargs["blue"] = options.test_blue if options.test_alpha: kwargs["alpha"] = options.test_alpha pixels = test_rgba(size, depth, **kwargs) writer = Writer(size, size, bytes_per_sample=depth, transparent=options.transparent, background=options.background, gamma=options.gamma, 
has_alpha=options.test_alpha, compression=options.compression, interlaced=options.interlace) writer.write_array(sys.stdout, pixels) def read_pnm_header(infile, supported='P6'): """ Read a PNM header, return width and height of the image in pixels. """ header = [] while len(header) < 4: line = infile.readline() sharp = line.find('#') if sharp > -1: line = line[:sharp] header.extend(line.split()) if len(header) == 3 and header[0] == 'P4': break # PBM doesn't have maxval if header[0] not in supported: raise NotImplementedError('file format %s not supported' % header[0]) if header[0] != 'P4' and header[3] != '255': raise NotImplementedError('maxval %s not supported' % header[3]) return int(header[1]), int(header[2]) def color_triple(color): """ Convert a command line color value to a RGB triple of integers. FIXME: Somewhere we need support for greyscale backgrounds etc. """ if color.startswith('#') and len(color) == 4: return (int(color[1], 16), int(color[2], 16), int(color[3], 16)) if color.startswith('#') and len(color) == 7: return (int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16)) elif color.startswith('#') and len(color) == 13: return (int(color[1:5], 16), int(color[5:9], 16), int(color[9:13], 16)) def _main(): """ Run the PNG encoder with options from the command line. 
""" # Parse command line arguments from optparse import OptionParser version = '%prog ' + __revision__.strip('$').replace('Rev: ', 'r') parser = OptionParser(version=version) parser.set_usage("%prog [options] [pnmfile]") parser.add_option("-i", "--interlace", default=False, action="store_true", help="create an interlaced PNG file (Adam7)") parser.add_option("-t", "--transparent", action="store", type="string", metavar="color", help="mark the specified color as transparent") parser.add_option("-b", "--background", action="store", type="string", metavar="color", help="save the specified background color") parser.add_option("-a", "--alpha", action="store", type="string", metavar="pgmfile", help="alpha channel transparency (RGBA)") parser.add_option("-g", "--gamma", action="store", type="float", metavar="value", help="save the specified gamma value") parser.add_option("-c", "--compression", action="store", type="int", metavar="level", help="zlib compression level (0-9)") parser.add_option("-T", "--test", default=False, action="store_true", help="create a test image") parser.add_option("-R", "--test-red", action="store", type="string", metavar="pattern", help="test pattern for the red image layer") parser.add_option("-G", "--test-green", action="store", type="string", metavar="pattern", help="test pattern for the green image layer") parser.add_option("-B", "--test-blue", action="store", type="string", metavar="pattern", help="test pattern for the blue image layer") parser.add_option("-A", "--test-alpha", action="store", type="string", metavar="pattern", help="test pattern for the alpha image layer") parser.add_option("-D", "--test-deep", default=False, action="store_true", help="use test patterns with 16 bits per layer") parser.add_option("-S", "--test-size", action="store", type="int", metavar="size", help="width and height of the test image") (options, args) = parser.parse_args() # Convert options if options.transparent is not None: options.transparent = 
color_triple(options.transparent) if options.background is not None: options.background = color_triple(options.background) # Run regression tests if options.test: return test_suite(options) # Prepare input and output files if len(args) == 0: ppmfilename = '-' ppmfile = sys.stdin elif len(args) == 1: ppmfilename = args[0] ppmfile = open(ppmfilename, 'rb') else: parser.error("more than one input file") outfile = sys.stdout # Encode PNM to PNG width, height = read_pnm_header(ppmfile) writer = Writer(width, height, transparent=options.transparent, background=options.background, has_alpha=options.alpha is not None, gamma=options.gamma, compression=options.compression) if options.alpha is not None: pgmfile = open(options.alpha, 'rb') awidth, aheight = read_pnm_header(pgmfile, 'P5') if (awidth, aheight) != (width, height): raise ValueError("alpha channel image size mismatch" + " (%s has %sx%s but %s has %sx%s)" % (ppmfilename, width, height, options.alpha, awidth, aheight)) writer.convert_ppm_and_pgm(ppmfile, pgmfile, outfile, interlace=options.interlace) else: writer.convert_ppm(ppmfile, outfile, interlace=options.interlace) if __name__ == '__main__': _main()
mpasternak/pyglet-fix-issue-552
pyglet/image/codecs/pypng.py
Python
bsd-3-clause
41,571
0.000385
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import time

import obci_log_model


class DummyLogModel(obci_log_model.LogModel):
    """Fake log source that alternates synthetic 'amplifier'/'mx' entries."""

    def __init__(self):
        super(DummyLogModel, self).__init__()
        # Monotonic counter; its parity decides which fake peer "spoke".
        self._ind = 0
        # Per-peer log store, keyed by peer id.
        self._peers_log = {
            peer: {'peer_id': peer, 'logs': []}
            for peer in ('amplifier', 'mx')
        }

    def next_log(self):
        """Return the next synthetic (peer_id, message) pair.

        Sleeps briefly to simulate a real, slowly-arriving log stream.
        """
        time.sleep(0.05)
        self._ind += 1
        is_even = self._ind % 2 == 0
        peer = 'amplifier' if is_even else 'mx'
        prefix = 'AMP ' if is_even else 'MX '
        return peer, prefix + str(self._ind)

    def post_run(self):
        """No teardown needed for the dummy model."""
        pass
BrainTech/openbci
obci/control/gui/obci_log_model_dummy.py
Python
gpl-3.0
732
0
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
import typing

from ... import exc
from ... import util
from ...sql import coercions
from ...sql import elements
from ...sql import operators
from ...sql import roles
from ...sql.base import _generative
from ...sql.base import Generative

# Bound TypeVar so the @_generative methods return the concrete
# (sub)class type rather than the base "match".
Selfmatch = typing.TypeVar("Selfmatch", bound="match")


class match(Generative, elements.BinaryExpression):
    """Produce a ``MATCH (X, Y) AGAINST ('TEXT')`` clause.

    E.g.::

        from sqlalchemy import desc
        from sqlalchemy.dialects.mysql import match

        match_expr = match(
            users_table.c.firstname,
            users_table.c.lastname,
            against="Firstname Lastname",
        )

        stmt = (
            select(users_table)
            .where(match_expr.in_boolean_mode())
            .order_by(desc(match_expr))
        )

    Would produce SQL resembling::

        SELECT id, firstname, lastname
        FROM user
        WHERE MATCH(firstname, lastname) AGAINST (:param_1 IN BOOLEAN MODE)
        ORDER BY MATCH(firstname, lastname) AGAINST (:param_2) DESC

    The :func:`_mysql.match` function is a standalone version of the
    :meth:`_sql.ColumnElement.match` method available on all SQL
    expressions, as when :meth:`_expression.ColumnElement.match` is
    used, but allows to pass multiple columns.

    :param cols: column expressions to match against

    :param against: expression to be compared towards

    :param in_boolean_mode: boolean, set "boolean mode" to true

    :param in_natural_language_mode: boolean, set "natural language" to
     true

    :param with_query_expansion: boolean, set "query expansion" to true

    .. versionadded:: 1.4.19

    .. seealso::

        :meth:`_expression.ColumnElement.match`

    """

    __visit_name__ = "mysql_match"

    inherit_cache = True

    def __init__(self, *cols, **kw):
        # Both the column list and the "against" expression are mandatory;
        # fail loudly before touching the coercion machinery.
        if not cols:
            raise exc.ArgumentError("columns are required")

        against = kw.pop("against", None)

        if against is None:
            raise exc.ArgumentError("against is required")

        against = coercions.expect(
            roles.ExpressionElementRole,
            against,
        )

        # The left side of the binary expression is the comma-joined
        # column list, e.g. "firstname, lastname".
        left = elements.BooleanClauseList._construct_raw(
            operators.comma_op,
            clauses=cols,
        )
        # NOTE(review): presumably disables extra parenthesization of the
        # comma list so it renders as MATCH(a, b) -- confirm against the
        # MySQL compiler's mysql_match visitor.
        left.group = False

        # Modifier flags are stored immutably; the generative methods
        # below replace the whole dict via .union() rather than mutating.
        flags = util.immutabledict(
            {
                "mysql_boolean_mode": kw.pop("in_boolean_mode", False),
                "mysql_natural_language": kw.pop(
                    "in_natural_language_mode", False
                ),
                "mysql_query_expansion": kw.pop("with_query_expansion", False),
            }
        )

        # Anything left in kw was not a recognized keyword argument.
        if kw:
            raise exc.ArgumentError("unknown arguments: %s" % (", ".join(kw)))

        super(match, self).__init__(
            left, against, operators.match_op, modifiers=flags
        )

    @_generative
    def in_boolean_mode(self: Selfmatch) -> Selfmatch:
        """Apply the "IN BOOLEAN MODE" modifier to the MATCH expression.

        :return: a new :class:`_mysql.match` instance with modifications
         applied.
        """

        self.modifiers = self.modifiers.union({"mysql_boolean_mode": True})
        return self

    @_generative
    def in_natural_language_mode(self: Selfmatch) -> Selfmatch:
        """Apply the "IN NATURAL LANGUAGE MODE" modifier to the MATCH
        expression.

        :return: a new :class:`_mysql.match` instance with modifications
         applied.
        """

        self.modifiers = self.modifiers.union({"mysql_natural_language": True})
        return self

    @_generative
    def with_query_expansion(self: Selfmatch) -> Selfmatch:
        """Apply the "WITH QUERY EXPANSION" modifier to the MATCH
        expression.

        :return: a new :class:`_mysql.match` instance with modifications
         applied.
        """

        self.modifiers = self.modifiers.union({"mysql_query_expansion": True})
        return self
sqlalchemy/sqlalchemy
lib/sqlalchemy/dialects/mysql/expression.py
Python
mit
4,164
0
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

import urllib2
import json

from plugins.plugin import Plugin
from time import time
from bytebot_config import BYTEBOT_HTTP_TIMEOUT, BYTEBOT_HTTP_MAXSIZE
from bytebot_config import BYTEBOT_PLUGIN_CONFIG


class parking(Plugin):
    """IRC plugin answering ``!parking`` with free parking-lot counts.

    Lot data is fetched as JSON from the URL configured under
    ``BYTEBOT_PLUGIN_CONFIG['parking']['url']``; replies are rate-limited
    to at most one upstream fetch per ``RATE_LIMIT_SECONDS``.
    """

    # Minimum number of seconds between two upstream fetches.
    RATE_LIMIT_SECONDS = 60

    def __init__(self):
        pass

    def registerCommand(self, irc):
        """Advertise the ``!parking`` command to the bot core."""
        irc.registerCommand('!parking', 'Parken')

    def _get_parking_status(self):
        """Fetch and decode the parking-lot JSON document.

        :return: the parsed JSON structure; the lot loop in ``onPrivmsg``
            starts at index 1 (index 0 is presumably a metadata record --
            TODO confirm against the upstream schema).
        """
        url = BYTEBOT_PLUGIN_CONFIG['parking']['url']

        data = urllib2.urlopen(url, timeout=BYTEBOT_HTTP_TIMEOUT).read(
            BYTEBOT_HTTP_MAXSIZE)
        # Drop undecodable bytes instead of failing on bad upstream data.
        data = unicode(data, errors='ignore')

        return json.loads(data)

    def onPrivmsg(self, irc, msg, channel, user):
        """Handle a channel message; reply when it contains ``!parking``."""
        if '!parking' not in msg:
            return

        self.irc = irc
        self.channel = channel

        # Timestamp of the last successful fetch; 0 when never fetched.
        # Fix: the original used "except Exception" merely to detect the
        # missing attribute, which would also hide unrelated errors.
        last_parking = getattr(irc, 'last_parking', 0)

        if last_parking >= (time() - self.RATE_LIMIT_SECONDS):
            irc.msg(channel, "Don't overdo it ;)")
            return

        try:
            data = self._get_parking_status()

            irc.msg(channel, 'Free parking lots:')

            # Lots start at index 1 (see _get_parking_status docstring).
            for x in range(1, len(data)):
                name = data[x][u'name'].encode('ascii', 'ignore')
                occupied = int(data[x][u'belegt'].encode('ascii', 'ignore'))
                spaces = int(data[x][u'maximal'].encode('ascii', 'ignore'))

                # Upstream occasionally reports negative occupancy.
                if occupied < 0:
                    occupied = 0

                if spaces <= 0:
                    print_str = '{:25s}: not available'.format(name)
                else:
                    print_str = '{:25s}: '.format(name) + \
                        '{:3.0f} / '.format(spaces - occupied) + \
                        '{:3.0f}'.format(spaces)
                irc.msg(channel, print_str)
            # Arm the rate limit only after a successful fetch, so a
            # failing endpoint can be retried immediately.
            irc.last_parking = time()
        except Exception as e:
            print(e)
            irc.msg(channel, 'Error while fetching data.')
jurkov/Bytebot
plugins/parking.py
Python
mit
2,182
0
""" TODO: - Handle if file already exists """ import ctypes import io import os import struct from contextlib import contextmanager import ddt import mock from unittest2 import TestCase import tempfile from polypype import _MAX_C_FLOAT, _MAX_C_UINT32, PolyPype from polypype.exceptions import ( PolyPypeArgumentException, PolyPypeException, PolyPypeFileExistsException, PolyPypeOverflowException, PolyPypeTypeException, ) @ddt.ddt class PolyPypeTestCase(TestCase): def setUp(self, *args, **kwargs): self.test_filename = 'test_output' self.polypype = PolyPype(self.test_filename) self.addCleanup( self.remove_file_if_exists, self.polypype.output_filename ) def remove_file_if_exists(self, filename): if os.path.isfile(filename): os.remove(filename) def assert_next_ctype_equal(self, f, t, expected_value): """ Given an open file, expect that the next 4 bytes are a c_float with the expected value. Arguments: f (file): File to read from t (str): `struct` library format string specifying a c type expected_value (int, float): Numeric value of the expected c type """ self.assertEqual( struct.unpack(t, f.read(4))[0], ctypes.c_float(expected_value).value ) @contextmanager def open_output_file(self): """DRY helper for opening the PolyPype output file.""" with io.open(self.test_filename, 'rb') as f: yield f @ddt.data( (0.1, [32, 5, 0.7]), (0.35, [0]), (1, [1, 2, 3]), (1, [_MAX_C_FLOAT]) ) @ddt.unpack def test_write_event(self, time_delta, params): self.polypype.write_event(time_delta, params) with self.open_output_file() as f: self.assert_next_ctype_equal(f, '<f', time_delta) self.assert_next_ctype_equal(f, '<I', len(params)) for param in params: self.assert_next_ctype_equal(f, '<f', param) @ddt.data( (None, [None]), ('1', ['1']), (list(), [list()]), (dict(), [dict()]) ) @ddt.unpack def test_bad_types(self, time_delta, params): with self.assertRaises(PolyPypeTypeException): self.polypype.write_event(time_delta, params) @ddt.data(list(), dict(), set()) def test_no_params(self, 
empty_container): time_delta = 1 self.polypype.write_event(time_delta, empty_container) with self.open_output_file() as f: self.assert_next_ctype_equal(f, '<f', time_delta) self.assert_next_ctype_equal(f, '<I', 0) self.assertEqual(f.read(), '') def test_too_many_params(self): big_list = mock.MagicMock() big_list.__len__ = mock.MagicMock(return_value=_MAX_C_UINT32 + 1) with self.assertRaises(PolyPypeOverflowException): self.polypype.write_event(1, big_list) def test_param_too_large(self): with self.assertRaises(PolyPypeOverflowException): self.polypype.write_event(1, [_MAX_C_FLOAT * 2]) @mock.patch( 'polypype.struct.pack', mock.Mock(side_effect=PolyPypeException) ) def test_no_event_when_error(self): """ Verify that the event is not written to file if any error occurs. """ try: self.polypype.write_event(1, [2]) except PolyPypeException: pass self.assertFalse( os.path.isfile(self.test_filename), 'Expected output file not to exist.' ) def test_file_already_exists(self): with self.assertRaises(PolyPypeFileExistsException): filename = tempfile.mkstemp()[1] PolyPype(filename) os.remove(filename) def test_overwrite_file(self): self.polypype.write_event(1, [2]) new_polypype = PolyPype(self.test_filename, overwrite_file=True) new_polypype.write_event(3, [4]) with self.open_output_file() as f: self.assert_next_ctype_equal(f, '<f', 3) self.assert_next_ctype_equal(f, '<I', 1) self.assert_next_ctype_equal(f, '<f', 4) def test_append_to_file(self): self.polypype.write_event(1, [2]) new_polypype = PolyPype(self.test_filename, append_to_file=True) new_polypype.write_event(3, [4]) with self.open_output_file() as f: self.assert_next_ctype_equal(f, '<f', 1) self.assert_next_ctype_equal(f, '<I', 1) self.assert_next_ctype_equal(f, '<f', 2) self.assert_next_ctype_equal(f, '<f', 3) self.assert_next_ctype_equal(f, '<I', 1) self.assert_next_ctype_equal(f, '<f', 4) def test_append_and_overwrite(self): with self.assertRaises(PolyPypeArgumentException): PolyPype( self.test_filename, 
append_to_file=True, overwrite_file=True )
dan-f/polypype
tests/test_polypipe.py
Python
mit
5,061
0
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors # # This module is part of async and is released under # the New BSD License: http://www.opensource.org/licenses/bsd-license.php """Contains a queue based channel implementation""" from Queue import ( Empty, Full ) from util import ( AsyncQueue, SyncQueue, ReadOnly ) from time import time import threading import sys __all__ = ('Channel', 'SerialChannel', 'Writer', 'ChannelWriter', 'CallbackChannelWriter', 'Reader', 'ChannelReader', 'CallbackChannelReader', 'mkchannel', 'ReadOnly', 'IteratorReader', 'CallbackReaderMixin', 'CallbackWriterMixin') #{ Classes class Channel(object): """A channel is similar to a file like object. It has a write end as well as one or more read ends. If Data is in the channel, it can be read, if not the read operation will block until data becomes available. If the channel is closed, any read operation will result in an exception This base class is not instantiated directly, but instead serves as constructor for Rwriter pairs. 
Create a new channel """ __slots__ = 'queue' # The queue to use to store the actual data QueueCls = AsyncQueue def __init__(self): """initialize this instance with a queue holding the channel contents""" self.queue = self.QueueCls() class SerialChannel(Channel): """A slightly faster version of a Channel, which sacrificed thead-safety for performance""" QueueCls = SyncQueue class Writer(object): """A writer is an object providing write access to a possibly blocking reading device""" __slots__ = tuple() #{ Interface def __init__(self, device): """Initialize the instance with the device to write to""" def write(self, item, block=True, timeout=None): """Write the given item into the device :param block: True if the device may block until space for the item is available :param timeout: The time in seconds to wait for the device to become ready in blocking mode""" raise NotImplementedError() def size(self): """:return: number of items already in the device, they could be read with a reader""" raise NotImplementedError() def close(self): """Close the channel. Multiple close calls on a closed channel are no an error""" raise NotImplementedError() def closed(self): """:return: True if the channel was closed""" raise NotImplementedError() #} END interface class ChannelWriter(Writer): """The write end of a channel, a file-like interface for a channel""" __slots__ = ('channel', '_put') def __init__(self, channel): """Initialize the writer to use the given channel""" self.channel = channel self._put = self.channel.queue.put #{ Interface def write(self, item, block=False, timeout=None): return self._put(item, block, timeout) def size(self): return self.channel.queue.qsize() def close(self): """Close the channel. 
Multiple close calls on a closed channel are no an error""" self.channel.queue.set_writable(False) def closed(self): """:return: True if the channel was closed""" return not self.channel.queue.writable() #} END interface class CallbackWriterMixin(object): """The write end of a channel which allows you to setup a callback to be called after an item was written to the channel""" # slots don't work with mixin's :( # __slots__ = ('_pre_cb') def __init__(self, *args): super(CallbackWriterMixin, self).__init__(*args) self._pre_cb = None def set_pre_cb(self, fun = lambda item: item): """ Install a callback to be called before the given item is written. It returns a possibly altered item which will be written to the channel instead, making it useful for pre-write item conversions. Providing None uninstalls the current method. :return: the previously installed function or None :note: Must be thread-safe if the channel is used in multiple threads""" prev = self._pre_cb self._pre_cb = fun return prev def write(self, item, block=True, timeout=None): if self._pre_cb: item = self._pre_cb(item) super(CallbackWriterMixin, self).write(item, block, timeout) class CallbackChannelWriter(CallbackWriterMixin, ChannelWriter): """Implements a channel writer with callback functionality""" pass class Reader(object): """Allows reading from a device""" __slots__ = tuple() #{ Interface def __init__(self, device): """Initialize the instance with the device to read from""" #{ Iterator protocol def __iter__(self): return self def next(self): """Implements the iterator protocol, iterating individual items""" items = self.read(1) if items: return items[0] raise StopIteration #} END iterator protocol #{ Interface def read(self, count=0, block=True, timeout=None): """ read a list of items read from the device. The list, as a sequence of items, is similar to the string of characters returned when reading from file like objects. :param count: given amount of items to read. 
If < 1, all items will be read :param block: if True, the call will block until an item is available :param timeout: if positive and block is True, it will block only for the given amount of seconds, returning the items it received so far. The timeout is applied to each read item, not for the whole operation. :return: single item in a list if count is 1, or a list of count items. If the device was empty and count was 1, an empty list will be returned. If count was greater 1, a list with less than count items will be returned. If count was < 1, a list with all items that could be read will be returned.""" raise NotImplementedError() #} END interface class ChannelReader(Reader): """Allows reading from a channel. The reader is thread-safe if the channel is as well""" __slots__ = 'channel' def __init__(self, channel): """Initialize this instance from its parent write channel""" self.channel = channel #{ Interface def read(self, count=0, block=True, timeout=None): # if the channel is closed for writing, we never block # NOTE: is handled by the queue # We don't check for a closed state here has it costs time - most of # the time, it will not be closed, and will bail out automatically once # it gets closed # in non-blocking mode, its all not a problem out = list() queue = self.channel.queue if not block: # be as fast as possible in non-blocking mode, hence # its a bit 'unrolled' try: if count == 1: out.append(queue.get(False)) elif count < 1: while True: out.append(queue.get(False)) # END for each item else: for i in xrange(count): out.append(queue.get(False)) # END for each item # END handle count except Empty: pass # END handle exceptions else: # to get everything into one loop, we set the count accordingly if count == 0: count = sys.maxint # END handle count i = 0 while i < count: try: out.append(queue.get(block, timeout)) i += 1 except Empty: # here we are only if # someone woke us up to inform us about the queue that changed # its writable state # The following 
branch checks for closed channels, and pulls # as many items as we need and as possible, before # leaving the loop. if not queue.writable(): try: while i < count: out.append(queue.get(False, None)) i += 1 # END count loop except Empty: break # out of count loop # END handle absolutely empty queue # END handle closed channel # if we are here, we woke up and the channel is not closed # Either the queue became writable again, which currently shouldn't # be able to happen in the channel, or someone read with a timeout # that actually timed out. # As it timed out, which is the only reason we are here, # we have to abort break # END ignore empty # END for each item # END handle blocking return out #} END interface class CallbackReaderMixin(object): """A channel which sends a callback before items are read from the channel""" # unfortunately, slots can only use direct inheritance, have to turn it off :( # __slots__ = "_pre_cb" def __init__(self, *args): super(CallbackReaderMixin, self).__init__(*args) self._pre_cb = None self._post_cb = None def set_pre_cb(self, fun = lambda count: None): """ Install a callback to call with the item count to be read before any item is actually read from the channel. Exceptions will be propagated. If a function is not provided, the call is effectively uninstalled. :return: the previously installed callback or None :note: The callback must be threadsafe if the channel is used by multiple threads.""" prev = self._pre_cb self._pre_cb = fun return prev def set_post_cb(self, fun = lambda items: items): """ Install a callback to call after items have been read, but before they are returned to the caller. The callback may adjust the items and/or the list. 
If no function is provided, the callback is uninstalled :return: the previously installed function""" prev = self._post_cb self._post_cb = fun return prev def read(self, count=0, block=True, timeout=None): if self._pre_cb: self._pre_cb(count) items = super(CallbackReaderMixin, self).read(count, block, timeout) if self._post_cb: items = self._post_cb(items) return items class CallbackChannelReader(CallbackReaderMixin, ChannelReader): """Implements a channel reader with callback functionality""" pass class IteratorReader(Reader): """A Reader allowing to read items from an iterator, instead of a channel. Reads will never block. Its thread-safe""" __slots__ = ("_empty", '_iter', '_lock') # the type of the lock to use when reading from the iterator lock_type = threading.Lock def __init__(self, iterator): self._empty = False if not hasattr(iterator, 'next'): raise ValueError("Iterator %r needs a next() function" % iterator) self._iter = iterator self._lock = self.lock_type() def read(self, count=0, block=True, timeout=None): """Non-Blocking implementation of read""" # not threadsafe, but worst thing that could happen is that # we try to get items one more time if self._empty: return list() # END early abort self._lock.acquire() try: if count == 0: self._empty = True return list(self._iter) else: out = list() it = self._iter for i in xrange(count): try: out.append(it.next()) except StopIteration: self._empty = True break # END handle empty iterator # END for each item to take return out # END handle count finally: self._lock.release() # END handle locking #} END classes #{ Constructors def mkchannel(ctype = Channel, wtype = ChannelWriter, rtype = ChannelReader): """ Create a channel, with a reader and a writer :return: tuple(reader, writer) :param ctype: Channel to instantiate :param wctype: The type of the write channel to instantiate :param rctype: The type of the read channel to instantiate""" c = ctype() wc = wtype(c) rc = rtype(c) return wc, rc #} END constructors
Conjuro/async
channel.py
Python
bsd-3-clause
11,309
0.041648
import asposecellscloud from asposecellscloud.CellsApi import CellsApi from asposecellscloud.CellsApi import ApiException import asposestoragecloud from asposestoragecloud.StorageApi import StorageApi apiKey = "XXXXX" #sepcify App Key appSid = "XXXXX" #sepcify App SID apiServer = "http://api.aspose.com/v1.1" data_folder = "../../data/" #Instantiate Aspose Storage API SDK storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True) storageApi = StorageApi(storage_apiClient) #Instantiate Aspose Cells API SDK api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True) cellsApi = CellsApi(api_client); #set input file name filename = "Sample_Test_Book.xls" sheetName = "Sheet2" hyperlinkIndex = 0 #upload file to aspose cloud storage storageApi.PutCreate(Path=filename, file=data_folder + filename) try: #invoke Aspose.Cells Cloud SDK API to delete a hyperlink from a worksheet response = cellsApi.DeleteWorkSheetHyperlink(name=filename, sheetName=sheetName, hyperlinkIndex=hyperlinkIndex) if response.Status == "OK": #download Workbook from storage server response = storageApi.GetDownload(Path=filename) outfilename = "c:/temp/" + filename with open(outfilename, 'wb') as f: for chunk in response.InputStream: f.write(chunk) except ApiException as ex: print "ApiException:" print "Code:" + str(ex.code) print "Message:" + ex.message
asposecells/Aspose_Cells_Cloud
Examples/Python/Examples/DeleteHyperlinksFromExcelWorksheet.py
Python
mit
1,503
0.00998
import unittest from pyramid import testing class ViewTests(unittest.TestCase): def setUp(self): self.config = testing.setUp() def tearDown(self): testing.tearDown() def test_my_view(self): from .views import my_view request = testing.DummyRequest() info = my_view(request) self.assertEqual(info['project'], 'Lexinomicon')
Dante83/lexinomicon
lexinomicon/tests.py
Python
gpl-3.0
388
0
import tensorflow as tf from tensorflow.examples.tutorials.mnist import input_data import sys sys.path.append('./MNIST_data') import os.path from download import download have_data = os.path.exists('MNIST_data/train-images-idx3-ubyte.gz') if not have_data: download('./MNIST_data') # load data mnist = input_data.read_data_sets("MNIST_data", one_hot=True) # batch batch_size = 64 n_batch = mnist.train.num_examples // batch_size # in [60000, 28 * 28] out [60000, 10] x = tf.placeholder(tf.float32, [None,784]) y = tf.placeholder(tf.float32, [None,10]) keep_prob = tf.placeholder(tf.float32) # 神经网络结构 784-1000-500-10 w1 = tf.Variable(tf.truncated_normal([784,1000], stddev=0.1)) b1 = tf.Variable(tf.zeros([1000]) + 0.1) l1 = tf.nn.tanh(tf.matmul(x, w1) + b1) l1_drop = tf.nn.dropout(l1, keep_prob) w2 = tf.Variable(tf.truncated_normal([1000, 500], stddev=0.1)) b2 = tf.Variable(tf.zeros([500]) + 0.1) l2 = tf.nn.tanh(tf.matmul(l1_drop, w2) + b2) l2_drop = tf.nn.dropout(l2, keep_prob) w3 = tf.Variable(tf.truncated_normal([500, 10], stddev=0.1)) b3 = tf.Variable(tf.zeros([10]) + 0.1) prediction = tf.nn.softmax(tf.matmul(l2_drop, w3) + b3) # 二次代价函数 - 回归问题 # loss = tf.losses.mean_squared_error(y, prediction) # 交叉墒-分类问题 loss = tf.losses.softmax_cross_entropy(y, prediction) # 梯度下降法优化器 train = tf.train.GradientDescentOptimizer(0.5).minimize(loss) # save result to a bool array # 1000 0000 00 -> 0 # 0100 0000 00 -> 1 # ... 
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1)) # correct rate, bool -> float ->mean accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) with tf.Session() as sess: # init variable sess.run(tf.global_variables_initializer()) for epoch in range(10): for batch in range(n_batch): # get a batch data and label batch_x, batch_y = mnist.train.next_batch(batch_size) sess.run(train, feed_dict={x:batch_x, y:batch_y, keep_prob:0.5}) acc = sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels, keep_prob:1.0}) train_acc = sess.run(accuracy, feed_dict={x:mnist.train.images, y:mnist.train.labels, keep_prob:1.0}) print("Iter " + str(epoch + 1) + ", Testing Accuracy " + str(acc) + ", Training Accuracy " + str(train_acc))
shucommon/little-routine
python/AI/tensorflow/dropout.py
Python
gpl-3.0
2,353
0.00742
#!/usr/bin/env python """Do the final prediction of binding site given all features.""" from __future__ import print_function, absolute_import import pickle import os import optparse import cryptosite.config def get_matrix(inputdata, model='linear'): Res = {'CYS': (0, 0, 1, 0, 0), 'ASP': (0, 0, 0, 1, 1), 'SER': (0, 1, 1, 1, 1), 'GLN': (0, 0, 1, 0, 1), 'LYS': (0, 1, 0, 1, 1), 'ILE': (0, 1, 0, 0, 1), 'PRO': (0, 1, 1, 1, 0), 'THR': (1, 0, 0, 0, 0), 'PHE': (0, 1, 1, 0, 1), 'ALA': (0, 0, 0, 0, 0), 'GLY': (0, 0, 1, 1, 1), 'HIS': (0, 1, 0, 0, 0), 'GLU': (0, 0, 1, 1, 0), 'LEU': (0, 1, 0, 1, 0), 'ARG': (0, 0, 0, 0, 1), 'TRP': (1, 0, 0, 0, 1), 'VAL': (1, 0, 0, 1, 1), 'ASN': (0, 0, 0, 1, 0), 'TYR': (1, 0, 0, 1, 0), 'MET': (0, 1, 1, 0, 0)} Res = Res.keys() SSE = ['B', 'E', 'G', 'H', 'I', 'S', 'T', 'U'] data = open(inputdata) D = data.readlines() data.close() Header = D[0].strip().split() if model == 'poly': # the bottom visited for poly SVM visited = [Header.index('CNC_mean_300'), Header.index('SQC'), Header.index('D2S')] visited += [Header.index('SQCn'), Header.index('PCKn'), Header.index('Hn')] visited += [Header.index('CN5_std_450'), Header.index('CN5_std_300'), Header.index('CN5_std_350')] visited += [Header.index('CNC'), Header.index('PRT_std_450'), Header.index('CN5_mean_500')] visited += [Header.index('Bn'), Header.index('CHRn'), Header.index('In')] visited += [Header.index('CNC_std_300'), Header.index('CNS_300'), Header.index('SAS14_std_400')] visited += [Header.index('SASn')] elif model == 'linear': # for linear SVM visited = [Header.index('CNC_mean_300'), Header.index('SQC'), Header.index('CN5_std_450')] visited += [Header.index('D2S'), Header.index('CNS_300'), Header.index('Hn')] visited += [Header.index('CN5_mean_450'), Header.index('CN5_std_300'), Header.index('SQCn')] visited += [Header.index('CNC_std_350'), Header.index('CNCn'), Header.index('CVX_mean_450')] visited += [Header.index('In')] elif model == 'final': visited = [Header.index('CNC_mean_'), 
Header.index('SQC'), Header.index('PTM')] else: print('Wrong model: ', model) exit() M = [] Indeces, cnt = {}, 0 for d in D[1:]: d = d.strip().split('\t') LA = [] for hd in range(len(Header)): if hd not in visited: pass else: if hd == 1: L = [0.] * len(Res) L[Res.index(d[1])] = 1. LA += L elif hd in range(4, 8): LA += [float(d[hd])] elif hd == 8: s = [0.] * len(SSE) s[SSE.index(d[8])] = 1. LA += s else: LA += [float(d[hd])] LA += [float(d[-1])] M.append([d[0]] + LA) Indeces[cnt] = tuple(d[:3]) cnt += 1 return M, [Header[j] for j in sorted(visited)], Indeces def predict(inputdata, model='linear'): import numpy as np from sklearn.metrics import confusion_matrix print('Reading in the data ...') M, Header, Indeces = get_matrix(inputdata, model) print('Processing the data for model %s ...' % model.upper()) pdb = inputdata.split('.')[0] print(pdb) NewIndeces, newcnt = {}, 0 X_learn, Y_learn = [], [] for r, m in enumerate(M): if len(np.argwhere(np.isnan(np.array(m[1:-1])))) > 0: raise ValueError(r, m[0]) X_learn.append(np.array(m[1:-1])) Y_learn.append(m[-1]) NewIndeces[newcnt] = Indeces[r] newcnt += 1 X_learn = np.array(X_learn) X_learn = np.vstack((X_learn[:, 0], X_learn[:, 2], X_learn[:, 1])).T scaler_pkl = {'linear': 'LinearScaler_Final.pkl', 'poly': 'PolyScaler_Final.pkl', 'final': 'Scaler_Final_Final.pkl'}[model] outmodel_pkl = {'linear': 'LinearSVC_FinalModel.pkl', 'poly': 'PolySVC_FinalModel.pkl', 'final': 'SVM_Final_Final.pkl'}[model] print('Scaling ...') with open(os.path.join(cryptosite.config.datadir, scaler_pkl)) as fh: scaler = pickle.load(fh) X_learn = scaler.transform(X_learn) with open(os.path.join(cryptosite.config.datadir, outmodel_pkl)) as fh: learner = pickle.load(fh) print('Predicting ...') # Set _gamma explicitly (earlier versions of cryptosite relied on a hacked # local copy of sklearn that did this) learner._gamma = 1.0 / X_learn.shape[1] Y_pred = learner.predict(X_learn) CM = confusion_matrix(Y_learn, Y_pred) print() print("Confusion matrix for: 
", pdb) print(CM) print() # output Y_pred_prob = learner.predict_proba(X_learn) Y_PRED_PROB_ALL = list(Y_pred_prob[:, 1]) suffix = {'linear': 'lin', 'poly': 'pol', 'final': 'pol'}[model] outn = open(pdb + '.%s.pred' % suffix, 'w') print('Writing output files ...') outn.write('\t'.join(['PDBID', 'Res', 'ResID'] + Header + ['CryptositeValue']) + '\n') for x in range(len(Y_PRED_PROB_ALL)): outn.write('\t'.join(list(NewIndeces[x]) + [str(i) for i in X_learn[x]] + [str(Y_PRED_PROB_ALL[x])]) + '\n') outn.close() write_pdb(pdb, model) print('Done!') def write_pdb(pdb, model='linear'): suffix = {'linear': 'lin', 'poly': 'pol', 'final': 'pol'}[model] with open(pdb + '.%s.pred' % suffix) as data: D = data.readlines() out = open('%s.%s.pred.pdb' % (pdb, suffix), 'w') Data = {} for d in D: d = d.strip().split() Data[(d[1], d[2])] = ('0.0', d[-1]) with open('%s_mdl.pdb' % pdb.split('/')[-1]) as data: D = data.readlines() for d in D: if 'ATOM' == d[:4]: p = (d[17:20], str(int(d[22:26]))) try: pred = Data[p] except KeyError: pred = ('0.0', '0.0') v = '%.2f' % (float(pred[1]) * 100) v = (6 - len(v)) * ' ' + v line = d[:56] + pred[0] + '0' + v + '\n' out.write(line) else: out.write(d) out.close() def parse_args(): usage = """%prog [opts] <model_name> Do the final prediction of binding site given all features. <model_name> should be the name of the model. The model's 3D structure, <model_name>_mdl.pdb, and the features file, <model_name>.features, are read in from the current directory. Two files are generated on successful prediction: <model_name>.pol.pred: a simple tab-separated file listing the value of the CryptoSite score for each residue. <model_name>.pol.pred.pdb: a PDB file with the CryptoSite score in the occupancy column, for visualization. 
""" parser = optparse.OptionParser(usage) opts, args = parser.parse_args() if len(args) != 1: parser.error("incorrect number of arguments") return args[0] def main(): model_name = parse_args() predict(model_name + '.features', model='final') if __name__ == '__main__': main()
salilab/cryptosite
lib/cryptosite/predict.py
Python
lgpl-2.1
7,574
0
#!/usr/bin/env python3 import sys from util import proctal_cli, sleeper class Error(Exception): pass class TestSingleValue: def __init__(self, type, value): self.type = type self.value = value pass def run(self, guinea): address = proctal_cli.allocate(guinea.pid(), self.value.size()) try: writer = proctal_cli.write(guinea.pid(), address, self.type, binary=True) try: writer.write_value(self.value) writer.stop() reader = proctal_cli.read(guinea.pid(), address, self.type) try: value = reader.next_value() if self.value.cmp(value) != 0: raise Error("Expected {expected} but got {found}.".format(expected=self.value, found=value)) finally: reader.stop() finally: writer.stop() finally: proctal_cli.deallocate(guinea.pid(), address) int32 = proctal_cli.TypeInteger(32); int32_test_val = proctal_cli.ValueInteger(int32) int32_test_val.parse(0x0ACC23AA) tests = [ TestSingleValue(int32, int32_test_val) ] guinea = sleeper.run() try: for test in tests: test.run(guinea) finally: guinea.stop()
daniel-araujo/proctal
src/cli/tests/write-binary.py
Python
gpl-3.0
1,310
0.00458
# mxacquisition.py # # Copyright (C) 2014 Diamond Light Source, Karl Levik # # 2014-09-24 # # Methods to store MX acquisition data # import copy from ispyb.sp.acquisition import Acquisition from ispyb.strictordereddict import StrictOrderedDict class MXAcquisition(Acquisition): """MXAcquisition provides methods to store data in the MX acquisition tables.""" def __init__(self): self.insert_data_collection_group = super().upsert_data_collection_group self.insert_data_collection = super().upsert_data_collection self.update_data_collection_group = super().upsert_data_collection_group self.update_data_collection = super().upsert_data_collection _image_params = StrictOrderedDict( [ ("id", None), ("parentid", None), ("img_number", None), ("filename", None), ("file_location", None), ("measured_intensity", None), ("jpeg_path", None), ("jpeg_thumb_path", None), ("temperature", None), ("cumulative_intensity", None), ("synchrotron_current", None), ("comments", None), ("machine_msg", None), ] ) _dcg_grid_params = StrictOrderedDict( [ ("id", None), ("parentid", None), ("dxInMm", None), ("dyInMm", None), ("stepsX", None), ("stepsY", None), ("meshAngle", None), ("pixelsPerMicronX", None), ("pixelsPerMicronY", None), ("snapshotOffsetXPixel", None), ("snapshotOffsetYPixel", None), ("orientation", None), ("snaked", None), ] ) _dc_grid_params = StrictOrderedDict( [ ("id", None), ("parentid", None), ("dxInMm", None), ("dyInMm", None), ("stepsX", None), ("stepsY", None), ("meshAngle", None), ("pixelsPerMicronX", None), ("pixelsPerMicronY", None), ("snapshotOffsetXPixel", None), ("snapshotOffsetYPixel", None), ("orientation", None), ("snaked", None), ] ) _dc_position_params = StrictOrderedDict( [ ("id", None), ("pos_x", None), ("pos_y", None), ("pos_z", None), ("scale", None), ] ) _energy_scan_params = StrictOrderedDict( [ ("id", None), ("session_id", None), ("sample_id", None), ("sub_sample_id", None), ("start_time", None), ("end_time", None), ("start_energy", None), ("end_energy", 
None), ("detector", None), ("element", None), ("edge_energy", None), ("synchrotron_current", None), ("temperature", None), ("peak_energy", None), ("peak_f_prime", None), ("peak_f_double_prime", None), ("inflection_energy", None), ("inflection_f_prime", None), ("inflection_f_double_prime", None), ("chooch_file_full_path", None), ("jpeg_chooch_file_full_path", None), ("scan_file_full_path", None), ("beam_size_horizontal", None), ("beam_size_vertical", None), ("exposure_time", None), ("transmission", None), ("flux", None), ("flux_end", None), ("comments", None), ] ) # Is xrayDose populated in EnergyScan? Is it relevant? _fluo_spectrum_params = StrictOrderedDict( [ ("id", None), ("session_id", None), ("sample_id", None), ("sub_sample_id", None), ("start_time", None), ("end_time", None), ("energy", None), ("file_name", None), ("annotated_pymca_spectrum", None), ("fitted_data_file_full_path", None), ("jpeg_scan_file_full_path", None), ("scan_file_full_path", None), ("beam_size_horizontal", None), ("beam_size_vertical", None), ("exposure_time", None), ("transmission", None), ("flux", None), ("flux_end", None), ("comments", None), ] ) _fluo_mapping_params = StrictOrderedDict( [ ("id", None), ("roi_id", None), ("grid_info_id", None), ("data_format", None), ("data", None), ("points", None), ("opacity", 1), ("colour_map", None), ("min", None), ("max", None), ("program_id", None), ] ) _fluo_mapping_roi_params = StrictOrderedDict( [ ("id", None), ("start_energy", None), ("end_energy", None), ("element", None), ("edge", None), ("r", None), ("g", None), ("b", None), ("sample_id", None), ("scalar", None), ] ) def upsert_xray_centring_result( self, result_id=None, grid_info_id=None, method=None, status=None, x=None, y=None, ): """Insert or update the xray centring result associated with a grid info :return: The xray centring result id. 
""" return self.get_connection().call_sp_write( procname="upsert_xray_centring_result", args=[result_id, grid_info_id, method, status, x, y], ) @classmethod def get_dc_position_params(cls): return copy.deepcopy(cls._dc_position_params) def update_dc_position(self, values): """Update the position info associated with a data collection""" return self.get_connection().call_sp_write("update_dc_position", values) @classmethod def get_dcg_grid_params(cls): return copy.deepcopy(cls._dcg_grid_params) def upsert_dcg_grid(self, values): """Insert or update the grid info associated with a data collection group""" return self.get_connection().call_sp_write("upsert_dcg_grid", values) def retrieve_dcg_grid(self, dcgid, auth_login=None): """Retrieve a list of dictionaries containing the grid information for one data collection group id. Raises ISPyBNoResultException if there is no grid information available for the given DCGID. Generally the list will only contain a single dictionary. """ return self.get_connection().call_sp_retrieve( procname="retrieve_grid_info_for_dcg_v2", args=(dcgid, auth_login) ) @classmethod def get_dc_grid_params(cls): return copy.deepcopy(cls._dc_grid_params) def upsert_dc_grid(self, values): """Insert or update the grid info associated with a data collection""" return self.get_connection().call_sp_write("upsert_dc_grid", values) def retrieve_dc_grid(self, dcid, auth_login=None): """Retrieve a list of dictionaries containing the grid information for one data collection id. Raises ISPyBNoResultException if there is no grid information available for the given DCID. Generally the list will only contain a single dictionary. """ return self.get_connection().call_sp_retrieve( procname="retrieve_grid_info_for_dc", args=(dcid, auth_login) ) @classmethod def get_energy_scan_params(cls): return copy.deepcopy(cls._energy_scan_params) def upsert_energy_scan(self, values): """Insert or update energy scan a.k.a. 
edge scan""" return self.get_connection().call_sp_write("upsert_energy_scan", values) @classmethod def get_fluo_spectrum_params(cls): return copy.deepcopy(cls._fluo_spectrum_params) def upsert_fluo_spectrum(self, values): """Insert or update XR fluorescence spectrum a.k.a. MCA spectrum""" return self.get_connection().call_sp_write("upsert_xfe_fluo_spectrum", values) @classmethod def get_fluo_mapping_params(cls): return copy.deepcopy(cls._fluo_mapping_params) def upsert_fluo_mapping(self, values): """Insert or update XR fluorescence mapping""" return self.get_connection().call_sp_write("upsert_fluo_mapping", values) @classmethod def get_fluo_mapping_roi_params(cls): return copy.deepcopy(cls._fluo_mapping_roi_params) def upsert_fluo_mapping_roi(self, values): """Insert or update XR fluorescence mapping region of interest""" return self.get_connection().call_sp_write("upsert_fluo_mapping_roi", values) @classmethod def get_image_params(cls): return copy.deepcopy(cls._image_params) def upsert_image(self, values): """Insert or update MX diffraction image.""" return self.get_connection().call_sf_write("upsert_image", values)
DiamondLightSource/ispyb-api
src/ispyb/sp/mxacquisition.py
Python
apache-2.0
9,112
0.001097
# -*- coding: utf8 -*- import logging from logging.handlers import RotatingFileHandler from babel.dates import format_datetime, datetime from time import sleep from traceback import print_exception, format_exception class LogFile(logging.Logger): '''rotatively logs erverything ''' def initself(self): self.setLevel(logging.DEBUG) self.handler = RotatingFileHandler( 'app.log', # maxBytes=2000, # approximatively 100 line (81) maxBytes=6000, backupCount=3 # number of log backup files ) self.addHandler(self.handler) def p_log(self, msg, **kwargs): '''level = {info, warning, debug, error} you can also use an exception=exc_info() argument to uprising exceptions! ''' logger = self if 'error' in kwargs: print('error YES') kwargs['level'] = 'error' if 'exception' in kwargs: print('exception YES') kwargs['level'] = 'exception' if 'level' in kwargs: level = kwargs['level'] else: level = "info" # warning: error must be a python error formating! if level == 'error': # or whatever you want with more details message = ">> " + kwargs['error'][1].message # exc_info()[1].message eval("logger." + level + "(\"" + message + "\")") elif level == 'exception': message = ">> UPRISING OF AN EXCEPTION!" eval("logger." + level + "(\"" + message + "\")") for line in format_exception(kwargs['exception'][0], kwargs['exception'][1], kwargs['exception'][2]): logger.error(line) else: if 'newline' in kwargs: for i in range(kwargs['newline']): eval("logger." + level + "(\"" + "\")") if 'blank' in kwargs: if kwargs['blank']: message = msg else: message = format_datetime(datetime.now(), "HH:mm:ss", locale='en')\ + " (" + level + ") > "\ + msg eval("logger." + level + "(\"" + message + "\")") if __name__ == '__main__': logger = LogFile('app.log') logger.initself() for i in range(10): sleep(.5) logger.p_log('coucou', level="warning")
littleDad/mesLucioles
logger_04.py
Python
gpl-2.0
2,415
0.003313
# -*- coding: cp1252 -*- #Codename Octohax #To find Octohax offsets on newer versions, dump memory #in that area, eg 0x10500000 to 0x10700000, open in hex #editor, search "Tnk_Simple", there are only 2 results #Also search for Player00 #There should be like a result or two before what you want #Looks like this: ''' .k.Œ.....k. Riva l00.Rival00_Hlf. Rival_Squid.Play er00_anim...Play er_Squid_anim... Player01_anim... Player00....Play er00_Hlf....Play er_Squid....Play er01....Player01 _Hlf....ToSquid. ToHuman.Sqd_Jet. ''' #Then dump 0x12000000 to 0x13000000, search for Tnk_Simple, #should be first result, with three of them in a row with spacing from tcpgecko import TCPGecko import sys sys.argv.append("280") tcp = TCPGecko("192.168.1.82") if sys.argv[1] == "100": #For 1.0.0-? tcp.writestr(0x105068F0, b"Tnk_Rvl00") tcp.writestr(0x1051A500, b"Tnk_Rvl00") tcp.writestr(0x105DBFE0, b"Rival00") tcp.writestr(0x105DBFEC, b"Rival00_Hlf") tcp.writestr(0x105DBFFC, b"Rival_Squid") #tcp.pokemem(0x12CB05A0, 42069) elif sys.argv[1] == "130": #for 1.3.0 tcp.writestr(0x105068F0, b"Tnk_Rvl00") tcp.writestr(0x105D4000, b"Tnk_Rvl00") tcp.writestr(0x105DC118, b"Rival00") tcp.writestr(0x105DC124, b"Rival00_Hlf") tcp.writestr(0x105DC134, b"Rival_Squid") #tcp.pokemem(0x12CB07A0, 42069) elif sys.argv[1] == "200": #For 2.0.0 tcp.writestr(0x10506AB0, b"Tnk_Rvl00") tcp.writestr(0x105E0278, b"Tnk_Rvl00") tcp.writestr(0x105E85B0, b"Rival00") tcp.writestr(0x105E85BC, b"Rival00_Hlf") tcp.writestr(0x105E85CC, b"Rival_Squid") tcp.writestr(0x12BE2350, b"Tnk_Rvl00") tcp.writestr(0x12BE239C, b"Tnk_Rvl00") tcp.writestr(0x12BE23E8, b"Tnk_Rvl00") elif sys.argv[1] == "210": #For 2.1.0 tcp.writestr(0x10506AF8, b"Tnk_Rvl00") tcp.writestr(0x105E0350, b"Tnk_Rvl00") tcp.writestr(0x105E8698, b"Rival00") tcp.writestr(0x105E86A4, b"Rival00_Hlf") tcp.writestr(0x105E86B4, b"Rival_Squid") tcp.writestr(0x12BE2350, b"Tnk_Rvl00") tcp.writestr(0x12BE239C, b"Tnk_Rvl00") tcp.writestr(0x12BE23E8, b"Tnk_Rvl00") 
tcp.pokemem(0x12CC7C80, 0x00000000) #Enforce Female Inkling elif sys.argv[1] == "220": #For 2.2.0 tcp.writestr(0x10506AF8, b"Tnk_Rvl00") tcp.writestr(0x105E0350, b"Tnk_Rvl00") tcp.writestr(0x105EB040, b"Rival00") tcp.writestr(0x105EB04C, b"Rival00_Hlf") tcp.writestr(0x105EB05C, b"Rival_Squid") tcp.writestr(0x12BE5350, b"Tnk_Rvl00") tcp.writestr(0x12BE539C, b"Tnk_Rvl00") tcp.writestr(0x12BE53E8, b"Tnk_Rvl00") tcp.pokemem(0x12CCAC80, 0x00000000) #Enforce Female Inkling elif sys.argv[1] == "230": #For 2.3.0 tcp.writestr(0x10506AF8, b"Tnk_Rvl00") tcp.writestr(0x105E3BB8, b"Tnk_Rvl00") tcp.writestr(0x105EBF98, b"Rival00") tcp.writestr(0x105EBFA4, b"Rival00_Hlf") tcp.writestr(0x105EBFB4, b"Rival_Squid") tcp.writestr(0x12BE6350, b"Tnk_Rvl00") tcp.writestr(0x12BE639C, b"Tnk_Rvl00") tcp.writestr(0x12BE63E8, b"Tnk_Rvl00") tcp.pokemem(0x12CCBB90, 0x00000000) #Enforce Female Inkling elif sys.argv[1] == "240": #For 2.4.0 tcp.writestr(0x10506AF8, b"Tnk_Rvl00") tcp.writestr(0x105E4EA0, b"Tnk_Rvl00") tcp.writestr(0x105ED7B8, b"Rival00") tcp.writestr(0x105ED7C4, b"Rival00_Hlf") tcp.writestr(0x105ED7D4, b"Rival_Squid") tcp.writestr(0x12BE8350, b"Tnk_Rvl00") tcp.writestr(0x12BE839C, b"Tnk_Rvl00") tcp.writestr(0x12BE83E8, b"Tnk_Rvl00") tcp.pokemem(0x12CCDB90, 0x00000000) #Enforce Female Inkling elif sys.argv[1] == "250": #For 2.5.0 tcp.writestr(0x10506AF8, b"Tnk_Rvl00") tcp.writestr(0x105E4EB8, b"Tnk_Rvl00") tcp.writestr(0x105ED7D0, b"Rival00") tcp.writestr(0x105ED7DC, b"Rival00_Hlf") #Don't really need squid, looks bad without proper bone offsets #tcp.writestr(0x105ED7D4, b"Rival_Squid") tcp.writestr(0x12BE8350, b"Tnk_Rvl00") tcp.writestr(0x12BE839C, b"Tnk_Rvl00") tcp.writestr(0x12BE83E8, b"Tnk_Rvl00") tcp.pokemem(0x12CCDB90, 0x00000000) #Enforce Female Inkling elif sys.argv[1] == "260": #For 2.6.0 tcp.writestr(0x10506B28, b"Tnk_Rvl00") tcp.writestr(0x105E59B8, b"Tnk_Rvl00") tcp.writestr(0x105EE350, b"Rival00") tcp.writestr(0x105EE35C, b"Rival00_Hlf") #Don't really need squid, looks 
bad without proper bone offsets #tcp.writestr(0x105EE36C, b"Rival_Squid") tcp.writestr(0x12BE9354, b"Tnk_Rvl00") tcp.writestr(0x12BE93A0, b"Tnk_Rvl00") tcp.writestr(0x12BE93EC, b"Tnk_Rvl00") tcp.pokemem(0x12CCF990, 0x00000000) #Enforce Female Inkling elif sys.argv[1] == "270": #For 2.7.0 tcp.writestr(0x10506B58, b"Tnk_Rvl00") tcp.writestr(0x105E5F40, b"Tnk_Rvl00") tcp.writestr(0x105EE968, b"Rival00") tcp.writestr(0x105EE974, b"Rival00_Hlf") #Don't really need squid, looks bad without proper bone offsets #tcp.writestr(0x105EE984, b"Rival_Squid") tcp.writestr(0x12BEA354, b"Tnk_Rvl00") tcp.writestr(0x12BEA3A0, b"Tnk_Rvl00") tcp.writestr(0x12BEA3EC, b"Tnk_Rvl00") tcp.pokemem(0x12CD0D90, 0x00000000) #Enforce Female Inkling elif sys.argv[1] == "280": #For 2.8.0 tcp.writestr(0x10506B58, b"Tnk_Rvl00") tcp.writestr(0x105E6000, b"Tnk_Rvl00") tcp.writestr(0x105EEA28, b"Rival00") tcp.writestr(0x105EEA34, b"Rival00_Hlf") #Don't really need squid, looks bad without proper bone offsets #tcp.writestr(0x105EE9A44, b"Rival_Squid") tcp.writestr(0x12C1F354, b"Tnk_Rvl00") tcp.writestr(0x12C1F3A0, b"Tnk_Rvl00") tcp.writestr(0x12C1F3EC, b"Tnk_Rvl00") tcp.pokemem(0x12D05D90, 0x00000000) #Enforce Female Inkling tcp.s.close() print("Done.")
XBigTK13X/wiiu-memshark
vendor/tcpgecko/octoling.py
Python
mit
5,604
0.010171
from __future__ import division import math import numpy as np import networkx as nx from sklearn.preprocessing import normalize from kilogram import NgramService class Signature(object): vector = None mapping = None def __init__(self, vector, G, candidate_uris): """ :type candidate_uris: set """ self.vector = vector self.mapping = [] for prob, uri in zip(vector, G.nodes()): if uri in candidate_uris: self.mapping.append((prob, uri)) self.mapping.sort(reverse=True) def __repr__(self): return str(self.mapping[:10]) def _mention_uri(uri, mention): return mention.replace(' ', '_')+'|'+uri def _candidate_filter(candidates): def string_similar(candidate_, topn=10): substring_similar = [e for e in candidate_.entities if set(candidate_.cand_string.lower().split()).intersection(e.uri.lower().split('_'))] if len(substring_similar) >= topn: return substring_similar substring_similar2 = [e for e in candidate_.entities if candidate_.cand_string in e.uri.replace('_', ' ')] substring_similar.extend(substring_similar2) return substring_similar[:topn] def top_prior(candidate_, topn=10): return sorted(candidate_.entities, key=lambda e: e.count, reverse=True)[:topn] for candidate in candidates: entities = top_prior(candidate) uris = set(e.uri for e in entities) entities.extend([e for e in string_similar(candidate) if e.uri not in uris]) candidate.entities = entities ALPHA = 0.15 # restart probability class SemanticGraph: G = None candidates = None matrix = None # map candidate urls to indexes in the matrix index_map = None candidate_uris = None def __init__(self, candidates): self.G = nx.Graph() _candidate_filter(candidates) self.candidates = candidates neighbors = {} self.index_map = {} #self.candidate_uris1 = set() #for cand in candidates: # self.candidate_uris1.add(cand.cand_string) self.candidate_uris = set() for cand in candidates: total = sum([e.count for e in cand.entities]) for e in cand.entities: mention_uri = _mention_uri(e.uri, cand.cand_string) 
self.candidate_uris.add(mention_uri) neighbors[mention_uri] = NgramService.get_wiki_link_mention_cooccur(mention_uri) # delete self try: del neighbors[mention_uri][mention_uri] except KeyError: pass for neighbor, weight in neighbors[mention_uri].iteritems(): #if neighbor.split('|')[0] not in self.candidate_uris1: # continue if self.G.has_edge(mention_uri, neighbor): continue try: self.G.add_edge(mention_uri, neighbor, {'w': int(weight)}) # happens because of malformed links except ValueError: pass # always add candidates self.G.add_node(mention_uri, {'prior': e.count/total}) # prune 1-degree edges except original candidates to_remove = set() for node, degree in self.G.degree_iter(): if degree <= 1: to_remove.add(node) to_remove = to_remove.difference(self.candidate_uris) self.G.remove_nodes_from(to_remove) if self.G.number_of_nodes() > 0: self.matrix = nx.to_scipy_sparse_matrix(self.G, weight='w', dtype=np.float64) self.matrix = normalize(self.matrix, norm='l1', axis=1) for i, uri in enumerate(self.G.nodes()): self.index_map[uri] = i def _get_entity_teleport_v(self, i): teleport_vector = np.zeros((self.matrix.shape[0], 1), dtype=np.float64) teleport_vector[i] = 1-ALPHA return np.matrix(teleport_vector) def _get_doc_teleport_v(self): teleport_vector = np.zeros((self.matrix.shape[0], 1), dtype=np.float64) resolved = [self.index_map[_mention_uri(x.resolved_true_entity, x.cand_string)] for x in self.candidates if x.resolved_true_entity is not None] if len(resolved) > 0: for i in resolved: teleport_vector[i] = 1-ALPHA else: # assign according to prior probabilities for candidate in self.candidates: total_uri_count = sum([e.count for e in candidate.entities], 1) for e in candidate.entities: teleport_vector[self.index_map.get(_mention_uri(e.uri, candidate.cand_string))] = e.count/total_uri_count return np.matrix(teleport_vector) def _learn_eigenvector(self, teleport_vector): pi = np.matrix(np.zeros(teleport_vector.shape)) prev_norm = 0 for _ in range(10000): pi = 
self.matrix*pi*ALPHA + teleport_vector cur_norm = np.linalg.norm(pi) pi /= cur_norm if prev_norm and abs(cur_norm - prev_norm) < 0.00001: break prev_norm = cur_norm return np.ravel(pi/pi.sum()) def doc_signature(self): """compute document signature""" return Signature(self._learn_eigenvector(self._get_doc_teleport_v()), self.G, self.candidate_uris) def compute_signature(self, mention_uri): sig = Signature(self._learn_eigenvector(self._get_entity_teleport_v(self.index_map[mention_uri])), self.G, self.candidate_uris) return sig def _zero_kl_score(self, p, q): """ :type p: Signature :type q: Signature :return: Zero Kullback-Leiber divergence score """ total = 0 for p_i, q_i in zip(p.vector, q.vector): if q_i == 0: total += p_i*20 elif p_i > 0: total += p_i*math.log(p_i/q_i) return total def do_linking(self): # link unambiguous first for candidate in self.candidates: if len(candidate.entities) == 1: candidate.resolved_true_entity = candidate.entities[0].uri for candidate in sorted(self.candidates, key=lambda x: len(x.entities)): if candidate.truth_data['uri'] is None: continue if not candidate.entities or candidate.resolved_true_entity: continue doc_sign = self.doc_signature() cand_scores = [] for e in candidate.entities: e_sign = self.compute_signature(_mention_uri(e.uri, candidate.cand_string)) # global similarity + local (prior prob) sem_sim = 1/self._zero_kl_score(e_sign, doc_sign) cand_scores.append((e.uri, sem_sim)) max_uri, score = max(cand_scores, key=lambda x: x[1]) candidate.resolved_true_entity = max_uri if candidate.resolved_true_entity != candidate.truth_data['uri']: print candidate, candidate.truth_data['uri']
dragoon/kilogram
kilogram/entity_linking/mention_rw/__init__.py
Python
apache-2.0
7,287
0.002196
# https://projecteuler.net/problem=1 # If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23. # Find the sum of all the multiples of 3 or 5 below 1000. # = 233168 import sys def sum(n): total = 0 for i in range(n): if (i % 3 == 0) or (i % 5 == 0): total += i return total n = 20 if len(sys.argv) == 2: n = int(sys.argv[1]) print(sum(n))
weyw/eulerproject
wey/p1.py
Python
gpl-2.0
458
0.00655
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import unicode_literals import copy import glob import json import os import unittest from pymatgen import Molecule from pymatgen.io.qchem import QcTask, QcInput, QcOutput from pymatgen.util.testing import PymatgenTest __author__ = 'xiaohuiqu' test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", 'test_files', "molecules") coords = [[0.000000, 0.000000, 0.000000], [0.000000, 0.000000, 1.089000], [1.026719, 0.000000, -0.363000], [-0.513360, -0.889165, -0.363000], [-0.513360, 0.889165, -0.363000]] mol = Molecule(["C", "H", "H", "H", "Cl"], coords) coords2 = [[0.0, 0.0, -2.4], [0.0, 0.0, 0.0], [0.0, 0.0, 2.4]] heavy_mol = Molecule(["Br", "Cd", "Br"], coords2) coords3 = [[2.632273, -0.313504, -0.750376], [3.268182, -0.937310, -0.431464], [2.184198, -0.753305, -1.469059]] water_mol = Molecule(["O", "H", "H"], coords3) class QcTaskTest(PymatgenTest): def elementary_io_verify(self, text, qctask): self.to_and_from_dict_verify(qctask) self.from_string_verify(contents=text, ref_dict=qctask.as_dict()) def to_and_from_dict_verify(self, qctask): """ Helper function. This function should be called in each specific test. 
""" d1 = qctask.as_dict() qc2 = QcTask.from_dict(d1) d2 = qc2.as_dict() self.assertEqual(d1, d2) def from_string_verify(self, contents, ref_dict): qctask = QcTask.from_string(contents) d2 = qctask.as_dict() self.assertEqual(ref_dict, d2) def test_read_zmatrix(self): contents = '''$moLEcule 1 2 S C 1 1.726563 H 2 1.085845 1 119.580615 C 2 1.423404 1 114.230851 3 -180.000000 0 H 4 1.084884 2 122.286346 1 -180.000000 0 C 4 1.381259 2 112.717365 1 0.000000 0 H 6 1.084731 4 127.143779 2 -180.000000 0 C 6 1.415867 4 110.076147 2 0.000000 0 F 8 1.292591 6 124.884374 4 -180.000000 0 $end $reM BASIS = 6-31+G* EXCHANGE = B3LYP jobtype = freq $end ''' qctask = QcTask.from_string(contents) ans = '''$molecule 1 2 S 0.00000000 0.00000000 0.00000000 C 0.00000000 0.00000000 1.72656300 H -0.94431813 0.00000000 2.26258784 C 1.29800105 -0.00000002 2.31074808 H 1.45002821 -0.00000002 3.38492732 C 2.30733813 -0.00000003 1.36781908 H 3.37622632 -0.00000005 1.55253338 C 1.75466906 -0.00000003 0.06427152 F 2.44231414 -0.00000004 -1.03023099 $end $rem jobtype = freq exchange = b3lyp basis = 6-31+g* $end ''' ans_tokens = ans.split('\n') ans_text_part = ans_tokens[:2] + ans_tokens[11:] ans_coords_part = ans_tokens[2:11] converted_tokens = str(qctask).split('\n') converted_text_part = converted_tokens[:2] + converted_tokens[11:] converted_coords_part = converted_tokens[2:11] self.assertEqual(ans_text_part, converted_text_part) for ans_coords, converted_coords in zip(ans_coords_part, converted_coords_part): ans_coords_tokens = ans_coords.split() converted_coords_tokens = converted_coords.split() self.assertEqual(ans_coords_tokens[0], converted_coords_tokens[0]) xyz1 = ans_coords_tokens[1:] xyz2 = converted_coords_tokens[1:] for t1, t2 in zip(xyz1, xyz2): self.assertTrue(abs(float(t1)-float(t2)) < 0.0001) def test_no_mol(self): ans = '''$comment Test Methane $end $molecule -1 2 read $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* $end ''' qctask = QcTask(molecule="READ", title="Test 
Methane", exchange="B3LYP", jobtype="SP", charge=-1, spin_multiplicity=2, basis_set="6-31+G*") self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_simple_basis_str(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_fragmented_molecule(self): mol1 = copy.deepcopy(mol) mol1.set_charge_and_spin(1, 2) mol2 = copy.deepcopy(water_mol) mol2.set_charge_and_spin(-1, 2) qctask = QcTask([mol1, mol2], title="Test Fragments", exchange="B3LYP", jobtype="bsse", charge=0, spin_multiplicity=3, basis_set="6-31++G**") ans = """$comment Test Fragments $end $molecule 0 3 -- 1 2 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 -- -1 2 O 2.63227300 -0.31350400 -0.75037600 H 3.26818200 -0.93731000 -0.43146400 H 2.18419800 -0.75330500 -1.46905900 $end $rem jobtype = bsse exchange = b3lyp basis = 6-31++g** $end """ self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_mixed_basis_str(self): qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set=[("C", "6-311G*"), ("H", "6-31g(d,p)"), ("H", "6-31g(d,p)"), ("H", "6-31g*"), ("cl", "6-31+g*")]) ans_mixed = """$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = mixed $end $basis C 1 
6-311g* **** H 2 6-31g(d,p) **** H 3 6-31g(d,p) **** H 4 6-31g* **** Cl 5 6-31+g* **** $end """ self.assertEqual(ans_mixed, str(qctask)) self.elementary_io_verify(ans_mixed, qctask) qctask.set_basis_set("6-31+G*") ans_simple = """$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* $end """ self.assertEqual(str(qctask), ans_simple) qctask.set_basis_set([("C", "6-311G*"), ("H", "6-31g(d,p)"), ("H", "6-31g(d,p)"), ("H", "6-31g*"), ("cl", "6-31+g*")]) self.assertEqual(str(qctask), ans_mixed) self.elementary_io_verify(ans_mixed, qctask) def test_opt_constraint_str(self): opt_coords = [[-1.8438708, 1.7639844, 0.0036111], [-0.3186117, 1.7258535, 0.0241264], [0.1990523, 0.2841796, -0.0277432], [1.7243049, 0.2460376, -0.0067397], [-2.1904881, 2.8181992, 0.0419217], [-2.2554858, 1.2221552, 0.8817436], [-2.2293542, 1.2964646, -0.9274861], [0.0400963, 2.2185950, 0.9541706], [0.0663274, 2.2929337, -0.8514870], [-0.1594453, -0.2084377, -0.9579392], [-0.1860888, -0.2830148, 0.8477023], [2.1362687, 0.7881530, -0.8845274], [2.0709344, -0.8081667, -0.0452220], [2.1094213, 0.7132527, 0.9246668]] opt_mol = Molecule(["C", "C", "C", "C", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H"], opt_coords) constraint_dict = {'opt': [['tors', 1, 2, 3, 4, 180.0]]} ans = """$molecule 0 1 C -1.84387080 1.76398440 0.00361110 C -0.31861170 1.72585350 0.02412640 C 0.19905230 0.28417960 -0.02774320 C 1.72430490 0.24603760 -0.00673970 H -2.19048810 2.81819920 0.04192170 H -2.25548580 1.22215520 0.88174360 H -2.22935420 1.29646460 -0.92748610 H 0.04009630 2.21859500 0.95417060 H 0.06632740 2.29293370 -0.85148700 H -0.15944530 -0.20843770 -0.95793920 H -0.18608880 -0.28301480 0.84770230 H 2.13626870 0.78815300 -0.88452740 H 2.07093440 -0.80816670 -0.04522200 H 2.10942130 0.71325270 
0.92466680 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* $end $opt CONSTRAINT tors 1 2 3 4 180.0 ENDCONSTRAINT $end """ qctask = QcTask(opt_mol, exchange="B3LYP", jobtype="SP", basis_set="6-31+G*", optional_params=constraint_dict) self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_partial_hessian(self): qcinp1 = QcInput.from_file(os.path.join(test_dir, "partial_hessian.qcinp")) ans = """$molecule 0 1 C -1.76827000 0.46495000 0.28695000 O 1.78497000 -0.42034000 -0.39845000 H -0.77736000 0.78961000 0.66548000 H -1.75896000 0.46604000 -0.82239000 H -2.54983000 1.16313000 0.65101000 H -1.98693000 -0.55892000 0.65381000 H 2.14698000 -0.07173000 0.45530000 H 1.25596000 -1.21510000 -0.13726000 $end $rem jobtype = freq exchange = b3lyp basis = 6-31g* n_sol = 3 phess = true $end $alist 3 7 8 $end """ self.assertEqual(ans, str(qcinp1)) self.elementary_io_verify(ans, qcinp1.jobs[0]) qcinp1.jobs[0].params["rem"]["jobtype"] = "sp" qcinp1.jobs[0].params["rem"]["phess"] = 3 qcinp1.jobs[0].set_partial_hessian_atoms([2, 3, 4, 5, 6]) ans = """$molecule 0 1 C -1.76827000 0.46495000 0.28695000 O 1.78497000 -0.42034000 -0.39845000 H -0.77736000 0.78961000 0.66548000 H -1.75896000 0.46604000 -0.82239000 H -2.54983000 1.16313000 0.65101000 H -1.98693000 -0.55892000 0.65381000 H 2.14698000 -0.07173000 0.45530000 H 1.25596000 -1.21510000 -0.13726000 $end $rem jobtype = freq exchange = b3lyp basis = 6-31g* n_sol = 5 phess = True $end $alist 2 3 4 5 6 $end """ self.assertEqual(ans, str(qcinp1)) def test_basis2_mixed(self): qcinp1 = QcInput.from_file(os.path.join(test_dir, "basis2_mixed.inp")) ans = """$molecule 0 1 C -1.76827000 0.46495000 0.28695000 O 1.78497000 -0.42034000 -0.39845000 H -0.77736000 0.78961000 0.66548000 H -1.75896000 0.46604000 -0.82239000 H -2.54983000 1.16313000 0.65101000 H -1.98693000 -0.55892000 0.65381000 H 2.14698000 -0.07173000 0.45530000 H 1.25596000 -1.21510000 -0.13726000 $end $rem jobtype = sp exchange = b3lyp basis = 
mixed basis2 = basis2_mixed purecart = 1111 $end $basis C 1 6-311+g(3df) **** O 2 aug-cc-pvtz **** H 3 6-31g* **** H 4 6-31g* **** H 5 6-31g* **** H 6 6-31g* **** H 7 cc-pvdz **** H 8 cc-pvdz **** $end $basis2 C 1 sto-3g **** O 2 sto-3g **** H 3 sto-3g **** H 4 sto-3g **** H 5 sto-3g **** H 6 sto-3g **** H 7 sto-3g **** H 8 sto-3g **** $end """ self.assertEqual(str(qcinp1), ans) self.elementary_io_verify(ans, qcinp1.jobs[0]) basis2 = qcinp1.jobs[0].params["basis2"] qcinp2 = copy.deepcopy(qcinp1) qcinp2.jobs[0].set_basis2("3-21g") self.assertEqual(qcinp2.jobs[0].params["rem"]["basis2"], "3-21g") self.assertFalse("basis2" in qcinp2.jobs[0].params) qcinp2.jobs[0].set_basis2(basis2) self.assertEqual(str(qcinp2), ans) def test_aux_basis_str(self): ans_gen = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = freq exchange = xygjos basis = gen aux_basis = gen $end $aux_basis C rimp2-cc-pvdz **** Cl rimp2-aug-cc-pvdz **** H rimp2-cc-pvdz **** $end $basis C 6-31g* **** Cl 6-31+g* **** H 6-31g* **** $end ''' qctask = QcTask(mol, title="Test Methane", exchange="xygjos", jobtype="Freq", basis_set={"C": "6-31G*", "h": "6-31g*", "CL": "6-31+g*"}, aux_basis_set={"c": "rimp2-cc-pvdz", "H": "rimp2-cc-pvdz", "Cl": "rimp2-aug-cc-pvdz"}) self.assertEqual(str(qctask), ans_gen) self.elementary_io_verify(ans_gen, qctask) qctask.set_auxiliary_basis_set([("C", "aug-cc-pvdz"), ("H", "cc-pvdz"), ("H", "cc-pvdz"), ("H", "cc-pvdz"), ("cl", "rimp2-aug-cc-pvdz")]) ans_mixed_aux = """$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = freq exchange = xygjos basis = gen aux_basis = mixed $end $aux_basis C 1 aug-cc-pvdz 
**** H 2 cc-pvdz **** H 3 cc-pvdz **** H 4 cc-pvdz **** Cl 5 rimp2-aug-cc-pvdz **** $end $basis C 6-31g* **** Cl 6-31+g* **** H 6-31g* **** $end """ self.assertEqual(ans_mixed_aux, str(qctask)) self.elementary_io_verify(ans_mixed_aux, qctask) qctask.set_basis_set("6-31+G*") qctask.set_auxiliary_basis_set("rimp2-cc-pvdz") ans_simple = """$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = freq exchange = xygjos basis = 6-31+g* aux_basis = rimp2-cc-pvdz $end """ self.assertEqual(ans_simple, str(qctask)) self.elementary_io_verify(ans_simple, qctask) qctask.set_basis_set({"C": "6-31G*", "h": "6-31g*", "CL": "6-31+g*"}) qctask.set_auxiliary_basis_set([("C", "aug-cc-pvdz"), ("H", "cc-pvdz"), ("H", "cc-pvdz"), ("H", "cc-pvdz"), ("cl", "rimp2-aug-cc-pvdz")]) self.assertEqual(ans_mixed_aux, str(qctask)) self.elementary_io_verify(ans_mixed_aux, qctask) def test_ecp_str(self): ans = '''$comment Test ECP $end $molecule 0 1 Br 0.00000000 0.00000000 -2.40000000 Cd 0.00000000 0.00000000 0.00000000 Br 0.00000000 0.00000000 2.40000000 $end $rem jobtype = opt exchange = b3lyp basis = gen ecp = gen $end $basis Br srlc **** Cd srsc **** $end $ecp Br srlc **** Cd srsc **** $end ''' qctask = QcTask(heavy_mol, title="Test ECP", exchange="B3LYP", jobtype="Opt", basis_set={"Br": "srlc", "Cd": "srsc"}, ecp={"Br": "SrlC", "Cd": "srsc"}) self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_set_memory(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* mem_static = 500 mem_total = 18000 $end ''' qctask = QcTask(mol, title="Test Methane", 
exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.set_memory(total=18000, static=500) self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_qc42_pcm_solvent_format(self): text = '''$molecule -1 2 N -0.00017869 0.00010707 0.20449990 H 0.89201838 0.20268122 -0.29656572 H -0.62191133 0.67135171 -0.29649162 H -0.26987729 -0.87406458 -0.29659779 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* solvent_method = pcm $end $pcm theory ssvpe vdwscale 1.1 $end $pcm_solvent dielectric 78.3553 $end ''' qctask_qc41 = QcTask.from_string(text) qctask_qc42 = copy.deepcopy(qctask_qc41) solvent_params = qctask_qc42.params.pop("pcm_solvent") qctask_qc42.params["solvent"] = solvent_params ans = '''$molecule -1 2 N -0.00017869 0.00010707 0.20449990 H 0.89201838 0.20268122 -0.29656572 H -0.62191133 0.67135171 -0.29649162 H -0.26987729 -0.87406458 -0.29659779 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* solvent_method = pcm $end $pcm theory ssvpe vdwscale 1.1 $end $solvent dielectric 78.3553 $end ''' self.assertEqual(str(qctask_qc42), ans) self.elementary_io_verify(ans, qctask_qc42) def test_set_max_num_of_scratch_files(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* max_sub_file_num = 500 $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.set_max_num_of_scratch_files(500) self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_set_max_scf_iterations(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = 
sp exchange = b3lyp basis = 6-31+g* max_scf_cycles = 100 scf_algorithm = diis_gdm $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.set_scf_algorithm_and_iterations(algorithm="diis_gdm", iterations=100) self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_set_scf_convergence_threshold(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* scf_convergence = 8 $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.set_scf_convergence_threshold(exponent=8) self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_set_integral_threshold(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* thresh = 14 $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.set_integral_threshold(thresh=14) self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_set_dft_grid(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* xc_grid = 000110000590 $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.set_dft_grid(radical_points=110, angular_points=590) 
self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_set_scf_initial_guess(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* scf_guess = gwh $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.set_scf_initial_guess("GWH") self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_geom_opt_max_cycles(self): ans = '''$comment Test Methane $end $molecule 1 2 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* geom_opt_max_cycles = 100 $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", charge=1, spin_multiplicity=2, basis_set="6-31+G*") qctask.set_geom_max_iterations(100) self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_set_geom_opt_coords_type(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* geom_opt_coords = 0 $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.set_geom_opt_coords_type("cartesian") self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_scale_geom_opt_threshold(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H 
-0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* geom_opt_tol_displacement = 120 geom_opt_tol_energy = 10 geom_opt_tol_gradient = 30 $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.scale_geom_opt_threshold(gradient=0.1, displacement=0.1, energy=0.1) self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_set_geom_opt_use_gdiis(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* geom_opt_max_diis = -1 $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.set_geom_opt_use_gdiis() self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_disable_symmetry(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* sym_ignore = True symmetry = False $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.disable_symmetry() self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_use_cosmo(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* solvent_dielectric = 35.0 solvent_method = cosmo $end ''' qctask = QcTask(mol, title="Test 
Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.use_cosmo(dielectric_constant=35.0) self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_wrap_comment(self): ans = '''$comment 5_2_2_methoxyethoxy_ethoxy_6_nitro_1_3_dihydro_2_1_3_benzothiadiazole singlet neutral B3lYP/6-31+G* geometry optimization $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* $end ''' qctask = QcTask(mol, title=" 5_2_2_methoxyethoxy_ethoxy_6_nitro_1_3_dihydro_2_1_3_benzothiadiazole singlet " "neutral B3lYP/6-31+G* geometry optimization", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) title = ''' MgBPh42 singlet neutral PBE-D3/6-31+G* geometry optimization <SCF Fix Strategy>{ "current_method_id": 1, "methods": [ "increase_iter", "diis_gdm", "gwh", "rca", "gdm", "core+gdm" ] }</SCF Fix Strategy>''' ans = '''$comment MgBPh42 singlet neutral PBE-D3/6-31+G* geometry optimization <SCF Fix Strategy>{ "current_method_id": 1, "methods": [ "increase_iter", "diis_gdm", "gwh", "rca", "gdm", "core+gdm" ] }</SCF Fix Strategy> $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* $end ''' qctask = QcTask(mol, title=title, exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) title = " 5_2_2_methoxyethoxy_ethoxy_6_nitro_1_3_dihydro_2_1_3_benzothiadiazole singlet neutral " \ "B3lYP/6-31+G* geometry optimization" + \ '''<SCF Fix Strategy>{ "current_method_id": 1, "methods": [ "increase_iter", "diis_gdm", "gwh", 
"rca", "gdm", "core+gdm" ] }</SCF Fix Strategy>''' qctask = QcTask(mol, title=title, exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") self.elementary_io_verify(str(qctask), qctask) def test_use_pcm_qc41(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* solvent_method = pcm $end $pcm radii uff theory ssvpe vdwscale 1.1 $end $pcm_solvent dielectric 78.3553 $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.use_pcm(solvent_key="pcm_solvent") self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.use_pcm(pcm_params={"Radii": "FF", "Theory": "CPCM", "SASrad": 1.5, "HPoints": 1202}, solvent_params={"Dielectric": 20.0, "Temperature": 300.75, "NSolventAtoms": 2, "SolventAtom": [[8, 1, 186, 1.30], [1, 2, 187, 1.01]]}, radii_force_field="OPLSAA", solvent_key="pcm_solvent") ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* force_fied = oplsaa solvent_method = pcm $end $pcm hpoints 1202 radii bondi sasrad 1.5 theory cpcm vdwscale 1.1 $end $pcm_solvent dielectric 20.0 nsolventatoms 2 solventatom 8 1 186 1.30 solventatom 1 2 187 1.01 temperature 300.75 $end ''' self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_use_pcm_qc42(self): ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H 
-0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* solvent_method = pcm $end $pcm radii uff theory ssvpe vdwscale 1.1 $end $solvent dielectric 78.3553 $end ''' qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.use_pcm() self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) qctask = QcTask(mol, title="Test Methane", exchange="B3LYP", jobtype="SP", basis_set="6-31+G*") qctask.use_pcm(pcm_params={"Radii": "FF", "Theory": "CPCM", "SASrad": 1.5, "HPoints": 1202}, solvent_params={"Dielectric": 20.0, "Temperature": 300.75, "NSolventAtoms": 2, "SolventAtom": [[8, 1, 186, 1.30], [1, 2, 187, 1.01]]}, radii_force_field="OPLSAA") ans = '''$comment Test Methane $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* force_fied = oplsaa solvent_method = pcm $end $pcm hpoints 1202 radii bondi sasrad 1.5 theory cpcm vdwscale 1.1 $end $solvent dielectric 20.0 nsolventatoms 2 solventatom 8 1 186 1.30 solventatom 1 2 187 1.01 temperature 300.75 $end ''' self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) def test_ghost_atoms(self): qctask = QcTask(mol, charge=0, spin_multiplicity=1, exchange="B3LYP", ghost_atoms=[2, 4]) ans = """$molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 @H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 @Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = sp exchange = b3lyp basis = 6-31+g* $end """ self.assertEqual(str(qctask), ans) self.elementary_io_verify(ans, qctask) mol1 = copy.deepcopy(mol) mol1.set_charge_and_spin(1, 2) mol2 = copy.deepcopy(water_mol) mol2.set_charge_and_spin(-1, 2) qctask = QcTask([mol1, mol2], 
title="Test Fragments", exchange="B3LYP", jobtype="bsse", charge=0, spin_multiplicity=3, basis_set="6-31++G**", ghost_atoms=[1, 2, 3, 5]) self.elementary_io_verify(str(qctask), qctask) qctask = QcTask(mol, charge=0, spin_multiplicity=2, exchange="B3LYP", ghost_atoms=[2]) self.assertEqual(qctask.spin_multiplicity, 2) class TestQcInput(PymatgenTest): def test_str_and_from_string(self): ans = '''$comment Test Methane Opt $end $molecule 0 1 C 0.00000000 0.00000000 0.00000000 H 0.00000000 0.00000000 1.08900000 H 1.02671900 0.00000000 -0.36300000 H -0.51336000 -0.88916500 -0.36300000 Cl -0.51336000 0.88916500 -0.36300000 $end $rem jobtype = opt exchange = b3lyp basis = 6-31+g* $end @@@ $comment Test Methane Frequency $end $molecule read $end $rem jobtype = freq exchange = b3lyp basis = 6-31+g* $end @@@ $comment Test Methane Single Point Energy $end $molecule read $end $rem jobtype = sp exchange = b3lyp basis = 6-311+g(3df,2p) $end ''' qctask1 = QcTask(mol, title="Test Methane Opt", exchange="B3LYP", jobtype="Opt", basis_set="6-31+G*") qctask2 = QcTask(molecule="read", title="Test Methane Frequency", exchange="B3LYP", jobtype="Freq", basis_set="6-31+G*") qctask3 = QcTask(title="Test Methane Single Point Energy", exchange="B3LYP", jobtype="SP", basis_set="6-311+G(3df,2p)") qcinp1 = QcInput(jobs=[qctask1, qctask2, qctask3]) self.assertEqual(str(qcinp1), ans) qcinp2 = QcInput.from_string(ans) self.assertEqual(qcinp1.as_dict(), qcinp2.as_dict()) qcinp_mgbf4 = QcInput.from_file(os.path.join(test_dir, "MgBF4_b_overalpped.qcinp")) self.assertEqual(qcinp_mgbf4.jobs[0].ghost_atoms, [0]) def test_to_and_from_dict(self): qctask1 = QcTask(mol, title="Test Methane Opt", exchange="B3LYP", jobtype="Opt", basis_set="6-31+G*") qctask2 = QcTask(molecule="read", title="Test Methane Frequency", exchange="B3LYP", jobtype="Freq", basis_set="6-31+G*") qctask3 = QcTask(title="Test Methane Single Point Energy", exchange="B3LYP", jobtype="SP", basis_set="6-311+G(3df,2p)") qcinp1 = 
QcInput(jobs=[qctask1, qctask2, qctask3]) d1 = qcinp1.as_dict() qcinp2 = QcInput.from_dict(d1) d2 = qcinp2.as_dict() self.assertEqual(d1, d2) class TestQcOutput(PymatgenTest): def test_energy(self): ref_energies_text = ''' { "hf-rimp2.qcout": { "RIMP2": -2726.6860779805256, "SCF": -2721.541435904716 }, "hf_b3lyp.qcout": { "SCF": -2733.1747178920828 }, "hf_ccsd(t).qcout": { "CCSD": -2726.7627121001865, "CCSD(T)": -2726.8283514003333, "MP2": -2726.685664155242, "SCF": -2721.5414360843106 }, "hf_cosmo.qcout": { "SCF": -2721.1752937496067 }, "hf_hf.qcout": { "SCF": -2721.541435904716 }, "hf_lxygjos.qcout": { "SCF": -2724.0769973875713, "XYGJ-OS": -2726.3445157759393 }, "hf_mosmp2.qcout": { "MOS-MP2": -2725.302538779482, "SCF": -2721.541435904716 }, "hf_mp2.qcout": { "MP2": -2726.685661962005, "SCF": -2721.541435904716 }, "hf_pcm.qcout": { "SCF": -2720.703940318968 }, "hf_qcisd(t).qcout": { "QCISD": -2726.7853751012344, "QCISD(T)": -2726.8346541282745, "SCF": -2721.5414360843106 }, "hf_riccsd(t).qcout": { "CCSD": -2726.7641790658904, "CCSD(T)": -2726.829853468723, "MP2": -2726.6860802173014, "SCF": -2721.5414360843106 }, "hf_tpssh.qcout": { "SCF": -2732.938974944255 }, "hf_xyg3.qcout": { "SCF": -2728.769906036435, "XYG3": -2731.0640917605806 }, "hf_xygjos.qcout": { "SCF": -2724.0769973875713, "XYGJ-OS": -2726.3447230967517 } }''' ref_energies = json.loads(ref_energies_text) parsed_energies = dict() # noinspection PyUnresolvedReferences for filename in glob.glob(os.path.join(test_dir, "qchem_energies", "*.qcout")): molname = os.path.basename(filename) qcout = QcOutput(filename) d = dict(qcout.data[0]["energies"]) parsed_energies[molname] = d self.assertEqual(sorted(ref_energies.keys()), sorted(parsed_energies.keys())) mols = sorted(ref_energies.keys()) for molname in mols: self.assertEqual(sorted(ref_energies[molname].keys()), sorted(parsed_energies[molname].keys())) methods = sorted(ref_energies[molname].keys()) for method in methods: 
self.assertAlmostEqual(ref_energies[molname][method], parsed_energies[molname][method], 2) def test_unable_to_determine_lambda_in_geom_opt(self): filename = os.path.join(test_dir, "unable_to_determine_lambda_in_geom_opt.qcout") qcout = QcOutput(filename) self.assertTrue(qcout.data[0]['has_error']) self.assertEqual(qcout.data[0]['errors'], ['Lamda Determination Failed', 'Geometry optimization failed']) def test_geom_opt(self): filename = os.path.join(test_dir, "thiophene_wfs_5_carboxyl.qcout") qcout = QcOutput(filename) self.assertEqual(qcout.data[0]["jobtype"], "opt") ans_energies = [(u'SCF', -20179.886441383995), (u'SCF', -20180.12187218424), (u'SCF', -20180.150524404988), (u'SCF', -20180.151628362753), (u'SCF', -20180.151810235497), (u'SCF', -20180.15180854295)] self.assertEqual(qcout.data[0]["energies"], ans_energies) ans_mol1 = '''Full Formula (H4 C5 S1 O2) Reduced Formula: H4C5SO2 Charge = -1, Spin Mult = 2 Sites (12) 0 C 0.158839 -0.165379 0.000059 1 C -0.520531 -1.366720 0.000349 2 C -1.930811 -1.198460 -0.000041 3 C -2.297971 0.127429 -0.000691 4 S -0.938312 1.189630 0.000400 5 H -0.014720 -2.325340 0.000549 6 H -2.641720 -2.017721 -0.000161 7 H -3.301032 0.535659 -0.001261 8 C 1.603079 0.076231 -0.000101 9 O 2.131988 1.173581 -0.000330 10 O 2.322109 -1.079218 -0.000021 11 H 3.262059 -0.820188 -0.000171''' ans_mol_last = '''Full Formula (H4 C5 S1 O2) Reduced Formula: H4C5SO2 Charge = -1, Spin Mult = 2 Sites (12) 0 C 0.194695 -0.158362 -0.001887 1 C -0.535373 -1.381241 -0.001073 2 C -1.927071 -1.199274 -0.000052 3 C -2.332651 0.131916 0.000329 4 S -0.942111 1.224916 -0.001267 5 H -0.038260 -2.345185 -0.001256 6 H -2.636299 -2.025939 0.000620 7 H -3.339756 0.529895 0.001288 8 C 1.579982 0.071245 -0.002733 9 O 2.196383 1.165675 -0.000178 10 O 2.352341 -1.114671 0.001634 11 H 3.261096 -0.769470 0.003158''' self.assertEqual(qcout.data[0]["molecules"][0].__str__(), ans_mol1) self.assertEqual(str(qcout.data[0]["molecules"][-1]), ans_mol_last) 
self.assertFalse(qcout.data[0]["has_error"]) ans_gradient = [{'max_gradient': 0.07996, 'gradients': [(-0.0623076, -0.0157774, -2.05e-05), (0.0260287, 0.0289157, -6e-06), (-0.015738, 0.0103583, 1.87e-05), (0.0260219, -0.0028, -1.36e-05), (-0.0043158, -0.0245896, 2.83e-05), (4.8e-05, 0.000782, 1.3e-06), (0.0014679, 0.0020277, 3.9e-06), (0.0010437, -1.29e-05, -1.04e-05), (0.0799585, 0.0204159, 1e-06), (-0.0320357, -0.0421461, 2.1e-06), (-0.0237691, 0.0247526, -4.6e-06), (0.0035975, -0.0019264, -3e-07)], 'rms_gradient': 0.02244}, {'max_gradient': 0.02721, 'gradients': [(-0.0195677, -0.0008468, -3.2e-06), (0.0106798, 0.0039494, 1.11e-05), (-0.0086473, -0.0012624, -8.1e-06), (0.0065018, 0.0033749, 5e-07), (0.0002581, -0.0060831, 7.2e-06), (-0.0004373, -0.000504, 1.4e-06), (0.0003216, 0.0001059, -9e-07), (-0.000814, -5.03e-05, 3e-07), (0.0272109, 0.001408, -2.06e-05), (-0.0086971, -0.009251, 8.3e-06), (-0.0080925, 0.0112191, 2.9e-06), (0.0012838, -0.0020597, 1.1e-06)], 'rms_gradient': 0.007037}, {'max_gradient': 0.003444, 'gradients': [(0.0021606, 0.0013094, -1.68e-05), (0.0005757, -0.0002616, -1e-05), (2.73e-05, -0.0002868, 1.5e-05), (0.0001088, 0.0006944, -1.23e-05), (0.0006912, -0.0006523, 6.1e-06), (-0.0004191, -9.32e-05, -1.3e-06), (0.0002288, 3.98e-05, 1.8e-06), (-8.99e-05, -0.0002338, -3.2e-06), (1.95e-05, -0.0034439, 7.08e-05), (-0.0008228, -9.18e-05, -2.77e-05), (-0.0018054, 0.0034031, -2.21e-05), (-0.0006747, -0.0003834, -3e-07)], 'rms_gradient': 0.001008}, {'max_gradient': 0.002367, 'gradients': [(-0.0001646, 0.0006149, 4.17e-05), (-0.0004516, -0.0003116, 1.28e-05), (0.0003366, -3.27e-05, -1.59e-05), (-0.0003164, 0.0001775, 1.37e-05), (0.0001399, -0.0001201, -6.9e-06), (-0.0001374, -1.58e-05, 9e-07), (-1.19e-05, -3.93e-05, -3.3e-06), (-1.76e-05, -0.0001233, 5.1e-06), (9.73e-05, -0.0023668, -0.0001609), (0.0006998, 0.0009023, 6.31e-05), (-0.0002169, 0.0014874, 4.95e-05), (4.28e-05, -0.0001724, 2e-07)], 'rms_gradient': 0.0005339}, {'max_gradient': 0.001246, 
'gradients': [(-6.88e-05, 0.0001757, -8.32e-05), (-0.0002264, -0.0001306, -1.93e-05), (0.0001526, -1.39e-05, 2.05e-05), (-0.0001401, 3.8e-06, -2.05e-05), (1.52e-05, 0.0001152, 8e-06), (2.01e-05, -3.69e-05, -1e-06), (-3.62e-05, -3.51e-05, 5.5e-06), (1.01e-05, -1.23e-05, -6.8e-06), (9.73e-05, -0.0012462, 0.0003246), (0.0003926, 0.0008331, -0.0001269), (-0.0002294, 0.000281, -0.0001009), (1.3e-05, 6.61e-05, 0.0)], 'rms_gradient': 0.0002814}, {'max_gradient': 0.0006359, 'gradients': [(0.0001036, -0.0001339, 0.0001633), (0.0001003, 6.98e-05, 3.43e-05), (-8.28e-05, 1.1e-05, -3.31e-05), (6.2e-05, -0.0001068, 3.41e-05), (-5.02e-05, 0.0001346, -1.18e-05), (8.72e-05, -7.3e-06, 1.5e-06), (-1.7e-05, 4.9e-06, -1.05e-05), (1.29e-05, 5.9e-05, 1.26e-05), (-0.0001059, -5.4e-06, -0.0006359), (-1.48e-05, 0.0002152, 0.0002469), (-0.0001335, -0.0003534, 0.0001988), (3.83e-05, 0.0001124, -1e-07)], 'rms_gradient': 0.0001535}] self.assertEqual(qcout.data[0]["gradients"], ans_gradient) ans_inp = '''$molecule -1 2 C 0.15884000 -0.16538000 0.00006000 C -0.52053000 -1.36672000 0.00035000 C -1.93081000 -1.19846000 -0.00004000 C -2.29797000 0.12743000 -0.00069000 S -0.93831000 1.18963000 0.00040000 H -0.01472000 -2.32534000 0.00055000 H -2.64172000 -2.01772000 -0.00016000 H -3.30103000 0.53566000 -0.00126000 C 1.60308000 0.07623000 -0.00010000 O 2.13199000 1.17358000 -0.00033000 O 2.32211000 -1.07922000 -0.00002000 H 3.26206000 -0.82019000 -0.00017000 $end $rem jobtype = opt exchange = b3lyp basis = 6-31+g* $end ''' self.assertEqual(str(qcout.data[0]['input']), ans_inp) self.assertTrue(qcout.data[0]['gracefully_terminated']) ans_scf_iter = [[(-743.3130310589, 0.0561), (-741.3557302205, 0.00841), (-740.7031048846, 0.0157), (-741.5589873953, 0.00303), (-741.5918010434, 0.00118), (-741.5966923809, 0.000332), (-741.5970287119, 0.000158), (-741.5971282029, 4.38e-05), (-741.5971448077, 2.17e-05), (-741.5971501973, 7.7e-06), (-741.5971533576, 5.05e-06), (-741.5971541122, 2.7e-06), (-741.5971544119, 
9.48e-07), (-741.5971544408, 2.61e-07), (-741.5971544436, 1.21e-07), (-741.5971544441, 5.45e-08), (-741.5971544442, 1.77e-08), (-741.5971544442, 7.79e-09)], [(-741.5552794274, 0.00265), (-741.6048574279, 0.000515), (-741.6037290502, 0.000807), (-741.6056978336, 0.000188), (-741.6057976553, 4.78e-05), (-741.6058045572, 1.54e-05), (-741.6058057373, 4.51e-06), (-741.6058061671, 2.91e-06), (-741.6058062822, 8.32e-07), (-741.6058063435, 7.17e-07), (-741.6058063636, 1.97e-07), (-741.6058063662, 5.03e-08), (-741.6058063666, 3.35e-08), (-741.6058063666, 1.24e-08), (-741.6058063666, 5.25e-09)], [(-741.6023833754, 0.0013), (-741.6065067966, 0.000305), (-741.6057886337, 0.000559), (-741.6068434004, 7.61e-05), (-741.6068555361, 3.4e-05), (-741.6068589376, 5.66e-06), (-741.6068591778, 2.95e-06), (-741.60685927, 1.27e-06), (-741.6068592962, 4.82e-07), (-741.6068593106, 3.84e-07), (-741.6068593157, 9.23e-08), (-741.6068593162, 2.49e-08), (-741.6068593163, 1.52e-08), (-741.6068593163, 5.71e-09)], [(-741.6012175391, 0.000209), (-741.6068794773, 7.2e-05), (-741.606851035, 0.000117), (-741.606899078, 1.53e-05), (-741.6068997567, 6.01e-06), (-741.6068998747, 1.68e-06), (-741.6068998849, 5.32e-07), (-741.6068998857, 2.76e-07), (-741.606899886, 6.41e-08), (-741.606899886, 3.08e-08), (-741.606899886, 9.5e-09)], [(-741.6067290885, 0.0001), (-741.6069044268, 2.64e-05), (-741.6068991026, 5.29e-05), (-741.6069065234, 3.51e-06), (-741.6069065452, 2.49e-06), (-741.6069065686, 3.57e-07), (-741.6069065693, 2.59e-07), (-741.6069065696, 7.05e-08), (-741.6069065696, 4.44e-08), (-741.6069065697, 1.52e-08), (-741.6069065697, 8.17e-09)], [(-741.6074251344, 0.000129), (-741.6069044127, 2.43e-05), (-741.6068998551, 4.95e-05), (-741.6069064294, 4.49e-06), (-741.606906478, 2.77e-06), (-741.6069065049, 5.85e-07), (-741.6069065068, 2.74e-07), (-741.6069065073, 6.99e-08), (-741.6069065074, 3.37e-08), (-741.6069065075, 1.89e-08), (-741.6069065075, 7.38e-09)]] 
self.assertEqual(qcout.data[0]['scf_iteration_energies'], ans_scf_iter) def test_multiple_step_job(self): filename = os.path.join(test_dir, "CdBr2.qcout") qcout = QcOutput(filename) self.assertEqual(len(qcout.data), 3) self.assertEqual(qcout.data[0]['jobtype'], 'opt') self.assertEqual(qcout.data[1]['jobtype'], 'freq') ans_thermo_corr_text = ''' { "Rotational Enthalpy": 0.025714259, "Rotational Entropy": 0.000833523586, "Total Enthalpy": 0.199729978, "Total Entropy": 0.003218965579, "Translational Enthalpy": 0.038549707, "Translational Entropy": 0.001851513374, "Vibrational Enthalpy": 0.109795116, "Vibrational Entropy": 0.000533928619, "ZPE": 0.039330241, "Zero point vibrational energy": 0.039330241, "gas constant (RT)": 0.025714259 }''' ans_thermo_corr = json.loads(ans_thermo_corr_text) self.assertEqual(sorted(qcout.data[1]['corrections'].keys()), sorted(ans_thermo_corr.keys())) for k, ref in ans_thermo_corr.items(): self.assertAlmostEqual(qcout.data[1]['corrections'][k], ref) self.assertEqual(len(qcout.data[1]['molecules']), 1) ans_mol1 = '''Full Formula (Cd1 Br2) Reduced Formula: CdBr2 Charge = 0, Spin Mult = 1 Sites (3) 0 Br 0.000000 0.000000 -2.453720 1 Cd 0.000000 0.000000 0.000000 2 Br 0.000000 0.000000 2.453720''' self.assertEqual(str(qcout.data[1]['molecules'][0]), ans_mol1) self.assertFalse(qcout.data[1]['has_error']) self.assertEqual(qcout.data[1]['gradients'], []) ans_inp = '''$molecule read $end $rem jobtype = freq exchange = b3lyp basis = gen ecp = gen max_scf_cycles = 100 scf_guess = gwh $end $basis Br srlc **** Cd srsc **** $end $ecp Br srlc **** Cd srsc **** $end ''' self.assertEqual(str(qcout.data[1]['input']), ans_inp) ans_freq = [{'vib_mode': ((0.17, -0.475, 0.0), (-0.236, 0.659, 0.0), (0.17, -0.475, 0.0)), 'frequency': 61.36}, {'vib_mode': ((-0.475, -0.17, 0.0), (0.659, 0.236, 0.0), (-0.475, -0.17, 0.0)), 'frequency': 61.36}, {'vib_mode': ((0.0, 0.0, 0.707), (0.0, 0.0, 0.0), (0.0, 0.0, -0.707)), 'frequency': 199.94}, {'vib_mode': ((0.0, 0.0, 
-0.505), (0.0, 0.0, 0.7), (0.0, 0.0, -0.505)), 'frequency': 311.74}] self.assertEqual(qcout.data[1]['frequencies'], ans_freq) self.assertAlmostEqual(qcout.data[2]['energies'][0][1], -5296.720741780598, 5) ans_scf_iter_ene = [[(-176.9147092199, 0.779), (-156.8236033975, 0.115), (-152.9396694452, 0.157), (-183.2743425778, 0.138), (-182.2994943574, 0.142), (-181.990425533, 0.143), (-182.1690180647, 0.142), (-106.6454708618, 0.239), (-193.8056267625, 0.0432), (-193.0854096948, 0.0455), (-194.6340538334, 0.0062), (-194.6495072245, 0.00205), (-194.6508787796, 0.000189), (-194.6508984743, 2.18e-05), (-194.6508986262, 2.17e-06)]] self.assertEqual(qcout.data[2]['scf_iteration_energies'], ans_scf_iter_ene) def test_solvent_method(self): filename = os.path.join(test_dir, "thiophene_wfs_5_carboxyl.qcout") qcout = QcOutput(filename) self.assertEqual(qcout.data[0]["solvent_method"], "NA") filename = os.path.join(test_dir, "qchem_energies", "hf_cosmo.qcout") qcout = QcOutput(filename) self.assertEqual(qcout.data[0]["solvent_method"], "cosmo") filename = os.path.join(test_dir, "qchem_energies", "hf_pcm.qcout") qcout = QcOutput(filename) self.assertEqual(qcout.data[0]["solvent_method"], "pcm") def test_failed_message(self): scf_file = os.path.join(test_dir, "hf.qcout") scf_qcout = QcOutput(scf_file) self.assertTrue(scf_qcout.data[0]['has_error']) self.assertEqual(scf_qcout.data[0]['errors'], ['Bad SCF convergence', 'Molecular charge is not found', 'Geometry optimization failed']) geom_file = os.path.join(test_dir, "hf_opt_failed.qcout") geom_qcout = QcOutput(geom_file) self.assertTrue(geom_qcout.data[0]['has_error']) self.assertEqual(geom_qcout.data[0]['errors'], ['Geometry optimization failed']) def test_abnormal_exit(self): no_reading_file = os.path.join(test_dir, "no_reading.qcout") no_reading_qcout = QcOutput(no_reading_file) self.assertTrue(no_reading_qcout.data[0]['has_error']) self.assertEqual(no_reading_qcout.data[0]['errors'], ['Exit Code 134', 'Molecular charge is not 
found', 'No input text', 'Bad SCF convergence']) exit_code_134_file = os.path.join(test_dir, "exit_code_134.qcout") ec134_qcout = QcOutput(exit_code_134_file) self.assertTrue(ec134_qcout.data[0]['has_error']) self.assertEqual(ec134_qcout.data[0]['errors'], ['Exit Code 134', 'Molecular charge is not found', 'Bad SCF convergence']) def test_chelp_and_mulliken_charges(self): filename = os.path.join(test_dir, 'chelpg_charges.qcout') qcout = QcOutput(filename) mulliken_charges = [0.393961, -0.281545, 0.066432, 0.019364, -0.186041, -0.16007, 0.315659, 0.30631, 0.064257, 0.056438, -0.17695, 0.16976, -0.13326, -0.131853, -0.178711, 0.163697, 0.170148, 0.143329, 0.152702, 0.152929, 0.170475, -0.451542, -0.441554, -0.709834, -0.592718, 0.20506, 0.211043, 0.204389, 0.546173, -0.414558, 0.346511] self.assertEqual(qcout.data[0]['charges']['mulliken'], mulliken_charges) chelpg_charges = [0.399404, -0.277179, -0.057502, -0.110085, -0.07107, -0.274987, 0.475781, 0.423117, -0.054079, -0.101424, -0.05793, 0.115179, -0.116069, -0.10949, -0.06664, 0.161442, 0.135438, 0.158081, 0.125881, 0.125324, 0.115863, -0.425251, -0.42309, -0.602375, -0.458844, 0.140267, 0.139084, 0.139995, 0.698011, -0.487911, 0.341061] self.assertEqual(qcout.data[0]['charges']['chelpg'], chelpg_charges) def test_no_message_scf_opt_fail(self): so_failfile = os.path.join(test_dir, 'scf_opt_no_message_fail.qcout') so_failqcout = QcOutput(so_failfile) self.assertTrue(so_failqcout.data[0]['has_error']) self.assertEqual(so_failqcout.data[0]['errors'], ['Exit Code 134', 'Molecular charge is not found', 'Bad SCF convergence', 'Geometry optimization failed']) o_failfile = os.path.join(test_dir, 'opt_fail_no_message.qcout') o_failqcout = QcOutput(o_failfile) self.assertEqual(o_failqcout.data[0]['errors'], ['Geometry optimization failed']) s_failfile = os.path.join(test_dir, 'scf_no_message_fail.qcout') s_failqcout = QcOutput(s_failfile) self.assertEqual(s_failqcout.data[0]['errors'], ['Exit Code 134', 'Molecular charge is 
not found', 'Bad SCF convergence']) so_successfile = os.path.join(test_dir, 'thiophene_wfs_5_carboxyl.qcout') so_successqcout = QcOutput(so_successfile) self.assertFalse(so_successqcout.data[0]['has_error']) def test_negative_eigen(self): filename = os.path.join(test_dir, "negative_eigen.qcout") qcout = QcOutput(filename) self.assertTrue(qcout.data[0]['has_error']) self.assertEqual(qcout.data[0]["errors"], ['Negative Eigen', 'Molecular charge is not found', 'Bad SCF convergence', 'Geometry optimization failed']) def test_insufficient_memory(self): filename = os.path.join(test_dir, "insufficient_memory.qcout") qcout = QcOutput(filename) self.assertTrue(qcout.data[0]['has_error']) self.assertEqual(qcout.data[0]['errors'], ['Insufficient static memory', 'Molecular charge is not found', 'Bad SCF convergence', 'Geometry optimization failed']) def test_freq_seg_too_small(self): filename = os.path.join(test_dir, "freq_seg_too_small.qcout") qcout = QcOutput(filename) self.assertTrue(qcout.data[0]['has_error']) self.assertEqual(qcout.data[0]['errors'], ['Freq Job Too Small', 'Exit Code 134']) def test_not_enough_total_memory(self): filename = os.path.join(test_dir, "not_enough_total_memory.qcout") qcout = QcOutput(filename) self.assertTrue(qcout.data[1]['has_error']) self.assertEqual(qcout.data[1]["errors"], ['Not Enough Total Memory', 'Exit Code 134']) def test_killed(self): filename = os.path.join(test_dir, "killed.qcout") qcout = QcOutput(filename) self.assertFalse(qcout.data[0]["has_error"]) self.assertTrue(qcout.data[1]["has_error"]) self.assertEqual(qcout.data[1]["errors"], ['Killed', 'Molecular charge is not found', 'Bad SCF convergence']) def test_gdm_scf(self): filename = os.path.join(test_dir, "gmd_scf.qcout") qcout = QcOutput(filename) self.assertTrue(qcout.data[0]['has_error']) self.assertEqual(qcout.data[0]['errors'], ['Exit Code 134', 'Bad SCF convergence', 'Geometry optimization failed']) self.assertEqual(len(qcout.data[0]['scf_iteration_energies']), 2) 
self.assertEqual(len(qcout.data[0]['scf_iteration_energies'][-1]), 192) self.assertAlmostEqual(qcout.data[0]['scf_iteration_energies'][-1][-1][0], -1944.945908459, 5) def test_crazy_scf_values(self): filename = os.path.join(test_dir, "crazy_scf_values.qcout") qcout = QcOutput(filename) ans = [(-28556254.06737586, 6.49e-06), (-28556254.067382727, 9.45e-06), (-28556254.067382865, 6.14e-06)] self.assertEqual(qcout.data[0]["scf_iteration_energies"][-1][-3:], ans) def test_crowd_gradient_number(self): filename = os.path.join(test_dir, "crowd_gradient_number.qcout") qcout = QcOutput(filename) self.assertEqual(qcout.data[0]['gradients'][0]['gradients'], [(-0.0307525, 0.0206536, -0.0396255), (0.0008938, -0.000609, 0.0082746), (0.042143, -0.0240514, 0.0380298), (-0.0843578, 0.0002757, 0.0884924), (0.0356689, -0.0444656, -0.0710646), (-0.0190554, -0.0308886, -0.0297994), (0.0470543, -0.0263915, -0.0690973), (-0.0297801, 0.0296872, -0.0104344), (0.0504581, -0.0014272, 0.0262245), (-0.0927323, 0.0750046, 0.0128003), (0.0183242, -0.0084638, 0.0127388), (-0.0083989, 0.0111579, -0.0002461), (-0.0316941, 267.34455, 878.3493251), (0.017459, 0.0487124, -0.0276365), (-0.3699134, 0.0110442, 0.0260809), (0.363931, 0.24044, 0.5192852), (0.026669, -0.0284192, -0.0347528), (0.0047475, 0.0049706, 0.0148794), (-0.077804, 0.003402, 0.000852), (-6772.1697035, -267.4471902, -878.585931), (-0.0029556, -0.0616073, -0.0180577), (-0.0001915, 0.0021213, 0.0006193), (0.0320436, -0.0073456, -0.01509), (0.0155112, -0.0035725, 0.0015675), (-0.0034309, 0.0170739, 0.0074455), (-0.0088735, -0.0129874, 0.0092329), (-0.0271963, -0.0258714, 0.0246954), (0.0025065, 0.0062934, 0.0209733), (0.0152829, -0.0080239, -0.018902), (0.0461304, 0.0071952, 0.0012227), (-0.0272755, -0.0280053, 0.0325455), (0.0122118, 0.027816, -0.0167773), (0.0168893, -0.0014211, 0.0039917), (-0.0048723, 0.0026667, -0.0159952), (-0.1840467, -0.1425887, -0.3235801), (0.015975, -0.0922797, 0.0640925), (0.0267234, 0.1031154, -0.0299014), 
(-0.0175591, 0.0081813, -0.0165425), (0.0119225, 0.0113174, 0.0154056), (0.0138491, 0.0083436, 0.0188022), (-0.0151146, -0.0015971, -0.0054462)]) def test_nbo_charges(self): filename = os.path.join(test_dir, "quinoxaline_anion.qcout") qcout = QcOutput(filename) ans = [-0.29291, -0.29807, 0.12715, 0.12715, -0.29807, -0.29291, 0.21284, 0.22287, 0.22287, 0.21284, -0.10866, -0.10866, 0.19699, -0.5602, -0.5602, 0.19699] self.assertEqual(qcout.data[0]["charges"]["nbo"], ans) filename = os.path.join(test_dir, "tfsi_nbo.qcout") qcout = QcOutput(filename) ans = [2.2274, 2.23584, -0.94183, -0.94575, -0.94719, -0.9423, 0.86201, 0.85672, -0.35698, -0.35373, -0.35782, -0.35647, -0.35646, -0.35787, -1.26555] self.assertEqual(qcout.data[0]["charges"]["nbo"], ans) filename = os.path.join(test_dir, "crowd_nbo_charges.qcout") qcout = QcOutput(filename) self.assertEqual( qcout.data[0]["charges"]["nbo"], [-0.33917, -0.6104, -0.15912, -0.17751, -0.61817, -0.3357, 0.24671, 0.19942, 0.19325, 0.2362, 0.23982, 0.21985, 0.2305, 0.20444, 0.23179, 0.20491, 0.85965, -0.59655, -0.59561, -0.14789, -0.13859, -0.32712, -0.33359, 0.21602, 0.22383, 0.2123, 0.22759, 0.2507, 0.20098, 0.18631, 0.24945, 0.19709, 0.20274, -0.34831, -0.56307, -0.14572, -0.1431, -0.55866, -0.3572, 0.22695, 0.21983, 0.1963, 0.20977, 0.22298, 0.20875, 0.21081, 0.19586, 0.24708, 0.20067, -0.34288, -0.55793, -0.16806, -0.15609, -0.56464, -0.34695, 0.22555, 0.20417, 0.206, 0.20825, 0.22409, 0.25415, 0.20977, 0.18976, 0.24647, 0.1993, -0.33605, -0.59395, -0.15985, -0.18024, -0.60646, -0.32742, 0.22909, 0.19347, 0.21872, 0.2203, 0.23518, 0.25185, 0.23523, 0.18666, 0.22737, 0.2205, -0.35902, -0.56138, -0.14552, -0.14903, -0.55491, -0.3493, 0.22826, 0.21789, 0.19075, 0.20898, 0.21343, 0.21715, 0.20794, 0.19695, 0.2429, 0.18482, -0.33943, -0.55659, -0.16437, -0.14503, -0.56155, -0.34131, 0.22339, 0.20483, 0.19376, 0.23395, 0.20784, 0.2096, 0.21945, 0.19192, 0.23089, 0.20493, -0.32963, -0.56949, -0.1446, -0.15244, -0.55482, -0.34848, 
0.22802, 0.20471, 0.19704, 0.20744, 0.22332, 0.2206, 0.20734, 0.18871, 0.22907, 0.20741, -0.33856, -0.564, -0.16575, -0.17422, -0.56032, -0.3426, 0.22585, 0.20169, 0.20529, 0.20836, 0.21329, 0.25353, 0.23374, 0.19306, 0.23582, 0.20196, -0.34069, -0.56522, -0.17228, -0.17503, -0.55505, -0.34264, 0.22696, 0.19604, 0.20515, 0.23964, 0.2437, 0.2111, 0.21204, 0.19975, 0.2347, 0.18835, -0.34324, -0.55184, -0.16086, -0.15907, -0.56319, -0.3384, 0.23866, 0.19808, 0.19728, 0.20205, 0.24698, 0.21416, 0.20398, 0.20475, 0.2265, 0.20141, -0.34339, -0.56344, -0.14955, -0.14878, -0.55906, -0.34506, 0.23937, 0.20027, 0.19671, 0.2085, 0.21693, 0.22164, 0.20863, 0.20703, 0.22889, 0.1916]) def test_simple_aimd(self): filename = os.path.join(test_dir, "h2o_aimd.qcout") qcout = QcOutput(filename) self.assertEqual(len(qcout.data[0]["molecules"]), 11) def test_homo_lumo(self): filename = os.path.join(test_dir, "quinoxaline_anion.qcout") qcout = QcOutput(filename) for a, b in zip(qcout.data[0]["HOMO/LUMOs"][-1], [1.00682120282, 2.80277253758]): self.assertAlmostEqual(a, b, 5) filename = os.path.join(test_dir, "qchem_energies", "hf_ccsd(t).qcout") qcout = QcOutput(filename) self.assertArrayAlmostEqual(qcout.data[0]["HOMO/LUMOs"], [[-17.741823053011334, 5.224585929721129], [-17.741823053011334, 5.224585929721129]], 4) filename = os.path.join(test_dir, "crowd_gradient_number.qcout") qcout = QcOutput(filename) self.assertArrayAlmostEqual( qcout.data[0]["HOMO/LUMOs"], [[-5.741602245683116, -4.544301303455358], [-4.9796834642654515, -4.2993988379996795], [-4.761992383860404, -3.8095939070883236]], 4) def test_bsse(self): filename = os.path.join(test_dir, "bsse.qcout") qcout = QcOutput(filename) self.assertAlmostEqual(qcout.data[0]["bsse"], -0.164210762949, 5) self.assertEqual(qcout.data[0]["jobtype"], "bsse") def test_hirshfeld_charge(self): filename = os.path.join(test_dir, "hirshfeld_population.qcout") qcout = QcOutput(filename) self.assertEqual(qcout.data[0]["charges"]["hirshfeld"], 
[-0.286309, 0.143134, 0.143176]) self.assertFalse(qcout.data[0]["has_error"]) def test_ghost_atoms(self): filename = os.path.join(test_dir, "ghost_atoms.qcout") qcout = QcOutput(filename) elements = [a.specie.symbol for a in qcout.data[-1]["molecules"][-1].sites] self.assertEqual(elements, ['O', 'H', 'H', 'C', 'H', 'H', 'H', 'H']) filename = os.path.join(test_dir, "MgBF4_b_overalpped.qcout") qcout = QcOutput(filename) self.assertEqual(qcout.data[0]["input"].ghost_atoms, [0]) def test_final_energy(self): filename = os.path.join(test_dir, "thiophene_wfs_5_carboxyl.qcout") qcout = QcOutput(filename) self.assertEqual(qcout.final_energy, -20180.15180854295) def test_final_structure(self): filename = os.path.join(test_dir, "thiophene_wfs_5_carboxyl.qcout") qcout = QcOutput(filename) ans = '''Full Formula (H4 C5 S1 O2) Reduced Formula: H4C5SO2 Charge = -1, Spin Mult = 2 Sites (12) 0 C 0.194695 -0.158362 -0.001887 1 C -0.535373 -1.381241 -0.001073 2 C -1.927071 -1.199274 -0.000052 3 C -2.332651 0.131916 0.000329 4 S -0.942111 1.224916 -0.001267 5 H -0.038260 -2.345185 -0.001256 6 H -2.636299 -2.025939 0.000620 7 H -3.339756 0.529895 0.001288 8 C 1.579982 0.071245 -0.002733 9 O 2.196383 1.165675 -0.000178 10 O 2.352341 -1.114671 0.001634 11 H 3.261096 -0.769470 0.003158''' self.assertEqual(qcout.final_structure.__str__(), ans) def test_time_nan_values(self): filename = os.path.join(test_dir, "time_nan_values.qcout") qcout = QcOutput(filename) self.assertFalse(qcout.data[0]["has_error"]) def test_pcm_solvent_deprecated(self): filename = os.path.join(test_dir, "pcm_solvent_deprecated.qcout") qcout = QcOutput(filename) self.assertTrue(qcout.data[-1]["has_error"]) ans = ['pcm_solvent deprecated', 'Molecular charge is not found', 'No input text', 'Bad SCF convergence'] self.assertEqual(qcout.data[-1]["errors"], ans) def test_qc43_batch_job(self): filename = os.path.join(test_dir, "qchem43_batch_job.qcout") qcout = QcOutput(filename) self.assertEqual(len(qcout.data), 2) 
self.assertEqual(len(qcout.data[0]["scf_iteration_energies"][0]), 22) self.assertTrue("pcm_solvent deprecated" in qcout.data[1]["errors"]) def test_output_file_wierd_encoding(self): filename = os.path.join(test_dir, "ferrocenium_1pos.qcout") qcout = QcOutput(filename) self.assertFalse(qcout.data[1]["has_error"]) self.assertEqual(qcout.data[1]["frequencies"][0]["frequency"], -157.11) def test_homo_lumo_nan_values(self): filename = os.path.join(test_dir, "homo_lumo_nan_values.qcout") qcout = QcOutput(filename) self.assertTrue(qcout.data[0]["has_error"]) def test_ordinal_not_in_range(self): filename = os.path.join(test_dir, "ordinal_not_in_range.qcout.gz") qcout = QcOutput(filename) self.assertEqual(len(qcout.data), 1) def test_aux_mpi_time_in_the_end_of_job(self): filename = os.path.join(test_dir, "aux_mpi_time_mol.qcout") qcout = QcOutput(filename) self.assertEqual(len(qcout.data), 2) def test_opt(self): filename = os.path.join(test_dir, "pt_dft_180.0.qcout") qcout = QcOutput(filename) qcin = qcout.data[-1]['input'] qcin_ans = '''$molecule 0 1 S 1.82267924 -1.19997629 0.28714109 C 3.20006180 -0.17260711 0.06528466 C 2.82980603 1.10216298 -0.25610036 C 1.41909100 1.26345446 -0.34254814 C 0.71738150 0.10901545 -0.08456145 H 0.93627498 2.19419272 -0.61095402 C -0.71741859 -0.10899254 -0.08455524 S -1.82328469 1.20374179 -0.44105740 C -1.41912820 -1.26343144 0.17343142 C -3.19922829 0.16690023 -0.25767458 C -2.82941826 -1.10493701 0.07562280 H -3.53750269 -1.90709774 0.23645949 H 4.19429620 -0.57452886 0.18632814 H 3.53860725 1.89960515 -0.43610218 H -4.19239866 0.56181917 -0.40716131 H -0.93481970 -2.20399421 0.40193462 $end $rem jobtype = opt exchange = b3lyp basis = 6-31++g** max_scf_cycles = 75 mem_static = 100 mem_total = 1500 $end $opt CONSTRAINT tors 4 5 7 9 180.0 ENDCONSTRAINT $end ''' self.assertEqual(str(qcin), qcin_ans) constraint = qcin.params['opt'] constraint_ans = [['tors', 4, 5, 7, 9, 180.0]] self.assertEqual(constraint, constraint_ans) if __name__ == 
"__main__": unittest.main()
aykol/pymatgen
pymatgen/io/tests/test_qchem.py
Python
mit
81,842
0.00033
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016, Dag Wieers <dag@wieers.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: wakeonlan version_added: '2.2' short_description: Send a magic Wake-on-LAN (WoL) broadcast packet description: - The C(wakeonlan) module sends magic Wake-on-LAN (WoL) broadcast packets. options: mac: description: - MAC address to send Wake-on-LAN broadcast packet for. required: true broadcast: description: - Network broadcast address to use for broadcasting magic Wake-on-LAN packet. default: 255.255.255.255 port: description: - UDP port to use for magic Wake-on-LAN packet. default: 7 author: "Dag Wieers (@dagwieers)" todo: - Add arping support to check whether the system is up (before and after) - Enable check-mode support (when we have arping support) - Does not have SecureOn password support notes: - This module sends a magic packet, without knowing whether it worked - Only works if the target system was properly configured for Wake-on-LAN (in the BIOS and/or the OS) - Some BIOSes have a different (configurable) Wake-on-LAN boot order (i.e. 
PXE first) when turned off ''' EXAMPLES = ''' - name: Send a magic Wake-on-LAN packet to 00:00:5E:00:53:66 wakeonlan: mac: '00:00:5E:00:53:66' broadcast: 192.0.2.23 delegate_to: localhost - wakeonlan: mac: 00:00:5E:00:53:66 port: 9 delegate_to: localhost ''' RETURN=''' # Default return values ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pycompat24 import get_exception import socket import struct def wakeonlan(module, mac, broadcast, port): """ Send a magic Wake-on-LAN packet. """ mac_orig = mac # Remove possible separator from MAC address if len(mac) == 12 + 5: mac = mac.replace(mac[2], '') # If we don't end up with 12 hexadecimal characters, fail if len(mac) != 12: module.fail_json(msg="Incorrect MAC address length: %s" % mac_orig) # Test if it converts to an integer, otherwise fail try: int(mac, 16) except ValueError: module.fail_json(msg="Incorrect MAC address format: %s" % mac_orig) # Create payload for magic packet data = '' padding = ''.join(['FFFFFFFFFFFF', mac * 20]) for i in range(0, len(padding), 2): data = ''.join([data, struct.pack('B', int(padding[i: i + 2], 16))]) # Broadcast payload to network sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) try: sock.sendto(data, (broadcast, port)) except socket.error: e = get_exception() sock.close() module.fail_json(msg=str(e)) sock.close() def main(): module = AnsibleModule( argument_spec = dict( mac = dict(type='str', required=True), broadcast = dict(type='str', default='255.255.255.255'), port = dict(type='int', default=7), ), supports_check_mode = True, ) mac = module.params['mac'] broadcast = module.params['broadcast'] port = module.params['port'] if not module.check_mode: wakeonlan(module, mac, broadcast, port) module.exit_json(changed=True) if __name__ == '__main__': main()
andreaso/ansible
lib/ansible/modules/remote_management/wakeonlan.py
Python
gpl-3.0
4,077
0.004415
#!/usr/bin/python # # Copyright 2007 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Unittests for the portpicker module.""" from __future__ import print_function import errno import os import random import socket import sys import unittest from contextlib import ExitStack if sys.platform == 'win32': import _winapi else: _winapi = None try: # pylint: disable=no-name-in-module from unittest import mock # Python >= 3.3. except ImportError: import mock # https://pypi.python.org/pypi/mock import portpicker class PickUnusedPortTest(unittest.TestCase): def IsUnusedTCPPort(self, port): return self._bind(port, socket.SOCK_STREAM, socket.IPPROTO_TCP) def IsUnusedUDPPort(self, port): return self._bind(port, socket.SOCK_DGRAM, socket.IPPROTO_UDP) def setUp(self): # So we can Bind even if portpicker.bind is stubbed out. 
self._bind = portpicker.bind portpicker._owned_ports.clear() portpicker._free_ports.clear() portpicker._random_ports.clear() def testPickUnusedPortActuallyWorks(self): """This test can be flaky.""" for _ in range(10): port = portpicker.pick_unused_port() self.assertTrue(self.IsUnusedTCPPort(port)) self.assertTrue(self.IsUnusedUDPPort(port)) @unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ, 'no port server to test against') def testPickUnusedCanSuccessfullyUsePortServer(self): with mock.patch.object(portpicker, '_pick_unused_port_without_server'): portpicker._pick_unused_port_without_server.side_effect = ( Exception('eek!') ) # Since _PickUnusedPortWithoutServer() raises an exception, if we # can successfully obtain a port, the portserver must be working. port = portpicker.pick_unused_port() self.assertTrue(self.IsUnusedTCPPort(port)) self.assertTrue(self.IsUnusedUDPPort(port)) @unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ, 'no port server to test against') def testPickUnusedCanSuccessfullyUsePortServerAddressKwarg(self): with mock.patch.object(portpicker, '_pick_unused_port_without_server'): portpicker._pick_unused_port_without_server.side_effect = ( Exception('eek!') ) # Since _PickUnusedPortWithoutServer() raises an exception, and # we've temporarily removed PORTSERVER_ADDRESS from os.environ, if # we can successfully obtain a port, the portserver must be working. 
addr = os.environ.pop('PORTSERVER_ADDRESS') try: port = portpicker.pick_unused_port(portserver_address=addr) self.assertTrue(self.IsUnusedTCPPort(port)) self.assertTrue(self.IsUnusedUDPPort(port)) finally: os.environ['PORTSERVER_ADDRESS'] = addr @unittest.skipIf('PORTSERVER_ADDRESS' not in os.environ, 'no port server to test against') def testGetPortFromPortServer(self): """Exercise the get_port_from_port_server() helper function.""" for _ in range(10): port = portpicker.get_port_from_port_server( os.environ['PORTSERVER_ADDRESS']) self.assertTrue(self.IsUnusedTCPPort(port)) self.assertTrue(self.IsUnusedUDPPort(port)) def testSendsPidToPortServer(self): with ExitStack() as stack: if _winapi: create_file_mock = mock.Mock() create_file_mock.return_value = 0 read_file_mock = mock.Mock() write_file_mock = mock.Mock() read_file_mock.return_value = (b'42768\n', 0) stack.enter_context( mock.patch('_winapi.CreateFile', new=create_file_mock)) stack.enter_context( mock.patch('_winapi.WriteFile', new=write_file_mock)) stack.enter_context( mock.patch('_winapi.ReadFile', new=read_file_mock)) port = portpicker.get_port_from_port_server( 'portserver', pid=1234) write_file_mock.assert_called_once_with(0, b'1234\n') else: server = mock.Mock() server.recv.return_value = b'42768\n' stack.enter_context( mock.patch.object(socket, 'socket', return_value=server)) port = portpicker.get_port_from_port_server( 'portserver', pid=1234) server.sendall.assert_called_once_with(b'1234\n') self.assertEqual(port, 42768) def testPidDefaultsToOwnPid(self): with ExitStack() as stack: stack.enter_context( mock.patch.object(os, 'getpid', return_value=9876)) if _winapi: create_file_mock = mock.Mock() create_file_mock.return_value = 0 read_file_mock = mock.Mock() write_file_mock = mock.Mock() read_file_mock.return_value = (b'52768\n', 0) stack.enter_context( mock.patch('_winapi.CreateFile', new=create_file_mock)) stack.enter_context( mock.patch('_winapi.WriteFile', new=write_file_mock)) 
stack.enter_context( mock.patch('_winapi.ReadFile', new=read_file_mock)) port = portpicker.get_port_from_port_server('portserver') write_file_mock.assert_called_once_with(0, b'9876\n') else: server = mock.Mock() server.recv.return_value = b'52768\n' stack.enter_context( mock.patch.object(socket, 'socket', return_value=server)) port = portpicker.get_port_from_port_server('portserver') server.sendall.assert_called_once_with(b'9876\n') self.assertEqual(port, 52768) @mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': 'portserver'}) def testReusesPortServerPorts(self): with ExitStack() as stack: if _winapi: read_file_mock = mock.Mock() read_file_mock.side_effect = [ (b'12345\n', 0), (b'23456\n', 0), (b'34567\n', 0), ] stack.enter_context(mock.patch('_winapi.CreateFile')) stack.enter_context(mock.patch('_winapi.WriteFile')) stack.enter_context( mock.patch('_winapi.ReadFile', new=read_file_mock)) else: server = mock.Mock() server.recv.side_effect = [b'12345\n', b'23456\n', b'34567\n'] stack.enter_context( mock.patch.object(socket, 'socket', return_value=server)) self.assertEqual(portpicker.pick_unused_port(), 12345) self.assertEqual(portpicker.pick_unused_port(), 23456) portpicker.return_port(12345) self.assertEqual(portpicker.pick_unused_port(), 12345) @mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': ''}) def testDoesntReuseRandomPorts(self): ports = set() for _ in range(10): try: port = portpicker.pick_unused_port() except portpicker.NoFreePortFoundError: # This sometimes happens when not using portserver. Just # skip to the next attempt. continue ports.add(port) portpicker.return_port(port) self.assertGreater(len(ports), 5) # Allow some random reuse. def testReturnsReservedPorts(self): with mock.patch.object(portpicker, '_pick_unused_port_without_server'): portpicker._pick_unused_port_without_server.side_effect = ( Exception('eek!')) # Arbitrary port. In practice you should get this from somewhere # that assigns ports. 
reserved_port = 28465 portpicker.add_reserved_port(reserved_port) ports = set() for _ in range(10): port = portpicker.pick_unused_port() ports.add(port) portpicker.return_port(port) self.assertEqual(len(ports), 1) self.assertEqual(ports.pop(), reserved_port) @mock.patch.dict(os.environ,{'PORTSERVER_ADDRESS': ''}) def testFallsBackToRandomAfterRunningOutOfReservedPorts(self): # Arbitrary port. In practice you should get this from somewhere # that assigns ports. reserved_port = 23456 portpicker.add_reserved_port(reserved_port) self.assertEqual(portpicker.pick_unused_port(), reserved_port) self.assertNotEqual(portpicker.pick_unused_port(), reserved_port) def testRandomlyChosenPorts(self): # Unless this box is under an overwhelming socket load, this test # will heavily exercise the "pick a port randomly" part of the # port picking code, but may never hit the "OS assigns a port" # code. ports = 0 for _ in range(100): try: port = portpicker._pick_unused_port_without_server() except portpicker.NoFreePortFoundError: # Without the portserver, pick_unused_port can sometimes fail # to find a free port. Check that it passes most of the time. continue self.assertTrue(self.IsUnusedTCPPort(port)) self.assertTrue(self.IsUnusedUDPPort(port)) ports += 1 # Getting a port shouldn't have failed very often, even on machines # with a heavy socket load. self.assertGreater(ports, 95) def testOSAssignedPorts(self): self.last_assigned_port = None def error_for_explicit_ports(port, socket_type, socket_proto): # Only successfully return a port if an OS-assigned port is # requested, or if we're checking that the last OS-assigned port # is unused on the other protocol. if port == 0 or port == self.last_assigned_port: self.last_assigned_port = self._bind(port, socket_type, socket_proto) return self.last_assigned_port else: return None with mock.patch.object(portpicker, 'bind', error_for_explicit_ports): # Without server, this can be little flaky, so check that it # passes most of the time. 
ports = 0 for _ in range(100): try: port = portpicker._pick_unused_port_without_server() except portpicker.NoFreePortFoundError: continue self.assertTrue(self.IsUnusedTCPPort(port)) self.assertTrue(self.IsUnusedUDPPort(port)) ports += 1 self.assertGreater(ports, 70) def pickUnusedPortWithoutServer(self): # Try a few times to pick a port, to avoid flakiness and to make sure # the code path we want was exercised. for _ in range(5): try: port = portpicker._pick_unused_port_without_server() except portpicker.NoFreePortFoundError: continue else: self.assertTrue(self.IsUnusedTCPPort(port)) self.assertTrue(self.IsUnusedUDPPort(port)) return self.fail("Failed to find a free port") def testPickPortsWithoutServer(self): # Test the first part of _pick_unused_port_without_server, which # tries a few random ports and checks is_port_free. self.pickUnusedPortWithoutServer() # Now test the second part, the fallback from above, which asks the # OS for a port. def mock_port_free(port): return False with mock.patch.object(portpicker, 'is_port_free', mock_port_free): self.pickUnusedPortWithoutServer() def checkIsPortFree(self): """This might be flaky unless this test is run with a portserver.""" # The port should be free initially. port = portpicker.pick_unused_port() self.assertTrue(portpicker.is_port_free(port)) cases = [ (socket.AF_INET, socket.SOCK_STREAM, None), (socket.AF_INET6, socket.SOCK_STREAM, 1), (socket.AF_INET, socket.SOCK_DGRAM, None), (socket.AF_INET6, socket.SOCK_DGRAM, 1), ] # Using v6only=0 on Windows doesn't result in collisions if not _winapi: cases.extend([ (socket.AF_INET6, socket.SOCK_STREAM, 0), (socket.AF_INET6, socket.SOCK_DGRAM, 0), ]) for (sock_family, sock_type, v6only) in cases: # Occupy the port on a subset of possible protocols. try: sock = socket.socket(sock_family, sock_type, 0) except socket.error: print('Kernel does not support sock_family=%d' % sock_family, file=sys.stderr) # Skip this case, since we cannot occupy a port. 
continue if not hasattr(socket, 'IPPROTO_IPV6'): v6only = None if v6only is not None: try: sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, v6only) except socket.error: print('Kernel does not support IPV6_V6ONLY=%d' % v6only, file=sys.stderr) # Don't care; just proceed with the default. # Socket may have been taken in the mean time, so catch the # socket.error with errno set to EADDRINUSE and skip this # attempt. try: sock.bind(('', port)) except socket.error as e: if e.errno == errno.EADDRINUSE: raise portpicker.NoFreePortFoundError raise # The port should be busy. self.assertFalse(portpicker.is_port_free(port)) sock.close() # Now it's free again. self.assertTrue(portpicker.is_port_free(port)) def testIsPortFree(self): # This can be quite flaky on a busy host, so try a few times. for _ in range(10): try: self.checkIsPortFree() except portpicker.NoFreePortFoundError: pass else: return self.fail("checkPortIsFree failed every time.") def testIsPortFreeException(self): port = portpicker.pick_unused_port() with mock.patch.object(socket, 'socket') as mock_sock: mock_sock.side_effect = socket.error('fake socket error', 0) self.assertFalse(portpicker.is_port_free(port)) def testThatLegacyCapWordsAPIsExist(self): """The original APIs were CapWords style, 1.1 added PEP8 names.""" self.assertEqual(portpicker.bind, portpicker.Bind) self.assertEqual(portpicker.is_port_free, portpicker.IsPortFree) self.assertEqual(portpicker.pick_unused_port, portpicker.PickUnusedPort) self.assertEqual(portpicker.get_port_from_port_server, portpicker.GetPortFromPortServer) if __name__ == '__main__': unittest.main()
google/python_portpicker
src/tests/portpicker_test.py
Python
apache-2.0
16,155
0.000371
# copies.py - copy detection for Mercurial # # Copyright 2008 Matt Mackall <mpm@selenic.com> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. import util import heapq def _nonoverlap(d1, d2, d3): "Return list of elements in d1 not in d2 or d3" return sorted([d for d in d1 if d not in d3 and d not in d2]) def _dirname(f): s = f.rfind("/") if s == -1: return "" return f[:s] def _findlimit(repo, a, b): """Find the earliest revision that's an ancestor of a or b but not both, None if no such revision exists. """ # basic idea: # - mark a and b with different sides # - if a parent's children are all on the same side, the parent is # on that side, otherwise it is on no side # - walk the graph in topological order with the help of a heap; # - add unseen parents to side map # - clear side of any parent that has children on different sides # - track number of interesting revs that might still be on a side # - track the lowest interesting rev seen # - quit when interesting revs is zero cl = repo.changelog working = len(cl) # pseudo rev for the working directory if a is None: a = working if b is None: b = working side = {a: -1, b: 1} visit = [-a, -b] heapq.heapify(visit) interesting = len(visit) hascommonancestor = False limit = working while interesting: r = -heapq.heappop(visit) if r == working: parents = [cl.rev(p) for p in repo.dirstate.parents()] else: parents = cl.parentrevs(r) for p in parents: if p < 0: continue if p not in side: # first time we see p; add it to visit side[p] = side[r] if side[p]: interesting += 1 heapq.heappush(visit, -p) elif side[p] and side[p] != side[r]: # p was interesting but now we know better side[p] = 0 interesting -= 1 hascommonancestor = True if side[r]: limit = r # lowest rev visited interesting -= 1 if not hascommonancestor: return None return limit def _chain(src, dst, a, b): '''chain two sets of copies a->b''' t = a.copy() for k, v in b.iteritems(): 
if v in t: # found a chain if t[v] != k: # file wasn't renamed back to itself t[k] = t[v] if v not in dst: # chain was a rename, not a copy del t[v] if v in src: # file is a copy of an existing file t[k] = v # remove criss-crossed copies for k, v in t.items(): if k in src and v in dst: del t[k] return t def _tracefile(fctx, actx): '''return file context that is the ancestor of fctx present in actx''' stop = actx.rev() am = actx.manifest() for f in fctx.ancestors(): if am.get(f.path(), None) == f.filenode(): return f if f.rev() < stop: return None def _dirstatecopies(d): ds = d._repo.dirstate c = ds.copies().copy() for k in c.keys(): if ds[k] not in 'anm': del c[k] return c def _forwardcopies(a, b): '''find {dst@b: src@a} copy mapping where a is an ancestor of b''' # check for working copy w = None if b.rev() is None: w = b b = w.p1() if a == b: # short-circuit to avoid issues with merge states return _dirstatecopies(w) # find where new files came from # we currently don't try to find where old files went, too expensive # this means we can miss a case like 'hg rm b; hg cp a b' cm = {} missing = set(b.manifest().iterkeys()) missing.difference_update(a.manifest().iterkeys()) for f in missing: ofctx = _tracefile(b[f], a) if ofctx: cm[f] = ofctx.path() # combine copies from dirstate if necessary if w is not None: cm = _chain(a, w, cm, _dirstatecopies(w)) return cm def _backwardrenames(a, b): # Even though we're not taking copies into account, 1:n rename situations # can still exist (e.g. hg cp a b; hg mv a c). In those cases we # arbitrarily pick one of the renames. 
f = _forwardcopies(b, a) r = {} for k, v in sorted(f.iteritems()): # remove copies if v in a: continue r[v] = k return r def pathcopies(x, y): '''find {dst@y: src@x} copy mapping for directed compare''' if x == y or not x or not y: return {} a = y.ancestor(x) if a == x: return _forwardcopies(x, y) if a == y: return _backwardrenames(x, y) return _chain(x, y, _backwardrenames(x, a), _forwardcopies(a, y)) def mergecopies(repo, c1, c2, ca): """ Find moves and copies between context c1 and c2 that are relevant for merging. Returns four dicts: "copy", "movewithdir", "diverge", and "renamedelete". "copy" is a mapping from destination name -> source name, where source is in c1 and destination is in c2 or vice-versa. "movewithdir" is a mapping from source name -> destination name, where the file at source present in one context but not the other needs to be moved to destination by the merge process, because the other context moved the directory it is in. "diverge" is a mapping of source name -> list of destination names for divergent renames. "renamedelete" is a mapping of source name -> list of destination names for files deleted in c1 that were renamed in c2 or vice-versa. """ # avoid silly behavior for update from empty dir if not c1 or not c2 or c1 == c2: return {}, {}, {}, {} # avoid silly behavior for parent -> working dir if c2.node() is None and c1.node() == repo.dirstate.p1(): return repo.dirstate.copies(), {}, {}, {} limit = _findlimit(repo, c1.rev(), c2.rev()) if limit is None: # no common ancestor, no copies return {}, {}, {}, {} m1 = c1.manifest() m2 = c2.manifest() ma = ca.manifest() def makectx(f, n): if len(n) != 20: # in a working context? 
if c1.rev() is None: return c1.filectx(f) return c2.filectx(f) return repo.filectx(f, fileid=n) ctx = util.lrucachefunc(makectx) copy = {} movewithdir = {} fullcopy = {} diverge = {} def _checkcopies(f, m1, m2): checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy) repo.ui.debug(" searching for copies back to rev %d\n" % limit) u1 = _nonoverlap(m1, m2, ma) u2 = _nonoverlap(m2, m1, ma) if u1: repo.ui.debug(" unmatched files in local:\n %s\n" % "\n ".join(u1)) if u2: repo.ui.debug(" unmatched files in other:\n %s\n" % "\n ".join(u2)) for f in u1: _checkcopies(f, m1, m2) for f in u2: _checkcopies(f, m2, m1) renamedelete = {} renamedelete2 = set() diverge2 = set() for of, fl in diverge.items(): if len(fl) == 1 or of in c1 or of in c2: del diverge[of] # not actually divergent, or not a rename if of not in c1 and of not in c2: # renamed on one side, deleted on the other side, but filter # out files that have been renamed and then deleted renamedelete[of] = [f for f in fl if f in c1 or f in c2] renamedelete2.update(fl) # reverse map for below else: diverge2.update(fl) # reverse map for below if fullcopy: repo.ui.debug(" all copies found (* = to merge, ! = divergent, " "% = renamed and deleted):\n") for f in sorted(fullcopy): note = "" if f in copy: note += "*" if f in diverge2: note += "!" 
if f in renamedelete2: note += "%" repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f, note)) del diverge2 if not fullcopy: return copy, movewithdir, diverge, renamedelete repo.ui.debug(" checking for directory renames\n") # generate a directory move map d1, d2 = c1.dirs(), c2.dirs() d1.addpath('/') d2.addpath('/') invalid = set() dirmove = {} # examine each file copy for a potential directory move, which is # when all the files in a directory are moved to a new directory for dst, src in fullcopy.iteritems(): dsrc, ddst = _dirname(src), _dirname(dst) if dsrc in invalid: # already seen to be uninteresting continue elif dsrc in d1 and ddst in d1: # directory wasn't entirely moved locally invalid.add(dsrc) elif dsrc in d2 and ddst in d2: # directory wasn't entirely moved remotely invalid.add(dsrc) elif dsrc in dirmove and dirmove[dsrc] != ddst: # files from the same directory moved to two different places invalid.add(dsrc) else: # looks good so far dirmove[dsrc + "/"] = ddst + "/" for i in invalid: if i in dirmove: del dirmove[i] del d1, d2, invalid if not dirmove: return copy, movewithdir, diverge, renamedelete for d in dirmove: repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])) # check unaccounted nonoverlapping files against directory moves for f in u1 + u2: if f not in fullcopy: for d in dirmove: if f.startswith(d): # new file added in a directory that was moved, move it df = dirmove[d] + f[len(d):] if df not in copy: movewithdir[f] = df repo.ui.debug((" pending file src: '%s' -> " "dst: '%s'\n") % (f, df)) break return copy, movewithdir, diverge, renamedelete def checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy): """ check possible copies of f from m1 to m2 ctx = function accepting (filename, node) that returns a filectx. 
f = the filename to check m1 = the source manifest m2 = the destination manifest ca = the changectx of the common ancestor limit = the rev number to not search beyond diverge = record all diverges in this dict copy = record all non-divergent copies in this dict fullcopy = record all copies in this dict """ ma = ca.manifest() def _related(f1, f2, limit): # Walk back to common ancestor to see if the two files originate # from the same file. Since workingfilectx's rev() is None it messes # up the integer comparison logic, hence the pre-step check for # None (f1 and f2 can only be workingfilectx's initially). if f1 == f2: return f1 # a match g1, g2 = f1.ancestors(), f2.ancestors() try: f1r, f2r = f1.rev(), f2.rev() if f1r is None: f1 = g1.next() if f2r is None: f2 = g2.next() while True: f1r, f2r = f1.rev(), f2.rev() if f1r > f2r: f1 = g1.next() elif f2r > f1r: f2 = g2.next() elif f1 == f2: return f1 # a match elif f1r == f2r or f1r < limit or f2r < limit: return False # copy no longer relevant except StopIteration: return False of = None seen = set([f]) for oc in ctx(f, m1[f]).ancestors(): ocr = oc.rev() of = oc.path() if of in seen: # check limit late - grab last rename before if ocr < limit: break continue seen.add(of) fullcopy[f] = of # remember for dir rename detection if of not in m2: continue # no match, keep looking if m2[of] == ma.get(of): break # no merge needed, quit early c2 = ctx(of, m2[of]) cr = _related(oc, c2, ca.rev()) if cr and (of == f or of == c2.path()): # non-divergent copy[f] = of of = None break if of in ma: diverge.setdefault(of, []).append(f)
vmg/hg-stable
mercurial/copies.py
Python
gpl-2.0
12,819
0.001872
from __future__ import absolute_import import os import zmq import uuid as uuid_pkg import time import binascii import random import socket import struct import marshal import mmap from multiprocessing import Manager, Condition from mmap import ACCESS_WRITE, ACCESS_READ from dpark.utils.log import get_logger from dpark.utils import compress, decompress, spawn from dpark.cache import Cache from dpark.serialize import marshalable from dpark.env import env import six from six.moves import range, map, cPickle try: from itertools import izip except ImportError: izip = zip logger = get_logger(__name__) MARSHAL_TYPE, PICKLE_TYPE = list(range(2)) BLOCK_SHIFT = 20 BLOCK_SIZE = 1 << BLOCK_SHIFT GUIDE_ADDR = 'NewBroadcastGuideAddr' DOWNLOAD_ADDR = 'NewDownloadAddr' BATCHED_BLOCKS = 3 GUIDE_STOP, GUIDE_GET_SOURCES, GUIDE_SET_SOURCES, GUIDE_REPORT_BAD = list(range(4)) SERVER_STOP, SERVER_FETCH, SERVER_FETCH_FAIL, SERVER_FETCH_OK, \ DATA_GET, DATA_GET_OK, DATA_GET_FAIL, DATA_DOWNLOADING, SERVER_CLEAR_ITEM = list(range(9)) class GuideManager(object): def __init__(self): self._started = False self.guides = {} self.host = socket.gethostname() self.guide_thread = None self.guide_addr = None self.register_addr = {} self.ctx = zmq.Context() def start(self): if self._started: return self._started = True self.guide_thread = self.start_guide() env.register(GUIDE_ADDR, self.guide_addr) def start_guide(self): sock = self.ctx.socket(zmq.REP) port = sock.bind_to_random_port('tcp://0.0.0.0') self.guide_addr = 'tcp://%s:%d' % (self.host, port) def run(): logger.debug("guide start at %s", self.guide_addr) while self._started: if not sock.poll(1000, zmq.POLLIN): continue type_, msg = sock.recv_pyobj() if type_ == GUIDE_STOP: sock.send_pyobj(0) break elif type_ == GUIDE_GET_SOURCES: uuid = msg sources = None if uuid in self.guides: sources = self.guides[uuid] else: logger.warning('uuid %s NOT REGISTERED in guide server', uuid) sock.send_pyobj(sources) elif type_ == GUIDE_SET_SOURCES: uuid, addr, 
bitmap = msg if any(bitmap): sources = None if uuid in self.guides: sources = self.guides[uuid] if sources: sources[addr] = bitmap else: self.guides[uuid] = {addr: bitmap} self.register_addr[uuid] = addr sock.send_pyobj(None) elif type_ == GUIDE_REPORT_BAD: uuid, addr = msg sources = self.guides[uuid] if addr in sources: if addr != self.register_addr[uuid]: del sources[addr] else: logger.warning('The addr %s to delete is the register Quit!!!', addr) sock.send_pyobj(None) else: logger.error('Unknown guide message: %s %s', type_, msg) sock.send_pyobj(None) return spawn(run) def shutdown(self): if not self._started: return self._started = False if self.guide_thread and self.guide_addr. \ startswith('tcp://%s:' % socket.gethostname()): self.guide_thread.join(timeout=1) if self.guide_thread.is_alive(): logger.warning("guide_thread not stopped.") self.guide_addr = None def check_memory(location): try: import psutil pid = os.getpid() p = psutil.Process(pid) rss = p.memory_info().rss >> 20 logger.info('memory rss %d MB in host %s at ', rss, socket.gethostname(), location) except ImportError: logger.warning('import psutil failed') class DownloadManager(object): def __init__(self): self._started = False self.server_thread = None self.download_threads = {} self.uuid_state_dict = None self.uuid_map_dict = None self.guide_addr = None self.server_addr = None self.host = None self.ctx = None self.random_inst = None self.master_broadcast_blocks = {} def start(self): if self._started: return self.manager = manager = Manager() self.shared_uuid_fn_dict = manager.dict() self.shared_uuid_map_dict = manager.dict() self.shared_master_blocks = manager.dict() self.download_cond = Condition() self._started = True self.ctx = zmq.Context() self.host = socket.gethostname() if GUIDE_ADDR not in env.environ: start_guide_manager() self.guide_addr = env.get(GUIDE_ADDR) self.random_inst = random.SystemRandom() self.server_addr, self.server_thread = self.start_server() self.uuid_state_dict = {} 
self.uuid_map_dict = {} self.master_broadcast_blocks = {} env.register(DOWNLOAD_ADDR, self.server_addr) def start_server(self): sock = self.ctx.socket(zmq.REP) sock.setsockopt(zmq.LINGER, 0) port = sock.bind_to_random_port("tcp://0.0.0.0") server_addr = 'tcp://%s:%d' % (self.host, port) guide_sock = self.ctx.socket(zmq.REQ) guide_sock.setsockopt(zmq.LINGER, 0) guide_sock.connect(self.guide_addr) def run(): logger.debug("server started at %s", server_addr) while self._started: if not sock.poll(1000, zmq.POLLIN): continue type_, msg = sock.recv_pyobj() logger.debug('server recv: %s %s', type_, msg) if type_ == SERVER_STOP: sock.send_pyobj(None) break elif type_ == SERVER_FETCH: uuid, indices, client_addr = msg if uuid in self.master_broadcast_blocks: block_num = len(self.master_broadcast_blocks[uuid]) bls = [] for index in indices: if index >= block_num: logger.warning('input index too big %s for ' 'len of blocks %d from host %s', str(indices), block_num, client_addr) sock.send_pyobj((SERVER_FETCH_FAIL, None)) else: bls.append(self.master_broadcast_blocks[uuid][index]) sock.send_pyobj((SERVER_FETCH_OK, (indices, bls))) elif uuid in self.uuid_state_dict: fd = os.open(self.uuid_state_dict[uuid][0], os.O_RDONLY) mmfp = mmap.mmap(fd, 0, access=ACCESS_READ) os.close(fd) bitmap = self.uuid_map_dict[uuid] block_num = len(bitmap) bls = [] for index in indices: if index >= block_num: logger.warning('input index too big %s for ' 'len of blocks %d from host %s', str(indices), block_num, client_addr) sock.send_pyobj((SERVER_FETCH_FAIL, None)) else: mmfp.seek(bitmap[index][0]) block = mmfp.read(bitmap[index][1]) bls.append(block) mmfp.close() sock.send_pyobj((SERVER_FETCH_OK, (indices, bls))) else: logger.warning('server fetch failed for uuid %s ' 'not exists in server %s from host %s', uuid, socket.gethostname(), client_addr) sock.send_pyobj((SERVER_FETCH_FAIL, None)) elif type_ == DATA_GET: uuid, compressed_size = msg if uuid not in self.uuid_state_dict or not 
self.uuid_state_dict[uuid][1]: if uuid not in self.download_threads: sources = self._get_sources(uuid, guide_sock) if not sources: logger.warning('get sources from guide server failed in host %s', socket.gethostname()) sock.send_pyobj(DATA_GET_FAIL) continue self.download_threads[uuid] = spawn(self._download_blocks, *[sources, uuid, compressed_size]) sock.send_pyobj(DATA_DOWNLOADING) else: sock.send_pyobj(DATA_DOWNLOADING) else: sock.send_pyobj(DATA_GET_OK) elif type_ == SERVER_CLEAR_ITEM: uuid = msg self.clear(uuid) sock.send_pyobj(None) else: logger.error('Unknown server message: %s %s', type_, msg) sock.send_pyobj(None) sock.close() logger.debug("stop Broadcast server %s", server_addr) for uuid in list(self.uuid_state_dict.keys()): self.clear(uuid) return server_addr, spawn(run) def get_blocks(self, uuid): if uuid in self.master_broadcast_blocks: return self.master_broadcast_blocks[uuid] if uuid in self.shared_master_blocks: return self.shared_master_blocks[uuid] def register_blocks(self, uuid, blocks): if uuid in self.master_broadcast_blocks: logger.warning('the block uuid %s exists in dict', uuid) return self.master_broadcast_blocks[uuid] = blocks self.shared_master_blocks[uuid] = blocks def _get_sources(self, uuid, source_sock): try: source_sock.send_pyobj((GUIDE_GET_SOURCES, uuid)) sources = source_sock.recv_pyobj() except: logger.warning('GET sources failed for addr %s with ZMQ ERR', self.server_addr) sources = {} return sources def _update_sources(self, uuid, bitmap, source_sock): try: source_sock.send_pyobj((GUIDE_SET_SOURCES, (uuid, self.server_addr, bitmap))) source_sock.recv_pyobj() except: pass def _download_blocks(self, sources, uuid, compressed_size): block_num = 0 bitmap = [0] write_mmap_handler = None download_guide_sock = self.ctx.socket(zmq.REQ) download_guide_sock.setsockopt(zmq.LINGER, 0) download_guide_sock.connect(self.guide_addr) def _report_bad(addr): logger.debug('fetch blocks failed from server %s', addr) 
download_guide_sock.send_pyobj((GUIDE_REPORT_BAD, (uuid, addr))) download_guide_sock.recv_pyobj() def _fetch(addr, indices, bit_map): sock = self.ctx.socket(zmq.REQ) try: sock.setsockopt(zmq.LINGER, 0) sock.connect(addr) sock.send_pyobj((SERVER_FETCH, (uuid, indices, self.server_addr))) avail = sock.poll(1 * 1000, zmq.POLLIN) check_sock = None if not avail: try: check_sock = socket.socket() addr_list = addr[len('tcp://'):].split(':') addr_list[1] = int(addr_list[1]) check_sock.connect(tuple(addr_list)) except Exception as e: logger.warning('connect the addr %s failed with exception %s', addr, e) _report_bad(addr) else: logger.debug("%s recv broadcast %s from %s timeout", self.server_addr, str(indices), addr) finally: if check_sock: check_sock.close() return result, msg = sock.recv_pyobj() if result == SERVER_FETCH_FAIL: _report_bad(addr) return if result == SERVER_FETCH_OK: indices, blocks = msg for rank, index in enumerate(indices): if blocks[rank] is not None: write_mmap_handler.seek(bit_map[index][0]) write_mmap_handler.write(blocks[rank]) bitmap[index] = bit_map[index] else: raise RuntimeError('Unknown server response: %s %s' % (result, msg)) finally: sock.close() final_path = env.workdir.alloc_tmp_file("broadcast") self.uuid_state_dict[uuid] = (final_path, False) fp = open(final_path, 'wb') fp.truncate(compressed_size) fp.close() fd = os.open(final_path, os.O_RDWR) write_mmap_handler = mmap.mmap(fd, 0, access=ACCESS_WRITE) os.close(fd) while not all(bitmap): remote = [] for _addr, _bitmap in six.iteritems(sources): if block_num == 0: block_num = len(_bitmap) bitmap = [0] * block_num self.uuid_map_dict[uuid] = bitmap if not _addr.startswith('tcp://%s:' % self.host): remote.append((_addr, _bitmap)) self.random_inst.shuffle(remote) for _addr, _bitmap in remote: _indices = [i for i in range(block_num) if not bitmap[i] and _bitmap[i]] if _indices: self.random_inst.shuffle(_indices) _fetch(_addr, _indices[:BATCHED_BLOCKS], _bitmap) self._update_sources(uuid, bitmap, 
download_guide_sock) sources = self._get_sources(uuid, download_guide_sock) write_mmap_handler.flush() write_mmap_handler.close() self.shared_uuid_map_dict[uuid] = bitmap self.shared_uuid_fn_dict[uuid] = self.uuid_state_dict[uuid][0] self.uuid_state_dict[uuid] = self.uuid_state_dict[uuid][0], True download_guide_sock.close() with self.download_cond: self.download_cond.notify_all() def clear(self, uuid): if uuid in self.master_broadcast_blocks: del self.master_broadcast_blocks[uuid] del self.shared_master_blocks[uuid] if uuid in self.uuid_state_dict: del self.uuid_state_dict[uuid] if uuid in self.shared_uuid_fn_dict: del self.shared_uuid_fn_dict[uuid] del self.shared_uuid_map_dict[uuid] def shutdown(self): if not self._started: return self._started = False if self.server_thread and self.server_addr. \ startswith('tcp://%s:' % socket.gethostname()): for _, th in six.iteritems(self.download_threads): th.join(timeout=0.1) # only in executor, not needed self.server_thread.join(timeout=1) if self.server_thread.is_alive(): logger.warning("Download mananger server_thread not stopped.") self.manager.shutdown() # shutdown will try join and terminate server process def accumulate_list(l): acc = 0 acc_l = [] for item in l: acc_l.append(acc) acc += item acc_l.append(acc) return acc_l class BroadcastManager(object): header_fmt = '>BI' header_len = struct.calcsize(header_fmt) def __init__(self): self._started = False self.guide_addr = None self.download_addr = None self.cache = None self.shared_uuid_fn_dict = None self.shared_uuid_map_dict = None self.download_cond = None self.ctx = None def start(self): if self._started: return self._started = True start_download_manager() self.guide_addr = env.get(GUIDE_ADDR) self.download_addr = env.get(DOWNLOAD_ADDR) self.cache = Cache() self.ctx = zmq.Context() self.shared_uuid_fn_dict = _download_manager.shared_uuid_fn_dict self.shared_uuid_map_dict = _download_manager.shared_uuid_map_dict self.download_cond = 
_download_manager.download_cond def register(self, uuid, value): self.start() if uuid in self.shared_uuid_fn_dict: raise RuntimeError('broadcast %s has already registered' % uuid) blocks, size, block_map = self.to_blocks(uuid, value) _download_manager.register_blocks(uuid, blocks) self._update_sources(uuid, block_map) self.cache.put(uuid, value) return size def _update_sources(self, uuid, bitmap): guide_sock = self.ctx.socket(zmq.REQ) try: guide_sock.setsockopt(zmq.LINGER, 0) guide_sock.connect(self.guide_addr) guide_sock.send_pyobj((GUIDE_SET_SOURCES, (uuid, self.download_addr, bitmap))) guide_sock.recv_pyobj() finally: guide_sock.close() def clear(self, uuid): assert self._started self.cache.put(uuid, None) sock = self.ctx.socket(zmq.REQ) sock.connect(self.download_addr) sock.send_pyobj((SERVER_CLEAR_ITEM, uuid)) sock.recv_pyobj() sock.close() def fetch(self, uuid, compressed_size): start_download_manager() self.start() value = self.cache.get(uuid) if value is not None: return value blocks = _download_manager.get_blocks(uuid) if blocks is None: blocks = self.fetch_blocks(uuid, compressed_size) value = self.from_blocks(uuid, blocks) return value @staticmethod def _get_blocks_by_filename(file_name, block_map): fp = open(file_name, 'rb') buf = fp.read() blocks = [buf[offset: offset + size] for offset, size in block_map] fp.close() return blocks def fetch_blocks(self, uuid, compressed_size): if uuid in self.shared_uuid_fn_dict: return self._get_blocks_by_filename(self.shared_uuid_fn_dict[uuid], self.shared_uuid_map_dict[uuid]) download_sock = self.ctx.socket(zmq.REQ) download_sock.connect(self.download_addr) download_sock.send_pyobj((DATA_GET, (uuid, compressed_size))) res = download_sock.recv_pyobj() if res == DATA_GET_OK: return self._get_blocks_by_filename(self.shared_uuid_fn_dict[uuid], self.shared_uuid_map_dict[uuid]) if res == DATA_GET_FAIL: raise RuntimeError('Data GET failed for uuid:%s' % uuid) while True: with self.download_cond: if uuid not in 
self.shared_uuid_fn_dict: self.download_cond.wait() else: break if uuid in self.shared_uuid_fn_dict: return self._get_blocks_by_filename(self.shared_uuid_fn_dict[uuid], self.shared_uuid_map_dict[uuid]) else: raise RuntimeError('get blocks failed') def to_blocks(self, uuid, obj): try: if marshalable(obj): buf = marshal.dumps((uuid, obj)) type_ = MARSHAL_TYPE else: buf = cPickle.dumps((uuid, obj), -1) type_ = PICKLE_TYPE except Exception: buf = cPickle.dumps((uuid, obj), -1) type_ = PICKLE_TYPE checksum = binascii.crc32(buf) & 0xFFFF stream = struct.pack(self.header_fmt, type_, checksum) + buf blockNum = (len(stream) + (BLOCK_SIZE - 1)) >> BLOCK_SHIFT blocks = [compress(stream[i * BLOCK_SIZE:(i + 1) * BLOCK_SIZE]) for i in range(blockNum)] sizes = [len(block) for block in blocks] size_l = accumulate_list(sizes) block_map = list(izip(size_l[:-1], sizes)) return blocks, size_l[-1], block_map def from_blocks(self, uuid, blocks): stream = b''.join(map(decompress, blocks)) type_, checksum = struct.unpack(self.header_fmt, stream[:self.header_len]) buf = stream[self.header_len:] _checksum = binascii.crc32(buf) & 0xFFFF if _checksum != checksum: raise RuntimeError('Wrong blocks: checksum: %s, expected: %s' % ( _checksum, checksum)) if type_ == MARSHAL_TYPE: _uuid, value = marshal.loads(buf) elif type_ == PICKLE_TYPE: _uuid, value = cPickle.loads(buf) else: raise RuntimeError('Unknown serialization type: %s' % type_) if uuid != _uuid: raise RuntimeError('Wrong blocks: uuid: %s, expected: %s' % (_uuid, uuid)) return value def shutdown(self): if not self._started: return self._started = False _manager = BroadcastManager() _download_manager = DownloadManager() _guide_manager = GuideManager() def start_guide_manager(): _guide_manager.start() def start_download_manager(): _download_manager.start() def stop_manager(): _manager.shutdown() _download_manager.shutdown() _guide_manager.shutdown() env.environ.pop(GUIDE_ADDR, None) env.environ.pop(DOWNLOAD_ADDR, None) class 
Broadcast(object): def __init__(self, value): assert value is not None, 'broadcast object should not been None' self.uuid = str(uuid_pkg.uuid4()) self.value = value self.compressed_size = _manager.register(self.uuid, self.value) block_num = (self.compressed_size + BLOCK_SIZE - 1) >> BLOCK_SHIFT self.bytes = block_num * BLOCK_SIZE logger.info("broadcast %s in %d blocks, %d bytes", self.uuid, block_num, self.compressed_size) def clear(self): _manager.clear(self.uuid) def __getstate__(self): return self.uuid, self.compressed_size def __setstate__(self, v): self.uuid, self.compressed_size = v def __getattr__(self, name): if name != 'value': return getattr(self.value, name) t = time.time() value = _manager.fetch(self.uuid, self.compressed_size) if value is None: raise RuntimeError("fetch broadcast failed") env.task_stats.secs_broadcast += time.time() - t self.value = value return value def __len__(self): return len(self.value) def __iter__(self): return self.value.__iter__() def __getitem__(self, key): return self.value.__getitem__(key) def __contains__(self, item): return self.value.__contains__(item) def __missing__(self, key): return self.value.__missing__(key) def __reversed__(self): return self.value.__reversed__()
douban/dpark
dpark/broadcast.py
Python
bsd-3-clause
24,223
0.001238
""" QuarkPlayer, a Phonon media player Copyright (C) 2008-2009 Tanguy Krotoff <tkrotoff@gmail.com> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import sys, os, glob import ftplib def ftp_upload_files(host, login, password, host_path, files_to_upload): """ Uploads files onto a FTP server in binary mode. """ ftp = ftplib.FTP(host) ftp.login(login, password) for file_to_upload in files_to_upload: print 'upload {0} to ftp://{1}@{2}:{3}'.format(file_to_upload, login, host, host_path) file = open(file_to_upload, 'rb') destpath = os.path.join(host_path, os.path.basename(file_to_upload)) ftp.storbinary('STOR ' + destpath, file) mode = '644' print 'chmod {0} {1}'.format(mode, file_to_upload) ftp.voidcmd('SITE CHMOD ' + mode + ' ' + destpath) ftp.quit() if __name__ == "__main__": loginfile = 'login.txt' file = open(loginfile, 'r') login = file.readline().strip() password = file.readline().strip() file.close() os.remove(loginfile) files_to_upload = [] for i, pattern in enumerate(sys.argv): if i > 0: # Fix a bug under Windows, # this script gets called with these arguments: # ['upload_package.py', "'*.exe'", "'*.deb'", "'*.rpm'"] # instead of ['upload_package.py', '*.exe', '*.deb', '*.rpm'] pattern = pattern.replace('\'', '') pattern = pattern.replace('\"', '') files_to_upload.extend(glob.glob(pattern)) ftp_upload_files('192.168.0.12', login, password, '/var/www/snapshots/', files_to_upload) for file_to_upload in 
files_to_upload: print 'rm {0}'.format(file_to_upload) os.remove(file_to_upload)
tkrotoff/QuarkPlayer
buildbot/upload_package.py
Python
gpl-3.0
2,173
0.018408
from django import forms from .models import MemberRSVP class EventAttendeeForm(forms.ModelForm): id = forms.IntegerField(widget=forms.HiddenInput) worked_on = forms.CharField(widget=forms.Textarea(attrs={ 'cols': '35', 'rows': '5' })) class Meta: model = MemberRSVP fields = ('id', 'worked_on',)
DjangoNYC/squid
squid/core/forms.py
Python
mit
352
0
"""This program is used to generate the coefficients c00, c01 and c11 used in the demo.""" # Copyright (C) 2007-2009 Anders Logg # # This file is part of DOLFIN. # # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see <http://www.gnu.org/licenses/>. # # First added: 2009-12-16 # Last changed: 2009-12-16 from dolfin import * # Create mesh mesh = UnitSquareMesh(32, 32) # Create mesh functions for c00, c01, c11 c00 = MeshFunction("double", mesh, 2) c01 = MeshFunction("double", mesh, 2) c11 = MeshFunction("double", mesh, 2) # Iterate over mesh and set values for cell in cells(mesh): if cell.midpoint().x() < 0.5: c00[cell] = 1.0 c01[cell] = 0.3 c11[cell] = 2.0 else: c00[cell] = 3.0 c01[cell] = 0.5 c11[cell] = 4.0 # Store to file mesh_file = File("mesh.xml.gz") c00_file = File("c00.xml.gz") c01_file = File("c01.xml.gz") c11_file = File("c11.xml.gz") mesh_file << mesh c00_file << c00 c01_file << c01 c11_file << c11 # Plot mesh functions plot(c00, title="C00") plot(c01, title="C01") plot(c11, title="C11") interactive()
alogg/dolfin
demo/undocumented/tensor-weighted-poisson/python/generate_data.py
Python
gpl-3.0
1,642
0
# Copyright (C) 2010 CENATIC: Centro Nacional de Referencia de # Aplicacion de las TIC basadas en Fuentes Abiertas, Spain. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # Neither the name of the CENATIC nor the names of its contributors # may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# # You may contact the copyright holder at: Fundacion CENATIC, Edificio # de Servicios Sociales: C/ Vistahermosa, 1, 3ra planta, 06200 # Almendralejo (Badajoz), Spain from DBSlayer import Query def get_type_name (type_id): l = get_type (type_id) if not l: return None return l['name'] def get_type (type_id): q = "SELECT id, type "\ "FROM asset_types WHERE id=%(type_id)s;" % locals() query = Query(q) if len(query) != 1: return None ret = {'id': type_id, 'name': query['type'][0]} return ret def get_types (): q = "SELECT id, type "\ "FROM asset_types;" % locals() query = Query(q) if not len(query): return None ret = [] for x in query: d={'id': query[x]['id'], 'name': query[x]['type']} ret.append(d) return ret def test (): import sys try: type_id = sys.argv[1] except IndexError: print 'Required test parameters: type_id' sys.exit(1) print 'Types:', get_types() print 'type_id %s, type_name %s' % (type_id, get_type_name(type_id)) print get_type(type_id), if __name__ == '__main__': test()
helix84/activae
src/Type.py
Python
bsd-3-clause
2,833
0.003883
# -*- coding: utf-8 -*- # # genologics-sql documentation build configuration file, created by # sphinx-quickstart on Wed Jan 27 15:17:17 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex import sphinx_rtd_theme # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('../../')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'genologics-sql' copyright = u'2016, Denis Moreno' author = u'Denis Moreno' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.0.1' # The full version, including alpha/beta/rc tags. 
release = '0.0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. #html_theme = 'alabaster' html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. 
#html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'genologics-sqldoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'genologics-sql.tex', u'genologics-sql Documentation', u'Denis Moreno', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. 
#latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'genologics-sql', u'genologics-sql Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'genologics-sql', u'genologics-sql Documentation', author, 'genologics-sql', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
Galithil/genologics_sql
doc/source/conf.py
Python
mit
9,443
0.006036
from unittest import TestCase

from dark.simplify import simplifyTitle


class SimplifyTitle(TestCase):
    """
    Tests for the dark.simplify.simplifyTitle function.
    """

    # The full title that most of the tests below simplify.
    TITLE = 'Funny sea lion polyomavirus 1 CSL6994'

    def _check(self, expected, target):
        # Helper: simplify the shared title against ``target`` and compare
        # with the expected simplified result.
        self.assertEqual(expected, simplifyTitle(self.TITLE, target))

    def testEmptyTitle(self):
        """
        An empty title must remain empty, even with a non-empty target.
        """
        self.assertEqual('', simplifyTitle('', 'xxx'))

    def testEmtpyTitleWithEmptyTarget(self):
        """
        An empty title with an empty target must remain empty.
        """
        self.assertEqual('', simplifyTitle('', ''))

    def testPrefix(self):
        """
        When the target is a prefix of a word, the result must run up to and
        include the whole word that begins with the target.
        """
        self._check('Funny sea lion polyoma', 'polyoma')

    def testSuffix(self):
        """
        When the target is a suffix of a word, the result must run up to and
        include the whole word that ends with the target.
        """
        self._check('Funny sea lion polyomavirus', 'virus')

    def testContained(self):
        """
        When the target appears inside a word, the result must run up to the
        target, including the prefix of the word that contains it.
        """
        self._check('Funny sea lion polyoma', 'yoma')

    def testExact(self):
        """
        When the target exactly matches a word, the result must run up to and
        include that word.
        """
        self._check('Funny sea lion', 'lion')
bamueh/dark-matter
test/test_simplify.py
Python
mit
1,892
0
from django import forms

from apu.models import Persona


class FormularioContactos(forms.Form):
    """Simple contact form: a subject, an optional e-mail and a message."""
    asunto = forms.CharField()
    email = forms.EmailField(required=False)
    mensaje = forms.CharField()


class PersonaForm(forms.ModelForm):
    """ModelForm for ``Persona``.

    Declares explicit fields (with ``help_text``) for every column exposed
    through ``Meta.fields``.
    """
    nombre = forms.CharField(max_length=50, help_text="nombre Persona")
    dni = forms.CharField(max_length=9, help_text="dni Persona")
    pais = forms.CharField(max_length=20, help_text="pais Persona")
    equipo = forms.CharField(max_length=10, help_text="equipo Persona")
    # BUG FIX: django.forms has no TextField (that is a model field); a
    # multi-line form field is a CharField rendered with a Textarea widget.
    # The original ``forms.TextField(...)`` raised AttributeError at import.
    hobbies = forms.CharField(max_length=200, widget=forms.Textarea,
                              help_text="hobbies Persona")
    # password = forms.CharField(widget=forms.PasswordInput, max_length=15)
    fondo = forms.IntegerField()

    class Meta:
        model = Persona
        fields = ('nombre', 'dni', 'pais', 'equipo', 'hobbies', 'fondo')
javiergarridomellado/ej5
apu/forms.py
Python
gpl-2.0
745
0.02953
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Example Airflow DAG for Google Cloud Dataflow service """ import os from airflow import models from airflow.providers.google.cloud.operators.dataflow import DataflowStartFlexTemplateOperator from airflow.utils.dates import days_ago GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project") DATAFLOW_FLEX_TEMPLATE_JOB_NAME = os.environ.get( 'GCP_DATAFLOW_FLEX_TEMPLATE_JOB_NAME', "dataflow-flex-template" ) # For simplicity we use the same topic name as the subscription name. 
PUBSUB_FLEX_TEMPLATE_TOPIC = os.environ.get(
    'GCP_DATAFLOW_PUBSUB_FLEX_TEMPLATE_TOPIC', "dataflow-flex-template"
)
PUBSUB_FLEX_TEMPLATE_SUBSCRIPTION = PUBSUB_FLEX_TEMPLATE_TOPIC
GCS_FLEX_TEMPLATE_TEMPLATE_PATH = os.environ.get(
    'GCP_DATAFLOW_GCS_FLEX_TEMPLATE_TEMPLATE_PATH',
    "gs://INVALID BUCKET NAME/samples/dataflow/templates/streaming-beam-sql.json",
)
BQ_FLEX_TEMPLATE_DATASET = os.environ.get('GCP_DATAFLOW_BQ_FLEX_TEMPLATE_DATASET', 'airflow_dataflow_samples')
# BUG FIX: the variable name previously contained a stray trailing '>'
# ('GCP_DATAFLOW_BQ_FLEX_TEMPLATE_LOCATION>'), so the environment override
# could never be picked up and the default was always used.
BQ_FLEX_TEMPLATE_LOCATION = os.environ.get('GCP_DATAFLOW_BQ_FLEX_TEMPLATE_LOCATION', 'us-west1')

with models.DAG(
    dag_id="example_gcp_dataflow_flex_template_java",
    start_date=days_ago(1),
    schedule_interval=None,  # Override to match your needs
) as dag_flex_template:
    # [START howto_operator_start_template_job]
    start_flex_template = DataflowStartFlexTemplateOperator(
        task_id="start_flex_template_streaming_beam_sql",
        body={
            "launchParameter": {
                "containerSpecGcsPath": GCS_FLEX_TEMPLATE_TEMPLATE_PATH,
                "jobName": DATAFLOW_FLEX_TEMPLATE_JOB_NAME,
                "parameters": {
                    "inputSubscription": PUBSUB_FLEX_TEMPLATE_SUBSCRIPTION,
                    "outputTable": f"{GCP_PROJECT_ID}:{BQ_FLEX_TEMPLATE_DATASET}.streaming_beam_sql",
                },
            }
        },
        do_xcom_push=True,
        location=BQ_FLEX_TEMPLATE_LOCATION,
    )
    # [END howto_operator_start_template_job]
nathanielvarona/airflow
airflow/providers/google/cloud/example_dags/example_dataflow_flex_template.py
Python
apache-2.0
2,774
0.001802
""" pygments.lexers._postgres_builtins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Self-updating data files for PostgreSQL lexer. :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ # Autogenerated: please edit them if you like wasting your time. KEYWORDS = ( 'ABORT', 'ABSOLUTE', 'ACCESS', 'ACTION', 'ADD', 'ADMIN', 'AFTER', 'AGGREGATE', 'ALL', 'ALSO', 'ALTER', 'ALWAYS', 'ANALYSE', 'ANALYZE', 'AND', 'ANY', 'ARRAY', 'AS', 'ASC', 'ASSERTION', 'ASSIGNMENT', 'ASYMMETRIC', 'AT', 'ATTACH', 'ATTRIBUTE', 'AUTHORIZATION', 'BACKWARD', 'BEFORE', 'BEGIN', 'BETWEEN', 'BIGINT', 'BINARY', 'BIT', 'BOOLEAN', 'BOTH', 'BY', 'CACHE', 'CALL', 'CALLED', 'CASCADE', 'CASCADED', 'CASE', 'CAST', 'CATALOG', 'CHAIN', 'CHAR', 'CHARACTER', 'CHARACTERISTICS', 'CHECK', 'CHECKPOINT', 'CLASS', 'CLOSE', 'CLUSTER', 'COALESCE', 'COLLATE', 'COLLATION', 'COLUMN', 'COLUMNS', 'COMMENT', 'COMMENTS', 'COMMIT', 'COMMITTED', 'CONCURRENTLY', 'CONFIGURATION', 'CONFLICT', 'CONNECTION', 'CONSTRAINT', 'CONSTRAINTS', 'CONTENT', 'CONTINUE', 'CONVERSION', 'COPY', 'COST', 'CREATE', 'CROSS', 'CSV', 'CUBE', 'CURRENT', 'CURRENT_CATALOG', 'CURRENT_DATE', 'CURRENT_ROLE', 'CURRENT_SCHEMA', 'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER', 'CURSOR', 'CYCLE', 'DATA', 'DATABASE', 'DAY', 'DEALLOCATE', 'DEC', 'DECIMAL', 'DECLARE', 'DEFAULT', 'DEFAULTS', 'DEFERRABLE', 'DEFERRED', 'DEFINER', 'DELETE', 'DELIMITER', 'DELIMITERS', 'DEPENDS', 'DESC', 'DETACH', 'DICTIONARY', 'DISABLE', 'DISCARD', 'DISTINCT', 'DO', 'DOCUMENT', 'DOMAIN', 'DOUBLE', 'DROP', 'EACH', 'ELSE', 'ENABLE', 'ENCODING', 'ENCRYPTED', 'END', 'ENUM', 'ESCAPE', 'EVENT', 'EXCEPT', 'EXCLUDE', 'EXCLUDING', 'EXCLUSIVE', 'EXECUTE', 'EXISTS', 'EXPLAIN', 'EXPRESSION', 'EXTENSION', 'EXTERNAL', 'EXTRACT', 'FALSE', 'FAMILY', 'FETCH', 'FILTER', 'FIRST', 'FLOAT', 'FOLLOWING', 'FOR', 'FORCE', 'FOREIGN', 'FORWARD', 'FREEZE', 'FROM', 'FULL', 'FUNCTION', 'FUNCTIONS', 'GENERATED', 'GLOBAL', 'GRANT', 'GRANTED', 'GREATEST', 'GROUP', 
'GROUPING', 'GROUPS', 'HANDLER', 'HAVING', 'HEADER', 'HOLD', 'HOUR', 'IDENTITY', 'IF', 'ILIKE', 'IMMEDIATE', 'IMMUTABLE', 'IMPLICIT', 'IMPORT', 'IN', 'INCLUDE', 'INCLUDING', 'INCREMENT', 'INDEX', 'INDEXES', 'INHERIT', 'INHERITS', 'INITIALLY', 'INLINE', 'INNER', 'INOUT', 'INPUT', 'INSENSITIVE', 'INSERT', 'INSTEAD', 'INT', 'INTEGER', 'INTERSECT', 'INTERVAL', 'INTO', 'INVOKER', 'IS', 'ISNULL', 'ISOLATION', 'JOIN', 'KEY', 'LABEL', 'LANGUAGE', 'LARGE', 'LAST', 'LATERAL', 'LEADING', 'LEAKPROOF', 'LEAST', 'LEFT', 'LEVEL', 'LIKE', 'LIMIT', 'LISTEN', 'LOAD', 'LOCAL', 'LOCALTIME', 'LOCALTIMESTAMP', 'LOCATION', 'LOCK', 'LOCKED', 'LOGGED', 'MAPPING', 'MATCH', 'MATERIALIZED', 'MAXVALUE', 'METHOD', 'MINUTE', 'MINVALUE', 'MODE', 'MONTH', 'MOVE', 'NAME', 'NAMES', 'NATIONAL', 'NATURAL', 'NCHAR', 'NEW', 'NEXT', 'NFC', 'NFD', 'NFKC', 'NFKD', 'NO', 'NONE', 'NORMALIZE', 'NORMALIZED', 'NOT', 'NOTHING', 'NOTIFY', 'NOTNULL', 'NOWAIT', 'NULL', 'NULLIF', 'NULLS', 'NUMERIC', 'OBJECT', 'OF', 'OFF', 'OFFSET', 'OIDS', 'OLD', 'ON', 'ONLY', 'OPERATOR', 'OPTION', 'OPTIONS', 'OR', 'ORDER', 'ORDINALITY', 'OTHERS', 'OUT', 'OUTER', 'OVER', 'OVERLAPS', 'OVERLAY', 'OVERRIDING', 'OWNED', 'OWNER', 'PARALLEL', 'PARSER', 'PARTIAL', 'PARTITION', 'PASSING', 'PASSWORD', 'PLACING', 'PLANS', 'POLICY', 'POSITION', 'PRECEDING', 'PRECISION', 'PREPARE', 'PREPARED', 'PRESERVE', 'PRIMARY', 'PRIOR', 'PRIVILEGES', 'PROCEDURAL', 'PROCEDURE', 'PROCEDURES', 'PROGRAM', 'PUBLICATION', 'QUOTE', 'RANGE', 'READ', 'REAL', 'REASSIGN', 'RECHECK', 'RECURSIVE', 'REF', 'REFERENCES', 'REFERENCING', 'REFRESH', 'REINDEX', 'RELATIVE', 'RELEASE', 'RENAME', 'REPEATABLE', 'REPLACE', 'REPLICA', 'RESET', 'RESTART', 'RESTRICT', 'RETURNING', 'RETURNS', 'REVOKE', 'RIGHT', 'ROLE', 'ROLLBACK', 'ROLLUP', 'ROUTINE', 'ROUTINES', 'ROW', 'ROWS', 'RULE', 'SAVEPOINT', 'SCHEMA', 'SCHEMAS', 'SCROLL', 'SEARCH', 'SECOND', 'SECURITY', 'SELECT', 'SEQUENCE', 'SEQUENCES', 'SERIALIZABLE', 'SERVER', 'SESSION', 'SESSION_USER', 'SET', 'SETOF', 'SETS', 'SHARE', 
'SHOW', 'SIMILAR', 'SIMPLE', 'SKIP', 'SMALLINT', 'SNAPSHOT', 'SOME', 'SQL', 'STABLE', 'STANDALONE', 'START', 'STATEMENT', 'STATISTICS', 'STDIN', 'STDOUT', 'STORAGE', 'STORED', 'STRICT', 'STRIP', 'SUBSCRIPTION', 'SUBSTRING', 'SUPPORT', 'SYMMETRIC', 'SYSID', 'SYSTEM', 'TABLE', 'TABLES', 'TABLESAMPLE', 'TABLESPACE', 'TEMP', 'TEMPLATE', 'TEMPORARY', 'TEXT', 'THEN', 'TIES', 'TIME', 'TIMESTAMP', 'TO', 'TRAILING', 'TRANSACTION', 'TRANSFORM', 'TREAT', 'TRIGGER', 'TRIM', 'TRUE', 'TRUNCATE', 'TRUSTED', 'TYPE', 'TYPES', 'UESCAPE', 'UNBOUNDED', 'UNCOMMITTED', 'UNENCRYPTED', 'UNION', 'UNIQUE', 'UNKNOWN', 'UNLISTEN', 'UNLOGGED', 'UNTIL', 'UPDATE', 'USER', 'USING', 'VACUUM', 'VALID', 'VALIDATE', 'VALIDATOR', 'VALUE', 'VALUES', 'VARCHAR', 'VARIADIC', 'VARYING', 'VERBOSE', 'VERSION', 'VIEW', 'VIEWS', 'VOLATILE', 'WHEN', 'WHERE', 'WHITESPACE', 'WINDOW', 'WITH', 'WITHIN', 'WITHOUT', 'WORK', 'WRAPPER', 'WRITE', 'XML', 'XMLATTRIBUTES', 'XMLCONCAT', 'XMLELEMENT', 'XMLEXISTS', 'XMLFOREST', 'XMLNAMESPACES', 'XMLPARSE', 'XMLPI', 'XMLROOT', 'XMLSERIALIZE', 'XMLTABLE', 'YEAR', 'YES', 'ZONE', ) DATATYPES = ( 'bigint', 'bigserial', 'bit', 'bit varying', 'bool', 'boolean', 'box', 'bytea', 'char', 'character', 'character varying', 'cidr', 'circle', 'date', 'decimal', 'double precision', 'float4', 'float8', 'inet', 'int', 'int2', 'int4', 'int8', 'integer', 'interval', 'json', 'jsonb', 'line', 'lseg', 'macaddr', 'macaddr8', 'money', 'numeric', 'path', 'pg_lsn', 'pg_snapshot', 'point', 'polygon', 'real', 'serial', 'serial2', 'serial4', 'serial8', 'smallint', 'smallserial', 'text', 'time', 'timestamp', 'timestamptz', 'timetz', 'tsquery', 'tsvector', 'txid_snapshot', 'uuid', 'varbit', 'varchar', 'with time zone', 'without time zone', 'xml', ) PSEUDO_TYPES = ( 'any', 'anyarray', 'anycompatible', 'anycompatiblearray', 'anycompatiblenonarray', 'anycompatiblerange', 'anyelement', 'anyenum', 'anynonarray', 'anyrange', 'cstring', 'event_trigger', 'fdw_handler', 'index_am_handler', 'internal', 
'language_handler', 'pg_ddl_command', 'record', 'table_am_handler', 'trigger', 'tsm_handler', 'unknown', 'void', ) # Remove 'trigger' from types PSEUDO_TYPES = tuple(sorted(set(PSEUDO_TYPES) - set(map(str.lower, KEYWORDS)))) PLPGSQL_KEYWORDS = ( 'ALIAS', 'CONSTANT', 'DIAGNOSTICS', 'ELSIF', 'EXCEPTION', 'EXIT', 'FOREACH', 'GET', 'LOOP', 'NOTICE', 'OPEN', 'PERFORM', 'QUERY', 'RAISE', 'RETURN', 'REVERSE', 'SQLSTATE', 'WHILE', ) if __name__ == '__main__': # pragma: no cover import re try: from urllib import urlopen except ImportError: from urllib.request import urlopen from pygments.util import format_lines # One man's constant is another man's variable. SOURCE_URL = 'https://github.com/postgres/postgres/raw/master' KEYWORDS_URL = SOURCE_URL + '/src/include/parser/kwlist.h' DATATYPES_URL = SOURCE_URL + '/doc/src/sgml/datatype.sgml' def update_myself(): content = urlopen(DATATYPES_URL).read().decode('utf-8', errors='ignore') data_file = list(content.splitlines()) datatypes = parse_datatypes(data_file) pseudos = parse_pseudos(data_file) content = urlopen(KEYWORDS_URL).read().decode('utf-8', errors='ignore') keywords = parse_keywords(content) update_consts(__file__, 'DATATYPES', datatypes) update_consts(__file__, 'PSEUDO_TYPES', pseudos) update_consts(__file__, 'KEYWORDS', keywords) def parse_keywords(f): kw = [] for m in re.finditer(r'PG_KEYWORD\("(.+?)"', f): kw.append(m.group(1).upper()) if not kw: raise ValueError('no keyword found') kw.sort() return kw def parse_datatypes(f): dt = set() for line in f: if '<sect1' in line: break if '<entry><type>' not in line: continue # Parse a string such as # time [ (<replaceable>p</replaceable>) ] [ without time zone ] # into types "time" and "without time zone" # remove all the tags line = re.sub("<replaceable>[^<]+</replaceable>", "", line) line = re.sub("<[^>]+>", "", line) # Drop the parts containing braces for tmp in [t for tmp in line.split('[') for t in tmp.split(']') if "(" not in t]: for t in tmp.split(','): t = t.strip() 
if not t: continue dt.add(" ".join(t.split())) dt = list(dt) dt.sort() return dt def parse_pseudos(f): dt = [] re_start = re.compile(r'\s*<table id="datatype-pseudotypes-table">') re_entry = re.compile(r'\s*<entry><type>(.+?)</type></entry>') re_end = re.compile(r'\s*</table>') f = iter(f) for line in f: if re_start.match(line) is not None: break else: raise ValueError('pseudo datatypes table not found') for line in f: m = re_entry.match(line) if m is not None: dt.append(m.group(1)) if re_end.match(line) is not None: break else: raise ValueError('end of pseudo datatypes table not found') if not dt: raise ValueError('pseudo datatypes not found') dt.sort() return dt def update_consts(filename, constname, content): with open(filename) as f: data = f.read() # Line to start/end inserting re_match = re.compile(r'^%s\s*=\s*\($.*?^\s*\)$' % constname, re.M | re.S) m = re_match.search(data) if not m: raise ValueError('Could not find existing definition for %s' % (constname,)) new_block = format_lines(constname, content) data = data[:m.start()] + new_block + data[m.end():] with open(filename, 'w', newline='\n') as f: f.write(data) update_myself()
dscorbett/pygments
pygments/lexers/_postgres_builtins.py
Python
bsd-2-clause
12,184
0.000246
# Copyright 2014 The Swarming Authors. All rights reserved. # Use of this source code is governed by the Apache v2.0 license that can be # found in the LICENSE file. """Imports groups from some external tar.gz bundle or plain text list. External URL should serve *.tar.gz file with the following file structure: <external group system name>/<group name>: userid userid ... For example ldap.tar.gz may look like: ldap/trusted-users: jane joe ... ldap/all: jane joe ... Each tarball may have groups from multiple external systems, but groups from some external system must not be split between multiple tarballs. When importer sees <external group system name>/* in a tarball, it modifies group list from that system on the server to match group list in the tarball _exactly_, including removal of groups that are on the server, but no longer present in the tarball. Plain list format should have one userid per line and can only describe a single group in a single system. Such groups will be added to 'external/*' groups namespace. Removing such group from importer config will remove it from service too. 
""" import collections import contextlib import logging import StringIO import tarfile from google.appengine.api import app_identity from google.appengine.ext import ndb from components import auth from components import utils from components.auth import model class BundleImportError(Exception): """Base class for errors while fetching external bundle.""" class BundleFetchError(BundleImportError): """Failed to fetch the archive from remote URL.""" def __init__(self, url, status_code, content): super(BundleFetchError, self).__init__() self.url = url self.status_code = status_code self.content = content def __str__(self): return 'Request to %s failed with code %d:\n%r' % ( self.url, self.status_code, self.content) class BundleUnpackError(BundleImportError): """Failed to untar the archive.""" def __init__(self, inner_exc): super(BundleUnpackError, self).__init__() self.inner_exc = inner_exc def __str__(self): return 'Not a valid tar archive: %s' % self.inner_exc class BundleBadFormatError(BundleImportError): """Group file in bundle has invalid format.""" def __init__(self, inner_exc): super(BundleBadFormatError, self).__init__() self.inner_exc = inner_exc def __str__(self): return 'Bundle contains invalid group file: %s' % self.inner_exc def config_key(): """Key of GroupImporterConfig singleton entity.""" return ndb.Key('GroupImporterConfig', 'config') class GroupImporterConfig(ndb.Model): """Singleton entity with group importer configuration JSON.""" config = ndb.JsonProperty() modified_by = auth.IdentityProperty(indexed=False) modified_ts = ndb.DateTimeProperty(auto_now=True, indexed=False) def is_valid_config(config): """Checks config for correctness.""" if not isinstance(config, list): return False seen_systems = set(['external']) seen_groups = set() for item in config: if not isinstance(item, dict): return False # 'format' is an optional string describing the format of the imported # source. The default format is 'tarball'. 
fmt = item.get('format', 'tarball') if fmt not in ['tarball', 'plainlist']: return False # 'url' is a required string: where to fetch groups from. url = item.get('url') if not url or not isinstance(url, basestring): return False # 'oauth_scopes' is an optional list of strings: used when generating OAuth # access_token to put in Authorization header. oauth_scopes = item.get('oauth_scopes') if oauth_scopes is not None: if not all(isinstance(x, basestring) for x in oauth_scopes): return False # 'domain' is an optional string: will be used when constructing emails from # naked usernames found in imported groups. domain = item.get('domain') if domain and not isinstance(domain, basestring): return False # 'tarball' format uses 'systems' and 'groups' fields. if fmt == 'tarball': # 'systems' is a required list of strings: group systems expected to be # found in the archive (they act as prefixes to group names, e.g 'ldap'). systems = item.get('systems') if not systems or not isinstance(systems, list): return False if not all(isinstance(x, basestring) for x in systems): return False # There should be no overlap in systems between different bundles. if set(systems) & seen_systems: return False seen_systems.update(systems) # 'groups' is an optional list of strings: if given, filters imported # groups only to this list. groups = item.get('groups') if groups and not all(isinstance(x, basestring) for x in groups): return False elif fmt == 'plainlist': # 'group' is a required name of imported group. The full group name will # be 'external/<group>'. 
group = item.get('group') if not group or not isinstance(group, basestring) or group in seen_groups: return False seen_groups.add(group) else: assert False, 'Unreachable' return True def read_config(): """Returns currently stored config or [] if not set.""" e = config_key().get() return (e.config if e else []) or [] def write_config(config): """Updates stored configuration.""" if not is_valid_config(config): raise ValueError('Invalid config') e = GroupImporterConfig( key=config_key(), config=config, modified_by=auth.get_current_identity()) e.put() def import_external_groups(): """Refetches all external groups. Runs as a cron task. Raises BundleImportError in case of import errors. """ # Missing config is not a error. config = read_config() if not config: logging.info('Not configured') return if not is_valid_config(config): raise BundleImportError('Bad config') # Fetch all files specified in config in parallel. futures = [fetch_file_async(p['url'], p.get('oauth_scopes')) for p in config] # {system name -> group name -> list of identities} bundles = {} for p, future in zip(config, futures): fmt = p.get('format', 'tarball') # Unpack tarball into {system name -> group name -> list of identities}. if fmt == 'tarball': fetched = load_tarball( future.get_result(), p['systems'], p.get('groups'), p.get('domain')) assert not ( set(fetched) & set(bundles)), (fetched.keys(), bundles.keys()) bundles.update(fetched) continue # Add plainlist group to 'external/*' bundle. if fmt == 'plainlist': group = load_group_file(future.get_result(), p.get('domain')) name = 'external/%s' % p['group'] if 'external' not in bundles: bundles['external'] = {} assert name not in bundles['external'], name bundles['external'][name] = group continue assert False, 'Unreachable' # Nothing to process? 
if not bundles: return @ndb.transactional def snapshot_groups(): """Fetches all existing groups and AuthDB revision number.""" groups = model.AuthGroup.query(ancestor=model.root_key()).fetch_async() return auth.get_auth_db_revision(), groups.get_result() @ndb.transactional def apply_import(revision, entities_to_put, keys_to_delete): """Transactionally puts and deletes a bunch of entities.""" # DB changed between transactions, retry. if auth.get_auth_db_revision() != revision: return False # Apply mutations, bump revision number. futures = [] futures.extend(ndb.put_multi_async(entities_to_put)) futures.extend(ndb.delete_multi_async(keys_to_delete)) ndb.Future.wait_all(futures) if any(f.get_exception() for f in futures): raise ndb.Rollback() auth.replicate_auth_db() return True # Try to apply the change until success or deadline. Split transaction into # two (assuming AuthDB changes infrequently) to avoid reading and writing too # much stuff from within a single transaction (and to avoid keeping the # transaction open while calculating the diff). while True: # Use same timestamp everywhere to reflect that groups were imported # atomically within a single transaction. ts = utils.utcnow() entities_to_put = [] keys_to_delete = [] revision, existing_groups = snapshot_groups() for system, groups in bundles.iteritems(): to_put, to_delete = prepare_import(system, existing_groups, groups, ts) entities_to_put.extend(to_put) keys_to_delete.extend(to_delete) if not entities_to_put and not keys_to_delete: break if apply_import(revision, entities_to_put, keys_to_delete): break logging.info('Groups updated: %d', len(entities_to_put) + len(keys_to_delete)) def load_tarball(content, systems, groups, domain): """Unzips tarball with groups and deserializes them. Args: content: byte buffer with *.tar.gz data. systems: names of external group systems expected to be in the bundle. groups: list of group name to extract, or None to extract all. 
domain: email domain to append to naked user ids. Returns: Dict {system name -> {group name -> list of identities}}. Raises: BundleImportError on errors. """ bundles = collections.defaultdict(dict) try: # Expected filenames are <external system name>/<group name>, skip # everything else. for filename, fileobj in extract_tar_archive(content): chunks = filename.split('/') if len(chunks) != 2 or not auth.is_valid_group_name(filename): logging.warning('Skipping file %s, not a valid name', filename) continue if groups is not None and filename not in groups: continue system = chunks[0] if system not in systems: logging.warning('Skipping file %s, not allowed', filename) continue # Do not catch BundleBadFormatError here and in effect reject the whole # bundle if at least one group file is broken. That way all existing # groups will stay intact. Simply ignoring broken group here will cause # the importer to remove it completely. bundles[system][filename] = load_group_file(fileobj.read(), domain) except tarfile.TarError as exc: raise BundleUnpackError('Not a valid tar archive: %s' % exc) return dict(bundles.iteritems()) def load_group_file(body, domain): """Given body of imported group file returns list of Identities. Raises BundleBadFormatError if group file is malformed. """ members = [] for uid in body.strip().splitlines(): try: ident = auth.Identity( auth.IDENTITY_USER, '%s@%s' % (uid, domain) if domain else uid) members.append(ident) except ValueError as exc: raise BundleBadFormatError(exc) return sorted(members, key=lambda x: x.to_bytes()) @ndb.tasklet def fetch_file_async(url, oauth_scopes): """Fetches a file optionally using OAuth2 for authentication. Args: url: url to a file to fetch. oauth_scopes: list of OAuth scopes to use when generating access_token for accessing |url|, if not set or empty - do not use OAuth. Returns: Byte buffer with file's body. Raises: BundleImportError on fetch errors. 
""" if utils.is_local_dev_server(): protocols = ('http://', 'https://') else: protocols = ('https://',) assert url.startswith(protocols), url headers = {} if oauth_scopes: headers['Authorization'] = 'OAuth %s' % ( app_identity.get_access_token(oauth_scopes)[0]) ctx = ndb.get_context() result = yield ctx.urlfetch( url=url, method='GET', headers=headers, follow_redirects=False, deadline=5*60, validate_certificate=True) if result.status_code != 200: raise BundleFetchError(url, result.status_code, result.content) raise ndb.Return(result.content) def extract_tar_archive(content): """Given a body of tar.gz file yields pairs (file name, file obj).""" stream = StringIO.StringIO(content) with tarfile.open(mode='r|gz', fileobj=stream) as tar: for item in tar: if item.isreg(): with contextlib.closing(tar.extractfile(item)) as extracted: yield item.name, extracted def prepare_import(system_name, existing_groups, imported_groups, timestamp): """Prepares lists of entities to put and delete to apply group import. Args: system_name: name of external groups system being imported (e.g. 'ldap'), all existing groups belonging to that system will be replaced with |imported_groups|. existing_groups: ALL existing groups. imported_groups: dict {imported group name -> list of identities}. timestamp: modification timestamp to set on all touched entities. Returns: (List of entities to put, list of keys to delete). """ # Return values of this function. to_put = [] to_delete = [] # Pick only groups that belong to |system_name|. 
system_groups = { g.key.id(): g for g in existing_groups if g.key.id().startswith('%s/' % system_name) } def clear_group(group_name): ent = system_groups[group_name] if ent.members: ent.members = [] ent.modified_ts = timestamp ent.modified_by = auth.get_service_self_identity() to_put.append(ent) def delete_group(group_name): to_delete.append(system_groups[group_name].key) def create_group(group_name): ent = model.AuthGroup( key=model.group_key(group_name), members=imported_groups[group_name], created_ts=timestamp, created_by=auth.get_service_self_identity(), modified_ts=timestamp, modified_by=auth.get_service_self_identity()) to_put.append(ent) def update_group(group_name): existing = system_groups[group_name] imported = imported_groups[group_name] if existing.members != imported: existing.members = imported existing.modified_ts = timestamp existing.modified_by = auth.get_service_self_identity() to_put.append(existing) # Delete groups that are no longer present in the bundle. If group is # referenced somewhere, just clear its members list (to avoid creating # inconsistency in group inclusion graph). for group_name in (set(system_groups) - set(imported_groups)): if any(group_name in g.nested for g in existing_groups): clear_group(group_name) else: delete_group(group_name) # Create new groups. for group_name in (set(imported_groups) - set(system_groups)): create_group(group_name) # Update existing groups. for group_name in (set(imported_groups) & set(system_groups)): update_group(group_name) return to_put, to_delete
pombreda/swarming
appengine/auth_service/common/importer.py
Python
apache-2.0
14,804
0.010875
# Generated by Django 2.2.14 on 2020-07-30 12:09 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0028_auto_20200615_0811'), ] operations = [ migrations.AddField( model_name='user', name='verified_email', field=models.BooleanField(default=True), ), ]
taigaio/taiga-back
taiga/users/migrations/0029_user_verified_email.py
Python
agpl-3.0
391
0
#!/usr/bin/env python # -*- coding: utf-8 -*- # Modules used for ETL - Create User # Modules required: import os import xmlrpclib, sys, csv, ConfigParser from openerp.tools.status_history import status from datetime import datetime # ----------------------------------------------------------------------------- # Set up parameters (for connection to Open ERP Database) # ----------------------------------------------------------------------------- # Startup from config file: config = ConfigParser.ConfigParser() file_config = os.path.expanduser('~/ETL/generalfood/openerp.cfg') config.read([file_config]) dbname = config.get('dbaccess','dbname') user = config.get('dbaccess','user') pwd = config.get('dbaccess','pwd') server = config.get('dbaccess','server') port = config.get('dbaccess','port') # verify if it's necessary: getint separator = eval(config.get('dbaccess','separator')) # test log_only_error = eval(config.get('log','error')) # log only error in function # Startup from code: default_error_data = "2014/07/30" default_product_id = 1921 # for lot creation (acceptation) default_lot_id = 92710 # ERR log_file = os.path.expanduser("~/ETL/generalfood/log/%s.txt" % (datetime.now())) log = open(log_file, 'w') # ----------------------------------------------------------------------------- # XMLRPC connection # ----------------------------------------------------------------------------- sock = xmlrpclib.ServerProxy( 'http://%s:%s/xmlrpc/common' % (server, port), allow_none=True) uid = sock.login(dbname ,user ,pwd) sock = xmlrpclib.ServerProxy( 'http://%s:%s/xmlrpc/object' % (server, port), allow_none=True) # ----------------------------------------------------------------------------- # Utility function # ----------------------------------------------------------------------------- def format_string(valore): try: valore = valore.decode('cp1252') except: tmp = "" for i in valore: try: tmp += i.decode('cp1252') except: pass # jump char valore = tmp valore = 
valore.encode('utf-8') return valore.strip() def format_date(valore,date=True): ''' Formatta nella data di PG ''' try: if date: mga = valore.strip().split(' ')[0].split('/') # only date (not time) year = int(mga[2]) if year < 100: if year > 50: year += 1900 else: year += 2000 return '%4d-%02d-%02d' % (year, int(mga[0]), int(mga[1])) except: return False def format_currency(valore): ''' Formatta nel float per i valori currency ''' try: return float(valore.strip().split(' ')[-1].replace(',','.')) except: return 0.0 def format_boolean(value): ''' Formatta le stringhe '0' e '1' in boolean True e False ''' return value == '1' def log_event(*event): ''' Log event and comunicate with print ''' if log_only_error and event[0][:5] == "[INFO": return log.write("%s. %s\r\n" % (datetime.now(), event)) print event return def create_partner(partner_code, type_of_partner, default_dict): ''' Create simple element for partner not found (write after in default_dict new element) ''' try: field = "sql_%s_code" % type_of_partner partner_ids = sock.execute(dbname, uid, pwd, "res.partner", "search", [(field, '=', partner_code)]) if partner_ids: partner_id = partner_ids[0] else: data = { 'name': "Partner %s (from migration)" % (partner_code), field: partner_code, 'sql_import': True, } if type_of_partner == 'customer': data['ref'] = partner_code data['customer'] = True elif type_of_partner == 'supplier': data['supplier'] = True elif type_of_partner == 'destination': data['is_address'] = True partner_id = sock.execute(dbname, uid, pwd, "res.partner", 'create', data) log_event("[WARN] %s partner created: %s" % (type_of_partner, partner_code)) default_dict[partner_code] = partner_id return partner_id except: log_event("[ERROR] Error creating %s partner: %s" % (type_of_partner, partner_code)) return False def get_or_create_partner(partner_code, type_of_partner, mandatory, res_partner_customer, res_partner_supplier): ''' Try to get partner element or create a simple element if not present ''' if 
type_of_partner == 'customer': default_dict = res_partner_customer elif type_of_partner == 'supplier': default_dict = res_partner_supplier elif type_of_partner == 'destination': default_dict = res_partner_customer # search in customer dict else: default_dict = {} # nothing partner_id = default_dict.get(partner_code, False) if not partner_id: # create e simple element partner_id = create_partner(partner_code, type_of_partner, default_dict) if mandatory and not partner_id: log_event("[ERROR] %s partner not found: %s" % ( type_of_partner, partner_code)) return partner_id # ----------------------------------------------------------------------------- # Importazioni qualifiche fornitore # ----------------------------------------------------------------------------- qualifications = { '1': 'full', # Piena qualitica '2': 'reserve', # Con riserva '3': 'discarded', # Scartato '4': 'uneventful', # Non movimentato '5': 'test', # In prova '6': 'occasional', # Occasionale } # ----------------------------------------------------------------------------- # Importazioni comunicazioni # ----------------------------------------------------------------------------- comunications = { '1': 1, # Cliente '2': 2, # Fornitore '3': 3, # ASL } # ----------------------------------------------------------------------------- # Importazioni gravità # ----------------------------------------------------------------------------- gravity = { '1': 2, # Grave '2': 3, # Importante '3': 1, # Secondario } # ----------------------------------------------------------------------------- # Importazioni origin # ----------------------------------------------------------------------------- origin = { '1': 1, # Ordine '2': 2, # Magazzino '3': 3, # Fornitore '4': 4, # Cliente '5': 5, # Trasporto '6': 6, # Fatturazione '7': 7, # Non definibile '8': 8, # Commerciale '9': 9, # Logistica '10': 10, # Confezionamento '11': 11, # Acquisti } # 
----------------------------------------------------------------------------- # Importazioni cause # ----------------------------------------------------------------------------- cause = { '1': 1, # Igiene '2': 2, # Qualità '3': 3, # Quantità '4': 4, # Ritardo '5': 5, # Prodotto sbagliato '6': 6, # Confezione '7': 7, # Errore cliente '8': 8, # Prezzo '9': 9, # Non definibile '10': 10, # Glassatura '11': 11, # Temperatura '12': 12, # Pezzatura '13': 13, # Corpi estranei/Contaminati '14': 14, # Mancanza prodotto risp a bolla '15': 15, # Rottura di stock } # ----------------------------------------------------------------------------- # Importazioni Sampling plan # ----------------------------------------------------------------------------- plan = { '1': 1, # Bieta erbetta '3': 2, # Broccoli calabri IGF '4': 3, # Carote Baby e rondelle '6': 4, # Cavolfiore '7': 5, # Carciofi '9': 6, # Patate crocchette '11': 7, # Fagiolini '12': 8, # Finocchi '13': 9, # Minestrone '16': 10, # Patate '18': 11, # Piselli '19': 12, # Spinaci '20': 13, # Zucchine '21': 14, # Halibut '22': 15, # Bastoncini '23': 16, # Calamari '25': 17, # Cozze '26': 18, # Merluzzo '27': 19, # Palombo '28': 20, # Platessa '29': 21, # Seppie '30': 22, # Trota '31': 23, # Coscette pollo '32': 24, # Pollo '33': 25, # Suino '35': 26, # Peperoni '38': 27, # Tacchino '39': 28, # Asparagi '40': 29, # Macinato '41': 30, # Pesce spada '42': 31, # Mais '43': 32, # Pangasio '44': 33, # Aromi e sedano } # ----------------------------------------------------------------------------- # Importazioni Origin (action) >> (Uso stessa anagrafica per camp.) 
# ----------------------------------------------------------------------------- origin_action = { '1': 'direction', # Riesame della direzione '2': 'audit', # Audit interno '3': 'claim', # Reclamo '4': 'nc', # Rapporto di non conformità '5': 'other', # Altro } stock_production_lot = {} lot_ids = sock.execute(dbname, uid, pwd, 'stock.production.lot', 'search', []) for lot in sock.execute(dbname, uid, pwd, 'stock.production.lot', 'read', lot_ids, ['id','name']): stock_production_lot[lot['name']] = lot['id'] # ----------------------------------------------------------------------------- # Importazione Classi fornitore # ----------------------------------------------------------------------------- only_create = True jump_because_imported = True file_input = os.path.expanduser('~/ETL/generalfood/Classi.txt') openerp_object = 'quality.partner.class' log_event("Start import %s" % openerp_object) quality_partner_class = {} lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} try: for line in lines: if jump_because_imported: break if counter['tot'] < 0: counter['tot'] += 1 continue if len(line): access_id = line[0] name = format_string(line[1]) # Start of importation: counter['tot'] += 1 # test if record exists (basing on Ref. 
as code of Partner) item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) data = { 'name': name, 'access_id': access_id, } if item: # already exist counter['upd'] += 1 try: if only_create: log_event("[INFO]", counter['tot'], "Write", openerp_object, " (jumped only_create clause: ", name) else: item_mod = sock.execute(dbname, uid, pwd, openerp_object, 'write', item, data) log_event( "[INFO]", counter['tot'], "Write", openerp_object, name) quality_partner_class[access_id] = item[0] except: log_event("[ERROR] Modifing data, current record:", data) else: # new counter['new'] += 1 try: openerp_id=sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event( "[INFO]", counter['tot'], "Create", openerp_object, name) quality_partner_class[access_id] = openerp_id except: log_event( "[ERROR] Error creating data, current record: ", data) except: log_event('[ERROR] Error importing data!') raise #Exception("Errore di importazione!") # Scrivo l'errore per debug store = status(openerp_object) if jump_because_imported: quality_partner_class = store.load() else: store.store(quality_partner_class) log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter) # ----------------------------------------------------------------------------- # Importazione Clienti # ----------------------------------------------------------------------------- only_create = True jump_because_imported = True file_input = os.path.expanduser('~/ETL/generalfood/Clienti.txt') openerp_object = 'res.partner' log_event("Start import %s (customer)" % openerp_object) res_partner_customer = {} lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} try: for line in lines: if jump_because_imported: break if counter['tot'] < 0: counter['tot'] += 1 continue if len(line): access_c_id = line[0] code = format_string(line[1]) name = format_string(line[2]) # Start of importation: counter['tot'] += 1 # test if record 
exists (basing on Ref. as code of Partner) if code[:2] == '06': search_key = 'sql_customer_code' destination = False else: search_key = 'sql_destination_code' destination = True item = sock.execute( dbname, uid, pwd, openerp_object , 'search', [ #('access_c_id', '=', access_c_id), (search_key, '=', code), ]) if not item: log_event( "[WARNING] Customer/Destination not found " "(must be yet imported)", data, ) # continue # TODO lo creo lo stesso per ora data = { 'name': "%s%s" % (name, "" if item else " [*]"), # Creato da importazione) 'is_company': True, 'access_c_id': access_c_id, 'customer': True, # for link sql importation search_key: code, #'sql_customer_code' 'sql_import': True, } if destination: data['is_address'] = True # parent_id = ?? TODO if item: counter['upd'] += 1 try: if only_create: log_event( "[INFO]", counter['tot'], "No Write", openerp_object, " (jumped only_create clause: ", code) else: item_mod = sock.execute( dbname, uid, pwd, openerp_object, 'write', item, data) log_event( "[INFO]", counter['tot'], "Write", openerp_object, code) res_partner_customer[code] = item[0] except: log_event("[ERROR] Modifing data, current record:", data) else: counter['new'] += 1 try: openerp_id = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event( "[INFO]", counter['tot'], "Create", openerp_object, code) res_partner_customer[code] = openerp_id except: log_event( "[ERROR] Error creating data, current record: ", data) except: log_event('[ERROR] Error importing data!') raise store = status('%sc' % openerp_object) if jump_because_imported: res_partner_customer = store.load() else: store.store(res_partner_customer) log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter) # ----------------------------------------------------------------------------- # Importazione Fornitori # ----------------------------------------------------------------------------- only_create = True jump_because_imported = True file_input = 
os.path.expanduser('~/ETL/generalfood/Fornitori.txt') openerp_object = 'res.partner' log_event("Start import %s (supplier)" % openerp_object) res_partner_supplier = {} lines = csv.reader(open(file_input,'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} max_col = 0 try: for line in lines: if jump_because_imported: break if counter['tot'] < 0: counter['tot'] += 1 max_col = len(line) continue if len(line): if len(line) != max_col: log_event("[ERROR] %s Different cols not %s but now %s! Jumped: %s" % ( counter['tot'], max_col, len(line), line)) continue access_s_id = line[0] code = format_string(line[1]) name = format_string(line[2]) quality_class_code = format_string(line[3]) quality_activity = format_string(line[11]) quality_product = format_string(line[12]) quality_rating_info = format_string(line[13]) quality_commercial_reference = format_string(line[14]) quality_update_date = format_date(line[15]) quality_start_supplier = format_date(line[33]) quality_end_supplier = format_date(line[34]) quality_class_id = quality_partner_class.get( quality_class_code, False) # Start of importation: counter['tot'] += 1 # test if record exists (basing on Ref. 
as code of Partner) item = sock.execute( dbname, uid, pwd, openerp_object , 'search', [ #('access_s_id', '=', access_s_id), ('sql_supplier_code', '=', code), ]) if not item: log_event( "[WARNING] Supplier not found (must be yet imported)", data, ) #continue data = { 'name': name, 'is_company': True, 'access_s_id': access_s_id, 'supplier': True, 'quality_class_id': quality_class_id, 'quality_activity': quality_activity, 'quality_product': quality_product, 'quality_rating_info': quality_rating_info, 'quality_commercial_reference': quality_commercial_reference, 'quality_update_date': quality_update_date, 'quality_start_supplier': quality_start_supplier, 'quality_end_supplier': quality_end_supplier, # for link sql importation 'sql_supplier_code': code, 'sql_import': True, } if item: counter['upd'] += 1 try: if only_create: log_event( "[INFO]", counter['tot'], "Write", openerp_object, " (jumped only_create clause: ", code) else: item_mod = sock.execute( dbname, uid, pwd, openerp_object, 'write', item, data) log_event( "[INFO]", counter['tot'], "Write", openerp_object, code) #res_partner_supplier[access_s_id] = item[0] res_partner_supplier[code] = item[0] except: log_event("[ERROR] Modifing data, current record:", data) else: counter['new'] += 1 try: openerp_id = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event( "[INFO]", counter['tot'], "Create", openerp_object, code) #res_partner_supplier[access_s_id] = openerp_id res_partner_supplier[code] = openerp_id except: log_event( "[ERROR] Error creating data, current record: ", data) except: log_event('[ERROR] Error importing data!') raise store = status('%ss' % openerp_object) if jump_because_imported: res_partner_supplier = store.load() else: store.store(res_partner_supplier) log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter) # ----------------------------------------------------------------------------- # Importazione Qualifiche fornitore # 
----------------------------------------------------------------------------- only_create = True jump_because_imported = True file_input = os.path.expanduser('~/ETL/generalfood/Qualifiche.txt') openerp_object = 'quality.supplier.rating' log_event("Start import %s" % openerp_object) # Non storicizzati in dict lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} max_col = 0 try: for line in lines: if jump_because_imported: break if counter['tot'] < 0: counter['tot'] += 1 max_col = len(line) continue if len(line): counter['tot'] += 1 if len(line) != max_col: log_event("[ERROR] %s Different cols not %s but now %s! Jumped:" % ( counter['tot'], max_col, len(line))) continue access_id = line[0] supplier_code = format_string(line[1]) qualification_code = format_string(line[2]) name = format_string(line[3]) date = format_date(line[4]) type_code = format_string(line[5]).upper() deadline = format_date(line[6]) obsolete = format_boolean(line[7]) # Convert foreign key: if type_code == "P": type_id = 'first' elif type_code == 'R': type_id = 'renewal' else: type_id = False partner_id = res_partner_supplier.get(supplier_code, False) if not partner_id: # Creo se non esiste partner_id = get_or_create_partner(supplier_code, 'supplier', True, res_partner_customer, res_partner_supplier) if not partner_id: log_event("[ERROR] Partner not found, jumped! 
%s" % (line)) continue qualification = qualifications.get(qualification_code, False) item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) data = { 'name': name, 'date': date, 'type': type_id, 'deadline': deadline, 'obsolete': obsolete, 'qualification': qualification, 'partner_id': partner_id, 'access_id': access_id, } if item: counter['upd'] += 1 try: if only_create: log_event( "[INFO]", counter['tot'], "Write", openerp_object, " (jumped only_create clause: ", supplier_code) else: item_mod = sock.execute( dbname, uid, pwd, openerp_object, 'write', item, data) log_event( "[INFO]", counter['tot'], "Write", openerp_object, supplier_code) #quality_claim[access_id] = item[0] except: log_event("[ERROR] Modifing data, current record:", data) else: counter['new'] += 1 try: openerp_id=sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event( "[INFO]", counter['tot'], "Create", openerp_object, name) #quality_claim[access_id] = openerp_id except: log_event( "[ERROR] Error creating data, current record: ", data) except: log_event('[ERROR] Error importing data!') raise log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter) # ----------------------------------------------------------------------------- # Certificazioni fornitore # ----------------------------------------------------------------------------- only_create = True jump_because_imported = True file_input = os.path.expanduser('~/ETL/generalfood/Certificazioni.txt') openerp_object = 'quality.supplier.certification' log_event("Start import %s" % openerp_object) lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} max_col = 0 try: for line in lines: if jump_because_imported: break if counter['tot'] < 0: counter['tot'] += 1 max_col = len(line) continue if len(line): counter['tot'] += 1 if len(line) != max_col: log_event("[ERROR] %s Different cols not %s but now %s! 
Jumped:" % ( counter['tot'], max_col, len(line))) continue access_id = line[0] supplier_code = format_string(line[1]) entity = format_date(line[2]) rule = format_string(line[3]) note = format_string(line[4]) # purpose date = format_date(line[5]) deadline = format_date(line[6]) number = format_string(line[7]) # Convert foreign key: partner_id = res_partner_supplier.get(supplier_code, False) if not partner_id: partner_id = get_or_create_partner(supplier_code, 'supplier', True, res_partner_customer, res_partner_supplier) if not partner_id: log_event("[ERROR] Partner not found, jumped! %s" % (line)) continue item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) data = { 'date': date, 'entity': entity, # 'name': # TODO esiste??? 'deadline': deadline, 'note': note, 'rule': rule, 'number': number, 'partner_id': partner_id, 'access_id': access_id, } if item: counter['upd'] += 1 try: if only_create: log_event( "[INFO]", counter['tot'], "Write", openerp_object, " (jumped only_create clause: ", supplier_code) else: item_mod = sock.execute( dbname, uid, pwd, openerp_object, 'write', item, data) log_event( "[INFO]", counter['tot'], "Write", openerp_object, supplier_code) #quality_claim[access_id] = item[0] except: log_event("[ERROR] Modifing data, current record:", data) else: counter['new'] += 1 try: openerp_id = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event( "[INFO]", counter['tot'], "Create", openerp_object, supplier_code) #quality_claim[access_id] = openerp_id except: log_event( "[ERROR] Error creating data, current record: ", data) except: log_event('[ERROR] Error importing data!') raise log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter) # ----------------------------------------------------------------------------- # Referenze - Andamenti Qualifiche fornitore # ----------------------------------------------------------------------------- only_create = True jump_because_imported = True 
file_input = os.path.expanduser('~/ETL/generalfood/Andamenti.txt') openerp_object = 'quality.supplier.reference' log_event("Start import %s" % openerp_object) lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} max_col = 0 try: for line in lines: if jump_because_imported: break if counter['tot'] < 0: counter['tot'] += 1 max_col = len(line) continue if len(line): counter['tot'] += 1 if len(line) != max_col: log_event("[ERROR] %s Different cols not %s now %s! Jumped:" % ( counter['tot'], max_col, len(line))) continue access_id = line[0] supplier_code = format_string(line[1]) date = format_date(line[2]) note = format_string(line[3]) # Convert foreign key: partner_id = res_partner_supplier.get(supplier_code, False) if not partner_id: partner_id = get_or_create_partner(supplier_code, 'supplier', True, res_partner_customer, res_partner_supplier) if not partner_id: log_event("[ERROR] Partner not found, jumped! %s" % (line)) continue item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) data = { #'name': name, # TODO non esiste!!! 
'date': date, 'note': note, 'partner_id': partner_id, 'access_id': access_id, } if item: counter['upd'] += 1 try: if only_create: log_event( "[INFO]", counter['tot'], "Write", openerp_object, " (jumped only_create clause: ", supplier_code) else: item_mod = sock.execute( dbname, uid, pwd, openerp_object, 'write', item, data) log_event( "[INFO]", counter['tot'], "Write", openerp_object, supplier_code) #quality_claim[access_id] = item[0] except: log_event("[ERROR] Modifing data, current record:", data) else: counter['new'] += 1 try: openerp_id = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event( "[INFO]", counter['tot'], "Create", openerp_object, supplier_code) #quality_claim[access_id] = openerp_id except: log_event( "[ERROR] Error creating data, current record: ", data) except: log_event('[ERROR] Error importing data!') raise log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter) # ----------------------------------------------------------------------------- # Verifiche fornitore # ----------------------------------------------------------------------------- only_create = True jump_because_imported = True file_input = os.path.expanduser('~/ETL/generalfood/Verifiche.txt') openerp_object = 'quality.supplier.check' log_event("Start import %s" % openerp_object) lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} max_col = 0 try: for line in lines: if jump_because_imported: break if counter['tot'] < 0: counter['tot'] += 1 max_col = len(line) continue if len(line): counter['tot'] += 1 if len(line) != max_col: log_event("[ERROR] %s Different cols not %s but now %s! 
Jumped:" % ( counter['tot'], max_col, len(line))) continue access_id = line[0] supplier_code = format_string(line[1]) date = format_date(line[2]) name = format_string(line[3]) note = format_string(line[4]) # Convert foreign key: partner_id = res_partner_supplier.get(supplier_code, False) if not partner_id: partner_id = get_or_create_partner(supplier_code, 'supplier', True, res_partner_customer, res_partner_supplier) if not partner_id: log_event("[ERROR] Partner not found, jumped! %s" % (line)) continue item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) data = { 'date': date, 'name': name, 'note': note, 'partner_id': partner_id, 'access_id': access_id, } if item: counter['upd'] += 1 try: if only_create: log_event( "[INFO]", counter['tot'], "Write", openerp_object, " (jumped only_create clause: ", supplier_code) else: item_mod = sock.execute( dbname, uid, pwd, openerp_object, 'write', item, data) log_event( "[INFO]", counter['tot'], "Write", openerp_object, supplier_code) #quality_claim[access_id] = item[0] except: log_event("[ERROR] Modifing data, current record:", data) else: counter['new'] += 1 try: openerp_id = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event( "[INFO]", counter['tot'], "Create", openerp_object, supplier_code) #quality_claim[access_id] = openerp_id except: log_event( "[ERROR] Error creating data, current record: ", data) except: log_event('[ERROR] Error importing data!') raise log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter) # ----------------------------------------------------------------------------- # PRECARICAMENTI # ----------------------------------------------------------------------------- # RECLAMI --------------------------------------------------------------------- jump_because_imported = True file_input = os.path.expanduser('~/ETL/generalfood/Rapporti.txt') openerp_object = 'quality.claim' log_event("Start preload import %s" % openerp_object) 
quality_claim = {} lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} if not jump_because_imported: try: for line in lines: counter['tot'] += 1 if counter['tot'] <= 0: continue if len(line): access_id = line[0] ref = "REC%05d" % (int(format_string(line[1]) or '0')) item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) data = { 'ref': ref, 'name': ref, # TODO not correct 'access_id': access_id, 'partner_id': 1, # TODO not correct } if item: quality_claim[access_id] = item[0] else: try: quality_claim[access_id] = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event("[INFO] %s. Create %s ref: %s" % ( counter['tot'], openerp_object, ref)) except: log_event( "[ERROR] Error creating, record: %s " % line) except: log_event('[ERROR] Error importing data!') raise store = status(openerp_object) if jump_because_imported: quality_claim = store.load() else: store.store(quality_claim) log_event("Total %(tot)s" % counter) # NON CONFORMITA' ------------------------------------------------------------- jump_because_imported = True file_input = os.path.expanduser('~/ETL/generalfood/Conformità.txt') openerp_object = 'quality.conformed' log_event("Start preload import %s" % openerp_object) quality_conformed = {} lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} max_col = 0 if not jump_because_imported: try: for line in lines: try: counter['tot'] += 1 if counter['tot'] <= 0: max_col = len(line) continue if len(line): if len(line) != max_col: log_event("[ERROR] %s Different cols not %s but now %s! 
Jumped: %s" % ( counter['tot'], max_col, len(line), counter['tot'])) continue access_id = line[0] ref = "NC%05d" % (int(format_string(line[4]) or '0')) item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) data = { 'ref': ref, 'access_id': access_id, 'gravity_id': 2, #TODO da correggere } if item: quality_conformed[access_id] = item[0] else: try: quality_conformed[access_id] = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event("[INFO] %s. Create %s ref: %s" % ( counter['tot'], openerp_object, ref)) except: log_event( "[ERROR] Error creating, record: %s " % line) except: log_event('[ERROR] %s. Error importing data: %s' % (counter['tot'], sys.exc_info())) continue except: log_event('[ERROR] Error importing data!') raise store = status(openerp_object) if jump_because_imported: quality_conformed = store.load() else: store.store(quality_conformed) log_event("Total %(tot)s" % counter) # CAMPIONAMENTI --------------------------------------------------------------- jump_because_imported = True file_input = os.path.expanduser('~/ETL/generalfood/Campionatura.txt') openerp_object = 'quality.sampling' log_event("Start preload import %s" % openerp_object) quality_sampling = {} lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} max_col = 0 if not jump_because_imported: try: for line in lines: counter['tot'] += 1 if counter['tot'] <= 0: max_col = len(line) continue if len(line): if len(line) != max_col: log_event("[ERROR] %s Different cols not %s but now %s! 
Jumped: %s" % ( counter['tot'], max_col, len(line), counter['tot'])) continue access_id = line[0] ref = "SAM%05d" % (int(format_string(line[4]) or '0')) fake_lot = 91131 item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) data = { 'ref': ref, 'access_id': access_id, 'date': '2014-06-25', 'lot_id': fake_lot, } if item: quality_sampling[access_id] = item[0] else: try: quality_sampling[access_id] = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event("[INFO] %s. Create %s ref: %s" % ( counter['tot'], openerp_object, ref)) except: log_event( "[ERROR] Error creating, record: %s " % line) except: log_event('[ERROR] Error importing data!') raise store = status(openerp_object) if jump_because_imported: quality_sampling = store.load() else: store.store(quality_sampling) log_event("Total %(tot)s" % counter) # AZIONI --------------------------------------------------------------- jump_because_imported = True file_input = os.path.expanduser('~/ETL/generalfood/Azioni.txt') openerp_object = 'quality.action' log_event("Start preload import %s" % openerp_object) quality_action = {} lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} if not jump_because_imported: try: for line in lines: counter['tot'] += 1 if counter['tot'] <= 0: continue if len(line): access_id = line[0] ref = "ACP%05d" % (int(format_string(line[1]) or '0')) item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) data = { 'ref': ref, 'access_id': access_id, } if item: quality_action[access_id] = item[0] else: try: quality_action[access_id] = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event("[INFO] %s. 
Create %s ref: %s" % ( counter['tot'], openerp_object, ref)) except: log_event( "[ERROR] Error creating, record: %s " % line) except: log_event('[ERROR] Error importing data!') raise store = status(openerp_object) if jump_because_imported: quality_action = store.load() else: store.store(quality_action) log_event("Total %(tot)s" % counter) # ----------------------------------------------------------------------------- # RECLAMI # ----------------------------------------------------------------------------- only_create = False jump_because_imported = True file_input = os.path.expanduser('~/ETL/generalfood/Rapporti.txt') openerp_object = 'quality.claim' log_event("Start import %s" % openerp_object) quality_claim = {} lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} old_claim = False try: lot = {1: {}, 2: {}, 3: {}, } for line in lines: if jump_because_imported: break if counter['tot'] < 0: counter['tot'] += 1 continue if len(line): access_id = line[0] name = format_string(line[1]) date = format_date(line[2]) partner_code = format_string(line[3]) partner_ref = format_string(line[6]) receive_user_code = format_string(line[12]) subject = format_string(line[13]) request_return = format_boolean(line[14]) RTR_request = format_boolean(line[16]) analysis = format_string(line[17]) origin_code = format_string(line[36]) cause_code = format_string(line[37]) responsability = format_string(line[38]) solution = format_string(line[39]) gravity_code = format_string(line[40]) need_accredit = format_boolean(line[41]) SFA_saw = format_boolean(line[42]) NC_ref = format_string(line[43]) closed_date = format_date(line[46]) action_code = format_string(line[57]) sampling_code = format_string(line[60]) ref_claim = int(name or '0') if not old_claim: old_claim = ref_claim else: old_claim += 1 if old_claim != ref_claim: log_event("[ERROR] old_rec=%s rec_claim=%s (hole in list)" % ( old_claim, ref_claim)) old_claim = ref_claim ref = 
"REC%05d" % (ref_claim) customer_ref = False # non esiste il codice di rif NC cliente? if need_accredit and not NC_ref: NC_ref = "Nessun riferimento" lot[1]['lot'] = format_string(line[20]) lot[2]['lot'] = format_string(line[26]) lot[3]['lot'] = format_string(line[32]) lot[1]['product'] = format_string(line[23]) lot[2]['product'] = format_string(line[29]) lot[3]['product'] = format_string(line[35]) lot[1]['supplier'] = format_string(line[21]) lot[2]['supplier'] = format_string(line[27]) lot[3]['supplier'] = format_string(line[33]) lot[1]['date'] = format_date(line[18]) lot[2]['date'] = format_date(line[24]) lot[3]['date'] = format_date(line[30]) lot[1]['qty_return'] = format_currency(line[19]) lot[2]['qty_return'] = format_currency(line[25]) lot[3]['qty_return'] = format_currency(line[31]) receive_user_id = 1 # Anagrafiche semplici: origin_id = origin.get(origin_code, False) cause_id = cause.get(cause_code, False) gravity_id = gravity.get(gravity_code, False) # Documenti collegati: action_id = quality_action.get(action_code, False) sampling_id = quality_sampling.get(sampling_code, False) # Trova partner ed eventuale destinazione partner_id = False partner_address_id = False if partner_code[:2] == '06': partner_id = get_or_create_partner(partner_code, 'customer', False, res_partner_customer, res_partner_supplier) elif partner_code[:2] == '07': partner_address_id = get_or_create_partner(partner_code, 'destination', False, res_partner_customer, res_partner_supplier) partner_id = partner_address_id # TODO cercare il partner della destinazione if not partner_id: partner_id = 1 log_event("[WARNING] [%s] Correggere il partner, reclamo: %s" % ( ref, partner_code)) # Start of importation: counter['tot'] += 1 data = { 'name': "%s..." 
                    % subject[:50],  # record name is the subject truncated to 50 chars
                'ref': ref,
                'customer_ref': customer_ref,  # customer's NC code (does not exist)
                'date': date,
                'receive_user_id': receive_user_id,
                'subject': subject,
                'analysis': analysis,
                'responsability': responsability,
                'solution': solution,
                'partner_id': partner_id,
                'partner_ref': partner_ref,  # contact given by the customer
                'partner_address_id': partner_address_id,
                'request_return': request_return,
                'RTR_request': RTR_request,
                'NC_ref': NC_ref,
                'SFA_saw': SFA_saw,
                'origin_id': origin_id,
                'cause_id': cause_id,
                'gravity_id': gravity_id,
                'closed_date': closed_date,
                'action_id': action_id,
                'sampling_id': sampling_id,
                'need_accredit': need_accredit,
                'access_id': access_id,
                }
            # Test whether the record already exists (matching on the Access ID):
            item = sock.execute(dbname, uid, pwd, openerp_object, 'search', [
                ('access_id', '=', access_id)])
            if item:  # already exists: update
                counter['upd'] += 1
                try:
                    if only_create:
                        log_event(
                            "[INFO]", counter['tot'], "Write", openerp_object,
                            " (jumped only_create clause: ", ref)
                    else:
                        try:
                            item_mod = sock.execute(
                                dbname, uid, pwd, openerp_object, 'write',
                                item, data)
                            log_event(
                                "[INFO]", counter['tot'], "Write",
                                openerp_object, ref)
                        except:
                            log_event(
                                "[ERR] %s Write data %s", counter['tot'], data)
                    quality_claim[access_id] = item[0]
                except:
                    log_event("[ERROR] Modifing data, current record:", data)
            else:  # new record: create
                counter['new'] += 1
                try:
                    openerp_id = sock.execute(
                        dbname, uid, pwd, openerp_object, 'create', data)
                    log_event(
                        "[INFO]", counter['tot'], "Create", openerp_object,
                        ref)
                    quality_claim[access_id] = openerp_id
                except:
                    log_event(
                        "[ERROR] Error creating data, current record: ", data)
            # Back-link the related action/sampling documents to this claim:
            if action_id:
                sock.execute(dbname, uid, pwd, 'quality.action', 'write',
                    action_id, {
                        'claim_id': quality_claim[access_id],
                        'origin': 'claim',
                        })
            if sampling_id:
                sock.execute(dbname, uid, pwd, 'quality.sampling', 'write',
                    sampling_id, {
                        'claim_id': quality_claim[access_id],
                        'origin': 'claim',
                        })
            # NOTE: NCs did not exist in the old program, so the "genesis"
            # fields were not updated.
            # Lot importation (one quality.claim.product line per lot slot):
            for key in lot:
                try:
                    lot_name = lot[key]['lot']  # lot number
                    if lot_name and lot_name != '0':
                        lot_id = stock_production_lot.get(lot_name)
                        if not lot_id:
                            #log_event("[ERROR] No Lot, jump: %s" % lot_name) # no comunication
                            continue
                        lot_access_id = '%s%s' % (access_id, key)
                        data = {
                            'lot_id': lot_id,
                            'return_date': lot[key]['date'],
                            'return_qty': lot[key]['qty_return'],
                            'claim_id': quality_claim[access_id],
                            'real_lot_id': lot_id,
                            'access_id': lot_access_id,
                            }
                        # NOTE(review): lot_id is reused here to hold the
                        # search result (claim-product IDs), not the lot itself.
                        lot_id = sock.execute(dbname, uid, pwd,
                            'quality.claim.product', 'search', [
                                ('access_id', '=', lot_access_id)])
                    else:
                        #log_event("[ERROR] No Lot, jump: %s" % lot_name) # no comunication
                        continue
                except:
                    log_event("[ERROR] generic error (lot part) %s" % (
                        sys.exc_info()))
                    continue
                if lot_id:  # already exists: update
                    try:
                        sock.execute(
                            dbname, uid, pwd, 'quality.claim.product',
                            'write', lot_id, data)
                    except:
                        log_event("[ERROR] Modifing lot %s [%s]" % (
                            key, data))
                else:  # new: create
                    try:
                        sock.execute(
                            dbname, uid, pwd, 'quality.claim.product',
                            'create', data)
                    except:
                        log_event(
                            "[ERROR] Error creating lot %s [%s]" % (
                                key, data))
except:
    log_event('[ERROR] Error importing data!')
    raise
# Persist / reload the access_id -> openerp_id map for this model:
store = status(openerp_object)
if jump_because_imported:
    quality_claim = store.load()
else:
    store.store(quality_claim)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)

# -----------------------------------------------------------------------------
# NOT CONFORMED
# -----------------------------------------------------------------------------
only_create = False
jump_because_imported = True  # True: skip re-import, just reload the saved ID map
file_input = os.path.expanduser('~/ETL/generalfood/Conformità.txt')
openerp_object = 'quality.conformed'
log_event("Start import %s" % openerp_object)
quality_conformed = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}  # tot starts at -1: first row is the header
max_col = 0
try:
    # Fixed treatment slots (one per checkbox group in the Access export):
    treatment = {
        1: {'type': 'accept_exception'},
        2: {'type': 'discard'},
        3: {'type': 'make_supplier'},
        }
    # Fixed comunication slots:
    comunication = {
        1:
            {'type': 1},  # Customer
        2: {'type': 2},  # Supplier
        3: {'type': 3},  # ASL
        }
    for line in lines:
        if jump_because_imported:
            break  # data already imported in a previous run
        if counter['tot'] < 0:
            # Header row: remember the expected column count.
            counter['tot'] += 1
            max_col = len(line)
            continue
        if len(line):
            counter['tot'] += 1
            if len(line) != max_col:
                log_event("[ERROR] %s Different cols not %s but now %s! Jumped: %s" % (
                    counter['tot'], max_col, len(line), line))
                continue
            # Column extraction (fixed Access export layout):
            access_id = line[0]
            sampling_code = format_string(line[1])
            action_code = format_string(line[2])
            ref = "NC%05d" % (int(format_string(line[4]) or '0'))
            insert_date = format_date(line[5])
            quantity = format_boolean(line[6])
            sanitation = format_boolean(line[7])
            aesthetic_packaging = format_boolean(line[8])
            name = format_string(line[9])
            # origin = format_string(line[9]) # TODO (position?)
            #genesis_1 = format_boolean(line[11])
            #genesis_2 = format_boolean(line[12])
            treatment[1]['treatment'] = format_boolean(line[13])
            treatment[2]['treatment'] = format_boolean(line[14])
            treatment[3]['treatment'] = format_boolean(line[15])
            treatment[1]['qty'] = format_currency(line[18])
            treatment[2]['qty'] = format_currency(line[19])
            treatment[3]['qty'] = format_currency(line[20])
            treatment[1]['note'] = format_string(line[21])
            treatment[2]['note'] = format_string(line[22])
            treatment[3]['note'] = format_string(line[23])
            # NOTE: the column order differs from the slot order here
            # (columns 24/25 and 27..29 are intentionally shuffled):
            comunication[1]['comunication'] = format_boolean(line[25])  # Cli
            comunication[2]['comunication'] = format_boolean(line[24])  # For
            comunication[3]['comunication'] = format_boolean(line[26])  # ASL
            comunication[1]['protocol'] = format_string(line[29])  # Cli
            comunication[2]['protocol'] = format_string(line[27])  # For
            comunication[3]['protocol'] = format_string(line[28])  # ASL
            note_RAQ = format_string(line[30])
            lot_code = format_string(line[33])
            ddt_ref = format_string(line[34])
            #genesis_3 = format_boolean(line[36])
            cancel = format_boolean(line[37])
            stock_note = format_string(line[38])
            #genesis_4 = format_boolean(line[39])
            gravity_code = format_string(line[40])
            sampling_id = quality_sampling.get(sampling_code, False)
            action_id = quality_action.get(action_code, False)
            gravity_id = gravity.get(gravity_code, 2)  # TODO: change the default
            lot_id = stock_production_lot.get(lot_code)
            if not lot_id:
                # Missing lot: fall back to the placeholder lot so the record
                # can still be created.
                log_event("[ERROR] %s Lot not found %s, temp replaced ID=%s" % (
                    counter['tot'], lot_code, ref))
                lot_id = default_lot_id
            '''if genesis_1:
                genesis = 'acceptance'
            elif genesis_2:
                genesis = 'sample'
            elif genesis_3:
                genesis = 'claim'
            elif genesis_4:
                genesis = 'packaging'
            else:
                genesis = 'other'
            '''
            # Start of importation:
            # Test whether the record already exists (matching on the Access ID):
            item = sock.execute(dbname, uid, pwd, openerp_object, 'search', [
                ('access_id', '=', access_id)])
            data = {
                'name': name,
                'ref': ref,
                'insert_date': insert_date,
                'aesthetic_packaging': aesthetic_packaging,
                'quantity': quantity,
                'sanitation': sanitation,
                'gravity_id': gravity_id,
                #'genesis': genesis, # TODO move everything into the origin field
                #'origin': origin, # TODO derive at the end
                'ddt_ref': ddt_ref,
                'lot_id': lot_id,
                'note_RAQ': note_RAQ,
                'cancel': cancel,
                #'claim_id': claim_id,
                'sampling_id': sampling_id,
                #'acceptation_id': acceptation_id,
                'action_id': action_id,
                'access_id': access_id,
                'stock_note': stock_note,
                }
            if item:  # already exists: update
                counter['upd'] += 1
                try:
                    if only_create:
                        log_event(
                            "[INFO]", counter['tot'], "Write", openerp_object,
                            " (jumped only_create clause: ", name)
                    else:
                        item_mod = sock.execute(
                            dbname, uid, pwd, openerp_object, 'write',
                            item, data)
                        log_event(
                            "[INFO]", counter['tot'], "Write", openerp_object,
                            name)
                    quality_conformed[access_id] = item[0]
                except:
                    log_event("[ERROR] Modifing data, current record:",
                        counter['tot'], data)
                    continue
            else:  # new record: create
                counter['new'] += 1
                try:
                    openerp_id = sock.execute(
                        dbname, uid, pwd, openerp_object, 'create', data)
                    log_event(
                        "[INFO]", counter['tot'], "Create", openerp_object,
                        name)
                    quality_conformed[access_id] = openerp_id
                except:
                    log_event(
                        "[ERROR] Error creating data, current record: ",
                        counter['tot'], data)
                    continue
            # Back-link related documents to this NC record:
            if action_id:
                sock.execute(dbname, uid, pwd, 'quality.action', 'write',
                    action_id, {
                        'conformed_id': quality_conformed[access_id],  # not parent_
                        'origin': 'nc',  # TODO correct?
                        })
            if sampling_id:  # fixed manually
                sock.execute(dbname, uid, pwd, 'quality.sampling', 'write',
                    sampling_id, {
                        'parent_conformed_id': quality_conformed[access_id],
                        'origin': 'nc',  # TODO correct?
                        })
            # Treatment creation (one quality.treatment per flagged slot):
            for key in treatment:
                if treatment[key]['treatment']:
                    treat_access_id = '%s%s' % (access_id, key)
                    data = {
                        'type': treatment[key]['type'],
                        'name': treatment[key]['note'],
                        'qty': treatment[key]['qty'],
                        'conformed_id': quality_conformed[access_id],
                        'access_id': treat_access_id,
                        }
                    treat_id = sock.execute(dbname, uid, pwd,
                        'quality.treatment', 'search', [
                            ('access_id', '=', treat_access_id)])
                    if treat_id:  # already exists: update
                        try:
                            sock.execute(
                                dbname, uid, pwd, 'quality.treatment',
                                'write', treat_id, data)
                        except:
                            log_event("[ERROR] Modifing treat%s" % key)
                    else:  # new: create
                        try:
                            sock.execute(
                                dbname, uid, pwd, 'quality.treatment',
                                'create', data)
                        except:
                            log_event(
                                "[ERROR] Error creating treat%s" % key)
            # Comunication creation (one quality.comunication per flagged slot):
            for key in comunication:
                if comunication[key]['comunication']:
                    comunication_access_id = '%s%s' % (access_id, key)
                    data = {
                        'type_id': comunication[key]['type'],
                        'prot_number': comunication[key]['protocol'],
                        'prot_date': insert_date,
                        'conformed_id': quality_conformed[access_id],
                        'access_id': comunication_access_id,
                        }
                    comunication_id = sock.execute(dbname, uid, pwd,
                        'quality.comunication', 'search', [
                            ('access_id', '=', comunication_access_id)])
                    if comunication_id:  # already exists: update
                        try:
                            sock.execute(
                                dbname, uid, pwd, 'quality.comunication',
                                'write', comunication_id, data)
                        except:
                            log_event("[ERROR] Modifing comunication%s" % key)
                    else:  # new: create
                        try:
                            sock.execute(
                                dbname, uid, pwd, 'quality.comunication',
                                'create', data)
                        except:
                            log_event(
                                "[ERROR] Error creating comunication%s" % key)
except:
    log_event('[ERROR] Error importing data!')
    raise
# Persist / reload the access_id -> openerp_id map for this model:
store = status(openerp_object)
if jump_because_imported:
    quality_conformed = store.load()
else:
    store.store(quality_conformed)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)

# -----------------------------------------------------------------------------
# CAMPIONAMENTI (samplings)
# -----------------------------------------------------------------------------
only_create = False
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Campionatura.txt')
openerp_object = 'quality.sampling'
log_event("Start import %s" % openerp_object)
quality_sampling = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
sample_passed = []     # sampling IDs for the "passed" workflow trigger phase
sample_notpassed = []  # sampling IDs for the "not passed" workflow trigger phase
tasters = {1: '', 2: '', 3: '', 4: ''}
try:
    for line in lines:
        if jump_because_imported:
            break
        if counter['tot'] < 0:
            # Header row: remember the expected column count.
            counter['tot'] += 1
            max_col = len(line)
            continue
        if len(line):
            if len(line) != max_col:
                log_event("[ERROR] %s Different cols not %s but now %s! Jump:" % (
                    counter['tot'], max_col, len(line)))
                continue
            access_id = line[0]
            closed = format_boolean(line[1])  # closed (sample)
            ref = format_string(line[2])
            date = format_date(line[3])
            lot_code = format_string(line[4])
            # Flags: which checks have to be performed:
            do_visual = format_boolean(line[8])  # ex 8
            do_analysis = format_boolean(line[9])  # ex 10
            do_taste = format_boolean(line[10])  # ex 9
            do_glazing = format_boolean(line[11])  # ex 11
            # Flags: check outcomes:
            visual_state = format_boolean(line[12])  # ex 12
            analysis_state = format_boolean(line[13])  # ex 14
            taste_state = format_boolean(line[14])  # ex 13
            glazing_state = format_boolean(line[15])  # ex 15
            # Check descriptions:
            analysis = format_string(line[16])
            taste = format_string(line[17])
            visual = format_string(line[18])
            weight_glazing = format_currency(line[19])
            weight_drained = format_currency(line[20])
            perc_glazing_indicated = format_currency(line[21])
            perc_glazing_calculated = format_currency(line[22])
            # Tasters:
            tasters[1] = format_string(line[23])
            tasters[2] = format_string(line[24])
            tasters[3] = format_string(line[25])
            tasters[4] = format_string(line[26])
            passed = format_boolean(line[27])  # passed (sample)
            note = format_string(line[29])
            conformed_code = format_string(line[36])
            cancel = format_boolean(line[38])
            sampling_plan_code = format_string(line[39])
            ref = "SAM%05d" % (int(ref or '0'))
            lot_id = stock_production_lot.get(lot_code, False)
            if not lot_id:
                log_event("[ERROR] %s Lot not found (replaced with temp raplaced ID=%s) %s" % (
                    counter['tot'], lot_code, ref))
                lot_id = default_lot_id
            conformed_id = quality_conformed.get(conformed_code, False)
            sampling_plan_id = plan.get(sampling_plan_code, False)
            if not date:
                # NOTE(review): ``data`` here is the dict left over from the
                # *previous* section/record, so this falls back to a stale
                # date -- looks like a latent bug; confirm intent.
                date = data.get('date', default_error_data)
            # Start of importation:
            counter['tot'] += 1
            # Test whether the record already exists (matching on the Access ID):
            item = sock.execute(dbname, uid, pwd, openerp_object, 'search', [
                ('access_id', '=', access_id)])
            data = {
                'ref': ref,
                'date': date,
                'lot_id': lot_id,
                #'origin': origin, TODO (see whether derivable; for now only claims exist)
                'conformed_id': conformed_id,
                # Checks to do:
                'do_visual': do_visual,
                'do_analysis': do_analysis,
                'do_glazing': do_glazing,
                'do_taste': do_taste,
                # Text info:
                'visual': visual,
                'analysis': analysis,
                'taste': taste,
                'weight_glazing': weight_glazing,
                'perc_glazing_indicated': perc_glazing_indicated,
                'weight_drained': weight_drained,
                'perc_glazing_calculated': perc_glazing_calculated,
                'note': note,
                'sampling_plan_id': sampling_plan_id,
                'cancel': cancel,
                'access_id': access_id,
                }
            # Closed samples: an unanswered check counts as failed;
            # open samples: an unanswered check is still to be examined.
            if closed:
                data['visual_state'] = 'passed' if visual_state else 'not_passed'
                data['analysis_state'] = 'passed' if analysis_state else 'not_passed'
                data['taste_state'] = 'passed' if taste_state else 'not_passed'
                data['glazing_state'] = 'passed' if glazing_state else 'not_passed'
            else:
                data['visual_state'] = 'passed' if visual_state else 'to_examined'
                data['analysis_state'] = 'passed' if analysis_state else 'to_examined'
                data['taste_state'] = 'passed' if taste_state else 'to_examined'
                data['glazing_state'] = 'passed' if glazing_state else 'to_examined'
            if item:  # already exists: update
                counter['upd'] += 1
                try:
                    if only_create:
                        log_event(
                            "[INFO]", counter['tot'], "Write", openerp_object,
                            " (jumped only_create clause: ", ref)
                    else:
                        item_mod = sock.execute(
                            dbname, uid, pwd, openerp_object, 'write',
                            item, data)
                        log_event(
                            "[INFO]", counter['tot'], "Write", openerp_object,
                            ref)
                    quality_sampling[access_id] = item[0]
                except:
                    log_event("[ERROR] Modifing data, current record:", data)
            else:  # new record: create
                counter['new'] += 1
                try:
                    openerp_id = sock.execute(
                        dbname, uid, pwd, openerp_object, 'create', data)
                    log_event(
                        "[INFO]", counter['tot'], "Create", openerp_object,
                        ref)
                    quality_sampling[access_id] = openerp_id
                except:
                    log_event(
                        "[ERROR] Error creating data, current record: ", data)
            if conformed_id:
                # Back-link the NC record to this sampling:
                sock.execute(dbname, uid, pwd, 'quality.conformed', 'write',
                    conformed_id, {
                        'parent_sampling_id': quality_sampling[access_id],
                        'origin': 'sampling',
                        })
            # Add the tasters (one quality.sampling.taster per non-empty slot):
            for taste_id, taster in tasters.iteritems():
                if taster:
                    taster_access_id = "%s%s" % (access_id, taste_id)
                    data = {
                        'name': taster,
                        'sample_id': quality_sampling[access_id],
                        'access_id': taster_access_id,
                        }
                    taster_ids = sock.execute(dbname, uid, pwd,
                        'quality.sampling.taster', 'search', [
                            ('access_id', '=', taster_access_id)])
                    if taster_ids:
                        taster_ids = sock.execute(dbname, uid, pwd,
                            'quality.sampling.taster', 'write',
                            taster_ids[0], data)
                    else:
                        taster_ids = sock.execute(dbname, uid, pwd,
                            'quality.sampling.taster', 'create', data)
            # Collect IDs for the workflow triggers run at the end of import:
            if closed:  # test for WF (end of importation)
                if passed:
                    sample_passed.append(quality_sampling[access_id])
                else:
                    sample_notpassed.append(quality_sampling[access_id])
            else:
                if passed:
                    sample_passed.append(quality_sampling[access_id])
except:
    log_event('[ERROR] Error importing data!')
    raise
# Persist / reload the access_id -> openerp_id map for this model:
store = status(openerp_object)
if jump_because_imported:
    quality_sampling = store.load()
else:
    store.store(quality_sampling)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)

# -----------------------------------------------------------------------------
# ACTION
# -----------------------------------------------------------------------------
only_create = False
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Azioni.txt')
openerp_object = 'quality.action'
log_event("Start import %s" % openerp_object)
#quality_action = {} # loaded in the pre-phase (removed: it resets the map and loses the child links)
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
try:
    for line in lines:
        if jump_because_imported:
            break
        if counter['tot'] < 0:
            counter['tot'] += 1
            max_col = len(line)
            continue
        if len(line):
            if len(line) != max_col:
                # NOTE(review): the format string below appears corrupted in
                # the source ("] counter['tot'], ]" is embedded in it); kept
                # byte-for-byte -- it still has three %s for three arguments.
                log_event("[ERROR] %s ] counter['tot'], ] not %s but now %s! Jump:" % (
                    counter['tot'], max_col, len(line)))
                continue
            counter['tot'] += 1
            access_id = line[0]
            ref = format_string(line[1])
            date = format_date(line[2])
            origin = format_string(line[3])  # TODO do at the end
            note = format_string(line[4])
            proposed_subject = format_string(line[5])
            esit_date = format_date(line[6])
            esit_note = format_string(line[7])
            child_code = format_string(line[9])
            #closed 10
            closed_date = format_date(line[11])
            proposing_entity = format_string(line[13])
            action_type = format_string(line[16])
            ref = "ACP%05d" % (int(ref or '0'))
            # Map the Access action-type label onto the OpenERP selection:
            if action_type == "Azione Preventiva":
                action_type_id = 'preventive'
            elif action_type == "Intervento di Miglioramento":
                action_type_id = 'enhance'
            else:  # action_type == "Azione Correttiva" or ""
                action_type_id = 'corrective'  # default
            child_id = quality_action.get(child_code, False)
            origin = origin_action.get(origin, False)
            # Test whether the record already exists (matching on the Access ID):
            item = sock.execute(dbname, uid, pwd, openerp_object, 'search', [
                ('access_id', '=', access_id)])
            data = {
                'ref': ref,
                'date': date,
                'origin': origin,
                'note': note,
                'proposed_subject': proposed_subject,
                'proposing_entity': proposing_entity,
                'esit_date': esit_date,
                'closed_date': closed_date,
                'esit_note': esit_note,
                'child_id': child_id,
                'type': action_type_id,
                'access_id': access_id,
                }
            if item:  # already exists: update
                counter['upd'] += 1
                try:
                    if only_create:
                        log_event(
                            "[INFO]", counter['tot'], "Write", openerp_object,
                            " (jumped only_create clause: ", ref)
                    else:
                        item_mod = sock.execute(
                            dbname, uid, pwd, openerp_object, 'write',
                            item, data)
                        log_event(
                            "[INFO]", counter['tot'], "Write", openerp_object,
                            ref)
                    quality_action[access_id] = item[0]
                except:
                    log_event("[ERROR] Modifing data, current record:", data)
            else:  # new record: create
                counter['new'] += 1
                try:
                    openerp_id = sock.execute(
                        dbname, uid, pwd, openerp_object, 'create', data)
                    # NOTE(review): logs ``name`` although this section never
                    # sets it (only ``ref`` exists here) -- likely a stale
                    # copy/paste from the previous section; kept as-is.
                    log_event(
                        "[INFO]", counter['tot'], "Create", openerp_object,
                        name)
                    quality_action[access_id] = openerp_id
                except:
log_event( "[ERROR] Error creating data, current record: ", data) if child_id: sock.execute(dbname, uid, pwd, 'quality.action', 'write', child_id, { 'parent_id' : quality_action[access_id], 'origin': data['origin'], # TODO Non importa }) except: log_event('[ERROR] Error importing data!') raise store = status(openerp_object) if jump_because_imported: quality_action = store.load() else: store.store(quality_action) log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter) # ----------------------------------------------------------------------------- # ACTION INTERVENT # ----------------------------------------------------------------------------- only_create = False jump_because_imported = True file_input = os.path.expanduser('~/ETL/generalfood/Interventi.txt') openerp_object = 'quality.action.intervent' log_event("Start import %s" % openerp_object) quality_action_intervent = {} lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} max_col = 0 try: for line in lines: if jump_because_imported: break if counter['tot'] < 0: counter['tot'] += 1 max_col = len(line) continue if len(line): if len(line) != max_col: log_event("[ERROR] %s Different cols not %s but now %s! Jumped:" % ( counter['tot'], max_col, len(line))) continue access_id = line[0] action_code = format_string(line[1]) name = format_string(line[2]) manager_code = format_string(line[3]) deadline = format_date(line[4]) action_id = quality_action.get(action_code, False) manager_id = 1 # Start of importation: counter['tot'] += 1 # test if record exists (basing on Ref. 
as code of Partner) item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) data = { 'name': name, 'manager_id': manager_id, 'deadline': deadline, 'action_id': action_id, 'access_id': access_id, } if item: # already exist counter['upd'] += 1 try: if only_create: log_event( "[INFO]", counter['tot'], "Write", openerp_object, " (jumped only_create clause: ", access_id) else: item_mod = sock.execute( dbname, uid, pwd, openerp_object, 'write', item, data) log_event( "[INFO]", counter['tot'], "Write", openerp_object, access_id) quality_action_intervent[access_id] = item[0] except: log_event("[ERROR] Modifing data, current record:", data) else: # new counter['new'] += 1 try: openerp_id = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event( "[INFO]", counter['tot'], "Create", openerp_object, access_id) quality_action_intervent[access_id] = openerp_id except: log_event( "[ERROR] Error creating data, current record: ", data) except: log_event('[ERROR] Error importing data!') raise store = status(openerp_object) if jump_because_imported: quality_action_intervent = store.load() else: store.store(quality_action_intervent) log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter) # ----------------------------------------------------------------------------- # ACCEPTATION # ----------------------------------------------------------------------------- only_create = False jump_because_imported = False file_input = os.path.expanduser('~/ETL/generalfood/Accettazioni.txt') openerp_object = 'quality.acceptation' log_event("Start import %s" % openerp_object) quality_acceptation = {} lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} max_col = 0 try: for line in lines: if jump_because_imported: break if counter['tot'] < 0: counter['tot'] += 1 max_col = len(line) continue if len(line): if len(line) != max_col: log_event("[ERROR] %s Different cols not %s but now %s! 
Jumped:" % ( counter['tot'], max_col, len(line))) continue counter['tot'] += 1 access_id = line[0] name = format_string(line[1]) date = format_date(line[2]) partner_code = format_string(line[3]) origin = format_string(line[5]) note = format_string(line[6]) cancel = format_boolean(line[11]) if not date: date = data.get('date', default_error_data) ref = "ACPT%05d" % (int(name or '0')) if partner_code: partner_id = get_or_create_partner(partner_code, 'supplier', False, res_partner_customer, res_partner_supplier) else: partner_id = False if not partner_id: log_event("[WARN] Partner not found in %s" % (ref)) # test if record exists (basing on Ref. as code of Partner) item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) data = { 'ref': ref, 'date': date, 'origin': origin, 'partner_id': partner_id, 'note': note, 'cancel': cancel, 'access_id': access_id, } if item: # already exist counter['upd'] += 1 try: if only_create: log_event( "[INFO]", counter['tot'], "Write", openerp_object, " (jumped only_create clause: ", name) else: item_mod = sock.execute( dbname, uid, pwd, openerp_object, 'write', item, data) log_event( "[INFO]", counter['tot'], "Write", openerp_object, name) quality_acceptation[access_id] = item[0] except: log_event("[ERROR] Modifing data, current record:", data) else: # new counter['new'] += 1 try: openerp_id = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event( "[INFO]", counter['tot'], "Create", openerp_object, name) quality_acceptation[access_id] = openerp_id except: log_event( "[ERROR] Error creating data, current record: ", data) except: log_event('[ERROR] Error importing data!') raise store = status(openerp_object) if jump_because_imported: quality_acceptation = store.load() else: store.store(quality_acceptation) log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter) # ----------------------------------------------------------------------------- # ACCEPTATION DETAILS # 
----------------------------------------------------------------------------- only_create = False jump_because_imported = False file_input = os.path.expanduser('~/ETL/generalfood/Dettagli.txt') openerp_object = 'quality.acceptation.line' log_event("Start import %s" % openerp_object) quality_acceptation_line = {} lines = csv.reader(open(file_input, 'rb'), delimiter=separator) counter = {'tot': -1, 'new': 0, 'upd': 0} max_col = 0 try: for line in lines: if jump_because_imported: break if counter['tot'] < 0: counter['tot'] += 1 max_col = len(line) continue if len(line): if len(line) != max_col: log_event("[ERROR] Different col not %s but now %s! Jumped:" % ( max_col, len(line))) continue counter['tot'] += 1 # Read line access_id = line[0] acceptation_code = format_string(line[1]) lot_code = format_string(line[2]) conformed_code = format_string(line[3]) qty_arrived = format_currency(line[4]) qty_expected = format_currency(line[5]) temp = format_boolean(line[6]) # Motivo label = format_boolean(line[7]) # Etichetta package = format_boolean(line[8]) # Stato #visual = format_boolean(line[9]) # Visivo expired = format_boolean(line[10]) # Scadenza motivation = format_string(line[11]) qty = format_boolean(line[12]) # Quantitativo quality = False # TODO esiste sul file da importare?? lot_id = False if not lot_code or lot_code == '0': log_event("[ERROR] Lot empty, jumped:", acceptation_code) continue lot_id = stock_production_lot.get(lot_code, False) if not lot_id: log_event("[ERROR] Lot not found, temp created:", lot_code) # Create lot (after will be updated from syncro with MySQL) lot_id = sock.execute(dbname, uid, pwd, 'stock.production.lot', 'create', { 'name': lot_code, 'product_id': default_product_id, 'date': datetime.now().strftime("%Y-%m-%d"), 'default_supplier_id': False }) # test if record exists (basing on Ref. 
as code of Partner) item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [ ('access_id', '=', access_id)]) if conformed_code and conformed_code != '0': conformed_id = quality_conformed.get('conformed_code', False) if not conformed_id: conformed_ids = sock.execute(dbname, uid, pwd, 'quality.conformed', 'search', [ ('access_id', '=', conformed_code)]) if conformed_ids: conformed_id = conformed_ids[0] else: log_event("[WARNING] Conformed_id not found, not write: %s" % counter['tot']) else: conformed_id = False #quality_conformed.get(conformed_code, False) acceptation_id = quality_acceptation.get(acceptation_code, False) if not acceptation_id: log_event("[ERROR] %s. No parent form: %s" % ( counter['tot'], acceptation_code)) continue data = { 'acceptation_id': acceptation_id, 'lot_id': lot_id, 'qty_arrived': qty_arrived, 'qty_expected': qty_expected, # Motivi check: 'qty': qty, 'temp': temp, 'label': label, 'package': package, 'expired': expired, #'qty_package': qty_package, 'conformed_id': conformed_id, 'motivation': motivation, 'access_id': access_id, } if item: # already exist counter['upd'] += 1 try: if only_create: log_event( "[INFO]", counter['tot'], "Write", openerp_object, " (jumped only_create clause: ") else: item_mod = sock.execute( dbname, uid, pwd, openerp_object, 'write', item, data) log_event( "[INFO]", counter['tot'], "Write", openerp_object) quality_acceptation_line[access_id] = item[0] except: log_event("[ERROR] Modifing data, current record:", data) else: # new counter['new'] += 1 try: openerp_id = sock.execute( dbname, uid, pwd, openerp_object, 'create', data) log_event( "[INFO]", counter['tot'], "Create", openerp_object) quality_acceptation_line[access_id] = openerp_id except: log_event( "[ERROR] Error creating data, current record: ", data) # Aggiorno il valore per il ritorno alla scheda accettazione if conformed_id: sock.execute(dbname, uid, pwd, 'quality.conformed', 'write', conformed_id, { 'acceptation_id' : acceptation_id, # Padre 
della riga 'origin': 'acceptation', }) except: log_event('[ERROR] Error importing data!') raise store = status(openerp_object) if jump_because_imported: quality_acceptation_line = store.load() else: store.store(quality_acceptation_line) # ----------------------------------------------------------------------------- # Trigger events: # ----------------------------------------------------------------------------- # ------------ # ACCEPTATION: # ------------ # TODO # ------- # CLAIMS: # ------- # Claim (bozza > opened) openerp_object = 'quality.claim' domain = [('state','=','draft')] field_list = ('id',) log_event('Start trigger WF Claim (bozza > open)') item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain) for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list): try: item_id = item['id'] sock.exec_workflow(dbname, uid, pwd, openerp_object, 'trigger_claim_draft_opened', item_id) log_event('[INFO] bozza > opened, ID: %s' % item_id) except: log_event('[ERROR] Impossibile bozza > opened, ID: %s' % item_id) log_event('End trigger WF Claim (bozza > open) record %s' % len(item_ids)) # Claim (opened > nc > done > close > saw ) domain = [('state', '=', 'opened'), ('need_accredit', '=', True)] field_list = ('id') log_event('Start trigger WF Claim (opened > nc > done > close > saw)') item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain) for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list): try: item_id = item['id'] sock.exec_workflow(dbname, uid, pwd, openerp_object, 'trigger_claim_opened_nc', item_id) sock.exec_workflow(dbname, uid, pwd, openerp_object, 'trigger_claim_nc_done', item_id) sock.exec_workflow(dbname, uid, pwd, openerp_object, 'trigger_claim_done_closed', item_id) sock.exec_workflow(dbname, uid, pwd, openerp_object, 'trigger_claim_closed_saw', item_id) log_event('[INFO] opened > nc > done > close > saw, ID: %s' % item_id) except: log_event('[ERROR] 
Impossibile opened > nc > done > close > saw, ID: %s' % item_id) log_event('End trigger WF Claim (opened > nc > done > close > saw) record %s' % len(item_ids)) # Claim (opened > closed > saw) domain = [('state', '=', 'opened')] field_list = ('id') log_event('Start trigger WF Claim (opened > closed > saw)') item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain) for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list): try: item_id = item['id'] sock.exec_workflow(dbname, uid, pwd, openerp_object, 'trigger_claim_opened_closed', item_id) sock.exec_workflow(dbname, uid, pwd, openerp_object, 'trigger_claim_closed_saw', item_id) log_event('[INFO] opened > closed > saw, ID: %s' % item_id) except: log_event('[ERROR] Impossibile opened > closed > saw, ID: %s' % item_id) log_event('End trigger WF Claim (opened > closed > saw) record %s' % len(item_ids)) # ------- # Action: # ------- # Action (draft > opened) openerp_object = 'quality.action' domain = [('state','=','draft')] field_list = ('id',) log_event('Start trigger WF Action (draft > opened)') item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain) for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list): try: item_id = item['id'] sock.exec_workflow(dbname, uid, pwd, openerp_object, 'trigger_action_draft_opened', item_id) log_event('[INFO] bozza > opened, ID: %s' % item_id) except: log_event('[ERROR] Impossibile bozza > opened, ID: %s' % item_id) log_event('End trigger WF Claim (bozza > opened) record %s' % len(item_ids)) # Action (opened > closed > saw) > quelle con la data di chiusura domain = [('state','=','opened'),('closed_date','!=',False)] field_list = ('id',) log_event('Start trigger WF Action (opened > closed > saw)') item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain) for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list): try: item_id = item['id'] 
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_action_opened_closed', item_id)
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_action_closed_saw', item_id)
        log_event('[INFO] opened > closed > saw, ID: %s' % item_id)
    except:
        log_event('[ERROR] Impossibile opened > closed > saw, ID: %s' % item_id)
log_event('End trigger WF Claim (opened > closed > saw) record %s' % len(item_ids))

# ----------
# Conformed:
# ----------
# Conformed (draft > opened > closed > saw) -- not cancelled
openerp_object = 'quality.conformed'
domain = [('state', '=', 'draft'), ('cancel', '=', False)]
field_list = ('id', )
log_event('Start trigger WF Conformed (draft > opened > closed > saw)')
item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain)
for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids,
        field_list):
    try:
        item_id = item['id']
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_conformed_draft_opened', item_id)
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_conformed_opened_closed', item_id)
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_conformed_closed_saw', item_id)
        log_event('[INFO] draft > opened > closed > saw, ID: %s' % item_id)
    except:
        log_event('[ERROR] Impossibile draft > opened > closed > saw, ID: %s' % item_id)
log_event('End trigger WF Claim (draft > opened > closed > saw) record %s' % len(item_ids))

# Conformed (draft > opened > cancel) -- cancelled ones
domain = [('state', '=', 'draft'), ('cancel', '=', True)]
field_list = ('id', )
log_event('Start trigger WF Conformed (draft > opened > cancel)')
item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain)
for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids,
        field_list):
    try:
        item_id = item['id']
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_conformed_draft_opened', item_id)
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_conformed_opened_cancel', item_id)
        log_event('[INFO] draft > opened > closed > saw, ID: %s' % item_id)
    except:
        log_event('[ERROR] Impossibile draft > opened > closed > saw, ID: %s' % item_id)
log_event('End trigger WF Claim (draft > opened > closed > saw) record %s' % len(item_ids))

# ---------
# Sampling:
# ---------
openerp_object = 'quality.sampling'
comment = "Sampling (draft > opened > passed) >> passati"
log_event('Start trigger WF %s' % comment)
for item_id in sample_passed:
    try:
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_sampling_draft_opened', item_id)
    except:
        log_event('[WARNING] Impossibile %s, ID: %s' % (comment, item_id))
    try:
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_sampling_opened_passed', item_id)
        log_event('[INFO] %s, ID: %s' % (comment, item_id))
    except:
        log_event('[ERROR] Impossibile %s, ID: %s' % (comment, item_id))
# NOTE(review): the count below uses len(item_ids) from the *previous*
# section, not len(sample_passed) -- the logged total is wrong; kept as-is.
log_event('End trigger WF %s record %s' % (comment, len(item_ids)))

comment = "Sampling (draft > opened > notpassed) >> not passati"
log_event('Start trigger WF %s' % comment)
for item_id in sample_notpassed:
    try:
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_sampling_draft_opened', item_id)
    except:
        log_event('[WARNING] Impossibile aprire il campionamento %s, ID: %s' % (comment, item_id))
    try:
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_sampling_opened_notpassed', item_id)
        log_event('[INFO] %s, ID: %s' % (comment, item_id))
    except:
        log_event('[ERROR] Impossibile mettere non passato %s, ID: %s' % (comment, item_id))
# NOTE(review): same stale len(item_ids) count as above; kept as-is.
log_event('End trigger WF %s record %s' % (comment, len(item_ids)))

comment = "Sampling (draft > opened) >> aperti"
domain = [('state', '=', 'draft')]
field_list = ('id', )
log_event('Start trigger WF %s' % comment)
item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain)
for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids,
        field_list):
    try:
        item_id = item['id']
        sock.exec_workflow(dbname, uid, pwd, openerp_object,
            'trigger_sampling_draft_opened', item_id)
        log_event('[INFO] %s, ID: %s' %
(comment, item_id)) except: log_event('[ERROR] Impossibile %s, ID: %s' % (comment, item_id)) log_event('End trigger WF %s record %s' % (comment, len(item_ids))) log_event("PROMEMORIA: Aggiornare i contatori nel programma, valori prossimi: ") # TODO mettere il counter total log_event("End of importation")
Micronaet/micronaet-quality
quality/etl/import.py
Python
agpl-3.0
101,183
0.009172
#! /usr/bin/env python # encoding: utf-8 import os,sys,imp,types,tempfile,optparse import Logs,Utils from Constants import* cmds='distclean configure build install clean uninstall check dist distcheck'.split() commands={} is_install=False options={} arg_line=[] launch_dir='' tooldir='' lockfile=os.environ.get('WAFLOCK','.lock-wscript') try:cache_global=os.path.abspath(os.environ['WAFCACHE']) except KeyError:cache_global='' platform=Utils.unversioned_sys_platform() conf_file='conf-runs-%s-%d.pickle'%(platform,ABI) remote_repo=['http://waf.googlecode.com/svn/'] default_prefix=os.environ.get('PREFIX') if not default_prefix: if platform=='win32': d=tempfile.gettempdir() default_prefix=d[0].upper()+d[1:] else:default_prefix='/usr/local/' default_jobs=os.environ.get('JOBS',-1) if default_jobs<1: try: if'SC_NPROCESSORS_ONLN'in os.sysconf_names: default_jobs=os.sysconf('SC_NPROCESSORS_ONLN') else: default_jobs=int(Utils.cmd_output(['sysctl','-n','hw.ncpu'])) except: if os.name=='java': from java.lang import Runtime default_jobs=Runtime.getRuntime().availableProcessors() else: default_jobs=int(os.environ.get('NUMBER_OF_PROCESSORS',1)) default_destdir=os.environ.get('DESTDIR','') def get_usage(self): cmds_str=[] module=Utils.g_module if module: tbl=module.__dict__ keys=list(tbl.keys()) keys.sort() if'build'in tbl: if not module.build.__doc__: module.build.__doc__='builds the project' if'configure'in tbl: if not module.configure.__doc__: module.configure.__doc__='configures the project' ban=['set_options','init','shutdown'] optlst=[x for x in keys if not x in ban and type(tbl[x])is type(parse_args_impl)and tbl[x].__doc__ and not x.startswith('_')] just=max([len(x)for x in optlst]) for x in optlst: cmds_str.append(' %s: %s'%(x.ljust(just),tbl[x].__doc__)) ret='\n'.join(cmds_str) else: ret=' '.join(cmds) return'''waf [command] [options] Main commands (example: ./waf build -j4) %s '''%ret setattr(optparse.OptionParser,'get_usage',get_usage) def create_parser(module=None): 
Logs.debug('options: create_parser is called') parser=optparse.OptionParser(conflict_handler="resolve",version='waf %s (%s)'%(WAFVERSION,WAFREVISION)) parser.formatter.width=Utils.get_term_cols() p=parser.add_option p('-j','--jobs',type='int',default=default_jobs,help='amount of parallel jobs (%r)'%default_jobs,dest='jobs') p('-k','--keep',action='store_true',default=False,help='keep running happily on independent task groups',dest='keep') p('-v','--verbose',action='count',default=0,help='verbosity level -v -vv or -vvv [default: 0]',dest='verbose') p('--nocache',action='store_true',default=False,help='ignore the WAFCACHE (if set)',dest='nocache') p('--zones',action='store',default='',help='debugging zones (task_gen, deps, tasks, etc)',dest='zones') p('-p','--progress',action='count',default=0,help='-p: progress bar; -pp: ide output',dest='progress_bar') p('--targets',action='store',default='',help='build given task generators, e.g. "target1,target2"',dest='compile_targets') gr=optparse.OptionGroup(parser,'configuration options') parser.add_option_group(gr) gr.add_option('-b','--blddir',action='store',default='',help='build dir for the project (configuration)',dest='blddir') gr.add_option('-s','--srcdir',action='store',default='',help='src dir for the project (configuration)',dest='srcdir') gr.add_option('--prefix',help='installation prefix (configuration) [default: %r]'%default_prefix,default=default_prefix,dest='prefix') gr.add_option('--download',action='store_true',default=False,help='try to download the tools if missing',dest='download') gr=optparse.OptionGroup(parser,'installation options') parser.add_option_group(gr) gr.add_option('--destdir',help='installation root [default: %r]'%default_destdir,default=default_destdir,dest='destdir') gr.add_option('-f','--force',action='store_true',default=False,help='force file installation',dest='force') return parser def parse_args_impl(parser,_args=None): global options,commands,arg_line 
(options,args)=parser.parse_args(args=_args) arg_line=args commands={} for var in cmds:commands[var]=0 if not args: commands['build']=1 args.append('build') for arg in args: commands[arg]=True if'check'in args: idx=args.index('check') try: bidx=args.index('build') if bidx>idx: raise ValueError('build before check') except ValueError as e: args.insert(idx,'build') if args[0]!='init': args.insert(0,'init') if options.keep:options.jobs=1 if options.jobs<1:options.jobs=1 if'install'in sys.argv or'uninstall'in sys.argv: options.destdir=options.destdir and os.path.abspath(os.path.expanduser(options.destdir)) Logs.verbose=options.verbose Logs.init_log() if options.zones: Logs.zones=options.zones.split(',') if not Logs.verbose:Logs.verbose=1 elif Logs.verbose>0: Logs.zones=['runner'] if Logs.verbose>2: Logs.zones=['*'] class Handler(Utils.Context): parser=None def __init__(self,module=None): self.parser=create_parser(module) self.cwd=os.getcwd() Handler.parser=self def add_option(self,*k,**kw): self.parser.add_option(*k,**kw) def add_option_group(self,*k,**kw): return self.parser.add_option_group(*k,**kw) def get_option_group(self,opt_str): return self.parser.get_option_group(opt_str) def sub_options(self,*k,**kw): if not k:raise Utils.WscriptError('folder expected') self.recurse(k[0],name='set_options') def tool_options(self,*k,**kw): if not k[0]: raise Utils.WscriptError('invalid tool_options call %r %r'%(k,kw)) tools=Utils.to_list(k[0]) path=Utils.to_list(kw.get('tdir',kw.get('tooldir',tooldir))) for tool in tools: tool=tool.replace('++','xx') if tool=='java':tool='javaw' if tool.lower()=='unittest':tool='unittestw' module=Utils.load_tool(tool,path) try: fun=module.set_options except AttributeError: pass else: fun(kw.get('option_group',self)) def parse_args(self,args=None): parse_args_impl(self.parser,args)
tsarnowski/hamster
wafadmin/Options.py
Python
gpl-3.0
6,022
0.06277
#!C:\Python27\python.exe # Filename: GenericBytecode.py # -*- coding: utf-8 -*- import os import Settings ''' Generic Bytecode Simply add, remove or modify bytecode for use in KHMS ''' createFrame = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', 'aload_0', 'getfield', \ 'invokevirtual', 'iadd', 'i2b', 'bastore', 'return'] writeDWord = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', 'ishr', \ 'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', \ 'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \ 'bipush', 'ishr', 'i2b', 'bastore', 'aload_0', 'getfield', \ 'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', \ 'putfield', 'iload_1', 'bipush', 'ishr', 'i2b', 'bastore', \ 'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'iload_1', 'i2b', 'bastore', 'return'] # writeWordBigEndian = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', \ # 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \ # 'i2b', 'bastore', 'return', 'aload_0', 'dup', 'getfield', \ # 'iconst_3', 'iadd', 'putfield', 'sipush', 'aload_0', \ # 'getfield', 'aload_0', 'getfield', 'iconst_3', 'isub', \ # 'baload', 'bipush', 'ishl', 'iand', 'sipush', 'aload_0', \ # 'getfield', 'aload_0', 'getfield', 'iconst_2', 'isub', \ # 'baload', 'bipush', 'ishl', 'iand', 'iadd', 'sipush', \ # 'aload_0', 'getfield', 'aload_0', 'getfield', 'iconst_1', \ # 'isub', 'baload', 'iand', 'iadd', 'ireturn'] writeWordBigEndian = ['aload_0', 'getfield Stream/buffer [B', 'aload_0', 'dup', 'getfield Stream/currentOffset I', 'dup_x1', 'iconst_1', 'iadd', 'putfield Stream/currentOffset I', 'iload_1', 'i2b', 'bastore', 'return'] writeWord = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', 'ishr', \ 'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', \ 
'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \ 'i2b', 'bastore', 'return'] writeDWordBigEndian = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', \ 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \ 'bipush', 'ishr', 'i2b', 'bastore', 'aload_0', \ 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', \ 'ishr', 'i2b', 'bastore', 'aload_0', 'getfield', \ 'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', \ 'iadd', 'putfield', 'iload_1', 'i2b', 'bastore', 'return'] method403 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'iload_1', 'i2b', 'bastore', \ 'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', 'ishr', \ 'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', \ 'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \ 'bipush', 'ishr', 'i2b', 'bastore', 'aload_0', 'getfield', \ 'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', \ 'putfield', 'iload_1', 'bipush', 'ishr', 'i2b', 'bastore', 'return'] writeQWord = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'lload_1', 'bipush', 'lshr', \ 'l2i', 'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', \ 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', \ 'lload_1', 'bipush', 'lshr', 'l2i', 'i2b', 'bastore', 'aload_0', \ 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', \ 'iadd', 'putfield', 'lload_1', 'bipush', 'lshr', 'l2i', 'i2b', \ 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', \ 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'lload_1', 'bipush', \ 'lshr', 'l2i', 'i2b', 'bastore', 'aload_0', 'getfield', \ 'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', \ 'putfield', 'lload_1', 'bipush', 'lshr', 'l2i', 'i2b', \ 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', \ 'dup_x1', 'iconst_1', 
'iadd', 'putfield', 'lload_1', 'bipush', \ 'lshr', 'l2i', 'i2b', 'bastore', 'aload_0', 'getfield', \ 'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', \ 'putfield', 'lload_1', 'bipush', 'lshr', 'l2i', 'i2b', \ 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', \ 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'lload_1', 'l2i', \ 'i2b', 'bastore', 'goto', 'astore_3', 'new', 'dup', \ 'invokespecial', 'ldc', 'invokevirtual', 'lload_1', \ 'invokevirtual', 'ldc', 'invokevirtual', 'aload_3', \ 'invokevirtual', 'invokevirtual', 'invokevirtual', \ 'invokestatic', 'new', 'dup', 'invokespecial', 'athrow', 'return'] writeString = ['aload_1', 'invokevirtual', 'iconst_0', 'aload_0', 'getfield', \ 'aload_0', 'getfield', 'aload_1', 'invokevirtual', \ 'invokestatic', 'aload_0', 'dup', 'getfield', 'aload_1', \ 'invokevirtual', 'iadd', 'putfield', 'aload_0', 'getfield', \ 'aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'iadd', \ 'putfield', 'bipush', 'bastore', 'return'] method424 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'iload_1', 'ineg', 'i2b', \ 'bastore', 'return'] method425 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'sipush', 'iload_1', 'isub', \ 'i2b', 'bastore', 'return'] method431 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'iload_1', 'i2b', 'bastore', \ 'aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', 'ishr', \ 'i2b', 'bastore', 'return'] method432 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'iload_1', 'bipush', 'ishr', \ 'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', \ 'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \ 'sipush', 'iadd', 'i2b', 'bastore', 'return'] method433 = ['aload_0', 'getfield', 'aload_0', 'dup', 'getfield', 
'dup_x1', \ 'iconst_1', 'iadd', 'putfield', 'iload_1', 'sipush', 'iadd', \ 'i2b', 'bastore', 'aload_0', 'getfield', 'aload_0', 'dup', \ 'getfield', 'dup_x1', 'iconst_1', 'iadd', 'putfield', 'iload_1', \ 'bipush', 'ishr', 'i2b', 'bastore', 'return'] getNextKey = ['aload_0', 'dup', 'getfield', 'dup_x1', 'iconst_1', 'isub', \ 'putfield', 'ifne', 'aload_0', 'invokespecial', 'aload_0', \ 'sipush', 'putfield', 'aload_0', 'getfield', 'aload_0', \ 'getfield', 'iaload', 'ireturn'] isaac = ['aload_0', 'dup', 'getfield', 'aload_0', 'dup', 'getfield', \ 'iconst_1', 'iadd', 'dup_x1', 'putfield', 'iadd', 'putfield', \ 'iconst_0', 'istore_1', 'goto', 'aload_0', 'getfield', 'iload_1', \ 'iaload', 'istore_2', 'iload_1', 'iconst_3', 'iand', 'ifne', \ 'aload_0', 'dup', 'getfield', 'aload_0', 'getfield', 'bipush', \ 'ishl', 'ixor', 'putfield', 'goto', 'iload_1', 'iconst_3', 'iand', \ 'iconst_1', 'if_icmpne', 'aload_0', 'dup', 'getfield', 'aload_0', \ 'getfield', 'bipush', 'iushr', 'ixor', 'putfield', 'goto', \ 'iload_1', 'iconst_3', 'iand', 'iconst_2', 'if_icmpne', 'aload_0', \ 'dup', 'getfield', 'aload_0', 'getfield', 'iconst_2', 'ishl', \ 'ixor', 'putfield', 'goto', 'iload_1', 'iconst_3', 'iand', \ 'iconst_3', 'if_icmpne', 'aload_0', 'dup', 'getfield', 'aload_0', \ 'getfield', 'bipush', 'iushr', 'ixor', 'putfield', 'aload_0', \ 'dup', 'getfield', 'aload_0', 'getfield', 'iload_1', 'sipush', \ 'iadd', 'sipush', 'iand', 'iaload', 'iadd', 'putfield', 'aload_0', \ 'getfield', 'iload_1', 'aload_0', 'getfield', 'iload_2', 'sipush', \ 'iand', 'iconst_2', 'ishr', 'iaload', 'aload_0', 'getfield', \ 'iadd', 'aload_0', 'getfield', 'iadd', 'dup', 'istore_3', \ 'iastore', 'aload_0', 'getfield', 'iload_1', 'aload_0', 'aload_0', \ 'getfield', 'iload_3', 'bipush', 'ishr', 'sipush', 'iand', \ 'iconst_2', 'ishr', 'iaload', 'iload_2', 'iadd', 'dup_x1', \ 'putfield', 'iastore', 'iinc', 'iload_1', 'sipush', \ 'if_icmplt', 'return'] initializeKeySet2 = ['ldc', 'dup', 'istore', 'dup', 'istore', 'dup', 
'istore', \ 'dup', 'istore', 'dup', 'istore_3', 'dup', 'istore_2', \ 'dup', 'istore_1', 'istore', 'iconst_0', 'istore', \ 'iload', 'iconst_4', 'if_icmpge', 'iload', 'iload_1', \ 'bipush', 'ishl', 'ixor', 'istore', 'iload_3', 'iload', \ 'iadd', 'istore_3', 'iload_1', 'iload_2', 'iadd', \ 'istore_1', 'iload_1', 'iload_2', 'iconst_2', 'iushr', \ 'ixor', 'istore_1', 'iload', 'iload_1', 'iadd', 'istore', \ 'iload_2', 'iload_3', 'iadd', 'istore_2', 'iload_2', \ 'iload_3', 'bipush', 'ishl', 'ixor', 'istore_2', \ 'iload', 'iload_2', 'iadd', 'istore', 'iload_3', \ 'iload', 'iadd', 'istore_3', 'iload_3', 'iload', \ 'bipush', 'iushr', 'ixor', 'istore_3', 'iload', \ 'iload_3', 'iadd', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'bipush', 'ishl', 'ixor', \ 'istore', 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'iadd', 'istore', 'iload', 'iload', \ 'iconst_4', 'iushr', 'ixor', 'istore', 'iload', \ 'iload', 'iadd', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'bipush', 'ishl', 'ixor', \ 'istore', 'iload_1', 'iload', 'iadd', 'istore_1', \ 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'bipush', 'iushr', 'ixor', 'istore', \ 'iload_2', 'iload', 'iadd', 'istore_2', 'iload', \ 'iload_1', 'iadd', 'istore', 'iinc', 'goto', \ 'iconst_0', 'istore', 'iload', 'sipush', 'if_icmpge', \ 'iload', 'aload_0', 'getfield', 'iload', 'iaload', \ 'iadd', 'istore', 'iload_1', 'aload_0', 'getfield', \ 'iload', 'iconst_1', 'iadd', 'iaload', 'iadd', \ 'istore_1', 'iload_2', 'aload_0', 'getfield', \ 'iload', 'iconst_2', 'iadd', 'iaload', 'iadd', \ 'istore_2', 'iload_3', 'aload_0', 'getfield', \ 'iload', 'iconst_3', 'iadd', 'iaload', 'iadd', \ 'istore_3', 'iload', 'aload_0', 'getfield', \ 'iload', 'iconst_4', 'iadd', 'iaload', 'iadd', \ 'istore', 'iload', 'aload_0', 'getfield', 'iload', \ 'iconst_5', 'iadd', 'iaload', 'iadd', 'istore', \ 'iload', 'aload_0', 'getfield', 'iload', 'bipush', \ 'iadd', 'iaload', 'iadd', 'istore', 'iload', 'aload_0', 
\ 'getfield', 'iload', 'bipush', 'iadd', 'iaload', 'iadd', \ 'istore', 'iload', 'iload_1', 'bipush', 'ishl', 'ixor', \ 'istore', 'iload_3', 'iload', 'iadd', 'istore_3', \ 'iload_1', 'iload_2', 'iadd', 'istore_1', 'iload_1', \ 'iload_2', 'iconst_2', 'iushr', 'ixor', 'istore_1', \ 'iload', 'iload_1', 'iadd', 'istore', 'iload_2', \ 'iload_3', 'iadd', 'istore_2', 'iload_2', 'iload_3', \ 'bipush', 'ishl', 'ixor', 'istore_2', 'iload', \ 'iload_2', 'iadd', 'istore', 'iload_3', 'iload', \ 'iadd', 'istore_3', 'iload_3', 'iload', 'bipush', \ 'iushr', 'ixor', 'istore_3', 'iload', 'iload_3', \ 'iadd', 'istore', 'iload', 'iload', 'iadd', 'istore', \ 'iload', 'iload', 'bipush', 'ishl', 'ixor', 'istore', \ 'iload', 'iload', 'iadd', 'istore', 'iload', 'iload', \ 'iadd', 'istore', 'iload', 'iload', 'iconst_4', \ 'iushr', 'ixor', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'iadd', 'istore', \ 'iload', 'iload', 'bipush', 'ishl', 'ixor', 'istore', \ 'iload_1', 'iload', 'iadd', 'istore_1', 'iload', \ 'iload', 'iadd', 'istore', 'iload', 'iload', \ 'bipush', 'iushr', 'ixor', 'istore', 'iload_2', \ 'iload', 'iadd', 'istore_2', 'iload', 'iload_1', \ 'iadd', 'istore', 'aload_0', 'getfield', 'iload', \ 'iload', 'iastore', 'aload_0', 'getfield', 'iload', \ 'iconst_1', 'iadd', 'iload_1', 'iastore', 'aload_0', \ 'getfield', 'iload', 'iconst_2', 'iadd', 'iload_2', \ 'iastore', 'aload_0', 'getfield', 'iload', \ 'iconst_3', 'iadd', 'iload_3', 'iastore', 'aload_0', \ 'getfield', 'iload', 'iconst_4', 'iadd', 'iload', \ 'iastore', 'aload_0', 'getfield', 'iload', 'iconst_5', \ 'iadd', 'iload', 'iastore', 'aload_0', 'getfield', \ 'iload', 'bipush', 'iadd', 'iload', 'iastore', \ 'aload_0', 'getfield', 'iload', 'bipush', 'iadd', \ 'iload', 'iastore', 'iinc', 'goto', 'iconst_0', \ 'istore', 'iload', 'sipush', 'if_icmpge', 'iload', \ 'aload_0', 'getfield', 'iload', 'iaload', 'iadd', \ 'istore', 'iload_1', 'aload_0', 'getfield', \ 'iload', 'iconst_1', 'iadd', 'iaload', 'iadd', \ 
'istore_1', 'iload_2', 'aload_0', 'getfield', \ 'iload', 'iconst_2', 'iadd', 'iaload', 'iadd', \ 'istore_2', 'iload_3', 'aload_0', 'getfield', \ 'iload', 'iconst_3', 'iadd', 'iaload', 'iadd', \ 'istore_3', 'iload', 'aload_0', 'getfield', \ 'iload', 'iconst_4', 'iadd', 'iaload', 'iadd', \ 'istore', 'iload', 'aload_0', 'getfield', 'iload', \ 'iconst_5', 'iadd', 'iaload', 'iadd', 'istore', \ 'iload', 'aload_0', 'getfield', 'iload', 'bipush', \ 'iadd', 'iaload', 'iadd', 'istore', 'iload', \ 'aload_0', 'getfield', 'iload', 'bipush', 'iadd', \ 'iaload', 'iadd', 'istore', 'iload', 'iload_1', \ 'bipush', 'ishl', 'ixor', 'istore', 'iload_3', \ 'iload', 'iadd', 'istore_3', 'iload_1', 'iload_2', \ 'iadd', 'istore_1', 'iload_1', 'iload_2', \ 'iconst_2', 'iushr', 'ixor', 'istore_1', 'iload', \ 'iload_1', 'iadd', 'istore', 'iload_2', 'iload_3', \ 'iadd', 'istore_2', 'iload_2', 'iload_3', 'bipush', \ 'ishl', 'ixor', 'istore_2', 'iload', 'iload_2', \ 'iadd', 'istore', 'iload_3', 'iload', 'iadd', \ 'istore_3', 'iload_3', 'iload', 'bipush', 'iushr', \ 'ixor', 'istore_3', 'iload', 'iload_3', 'iadd', \ 'istore', 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'bipush', 'ishl', 'ixor', 'istore', 'iload', \ 'iload', 'iadd', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'iconst_4', 'iushr', 'ixor', \ 'istore', 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'iadd', 'istore', 'iload', 'iload', 'bipush', \ 'ishl', 'ixor', 'istore', 'iload_1', 'iload', 'iadd', \ 'istore_1', 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'bipush', 'iushr', 'ixor', 'istore', 'iload_2', \ 'iload', 'iadd', 'istore_2', 'iload', 'iload_1', 'iadd', \ 'istore', 'aload_0', 'getfield', 'iload', 'iload', \ 'iastore', 'aload_0', 'getfield', 'iload', 'iconst_1', \ 'iadd', 'iload_1', 'iastore', 'aload_0', 'getfield', \ 'iload', 'iconst_2', 'iadd', 'iload_2', 'iastore', \ 'aload_0', 'getfield', 'iload', 'iconst_3', 'iadd', \ 'iload_3', 'iastore', 'aload_0', 'getfield', 'iload', 
\ 'iconst_4', 'iadd', 'iload', 'iastore', 'aload_0', \ 'getfield', 'iload', 'iconst_5', 'iadd', 'iload', \ 'iastore', 'aload_0', 'getfield', 'iload', 'bipush', \ 'iadd', 'iload', 'iastore', 'aload_0', 'getfield', \ 'iload', 'bipush', 'iadd', 'iload', 'iastore', 'iinc', \ 'goto', 'aload_0', 'invokespecial', 'aload_0', \ 'sipush', 'putfield', 'return'] initializeKeySet3 = ['ldc', 'dup', 'istore', 'dup', 'istore', 'dup', 'istore', \ 'dup', 'istore', 'dup', 'istore_3', 'dup', 'istore_2', \ 'dup', 'istore_1', 'istore', 'iconst_0', 'istore', \ 'goto', 'iload', 'iload_1', 'bipush', 'ishl', 'ixor', \ 'istore', 'iload_3', 'iload', 'iadd', 'istore_3', \ 'iload_1', 'iload_2', 'iadd', 'dup', 'istore_1', \ 'iload_2', 'iconst_2', 'iushr', 'ixor', 'istore_1', \ 'iload', 'iload_1', 'iadd', 'istore', 'iload_2', \ 'iload_3', 'iadd', 'dup', 'istore_2', 'iload_3', \ 'bipush', 'ishl', 'ixor', 'istore_2', 'iload', \ 'iload_2', 'iadd', 'istore', 'iload_3', 'iload', \ 'iadd', 'dup', 'istore_3', 'iload', 'bipush', \ 'iushr', 'ixor', 'istore_3', 'iload', \ 'iload_3', 'iadd', 'istore', 'iload', 'iload', \ 'iadd', 'dup', 'istore', 'iload', 'bipush', \ 'ishl', 'ixor', 'istore', 'iload', 'iload', \ 'iadd', 'istore', 'iload', 'iload', 'iadd', \ 'dup', 'istore', 'iload', 'iconst_4', \ 'iushr', 'ixor', 'istore', 'iload', \ 'iload', 'iadd', 'istore', 'iload', 'iload', \ 'iadd', 'dup', 'istore', 'iload', 'bipush', \ 'ishl', 'ixor', 'istore', 'iload_1', 'iload', \ 'iadd', 'istore_1', 'iload', 'iload', 'iadd', \ 'dup', 'istore', 'iload', 'bipush', 'iushr', \ 'ixor', 'istore', 'iload_2', 'iload', 'iadd', \ 'istore_2', 'iload', 'iload_1', 'iadd', \ 'istore', 'iinc', 'iload', 'iconst_4', 'if_icmplt', \ 'iconst_0', 'istore', 'goto', 'iload', 'aload_0', \ 'getfield', 'iload', 'iaload', 'iadd', 'istore', \ 'iload_1', 'aload_0', 'getfield', 'iload', \ 'iconst_1', 'iadd', 'iaload', 'iadd', 'istore_1', \ 'iload_2', 'aload_0', 'getfield', 'iload', \ 'iconst_2', 'iadd', 'iaload', 'iadd', 'istore_2', \ 'iload_3', 
'aload_0', 'getfield', 'iload', 'iconst_3', \ 'iadd', 'iaload', 'iadd', 'istore_3', 'iload', \ 'aload_0', 'getfield', 'iload', 'iconst_4', 'iadd', \ 'iaload', 'iadd', 'istore', 'iload', 'aload_0', \ 'getfield', 'iload', 'iconst_5', 'iadd', 'iaload', \ 'iadd', 'istore', 'iload', 'aload_0', 'getfield', \ 'iload', 'bipush', 'iadd', 'iaload', 'iadd', \ 'istore', 'iload', 'aload_0', 'getfield', 'iload', \ 'bipush', 'iadd', 'iaload', 'iadd', 'istore', 'iload', \ 'iload_1', 'bipush', 'ishl', 'ixor', 'istore', \ 'iload_3', 'iload', 'iadd', 'istore_3', 'iload_1', \ 'iload_2', 'iadd', 'dup', 'istore_1', 'iload_2', \ 'iconst_2', 'iushr', 'ixor', 'istore_1', 'iload', \ 'iload_1', 'iadd', 'istore', 'iload_2', 'iload_3', \ 'iadd', 'dup', 'istore_2', 'iload_3', 'bipush', \ 'ishl', 'ixor', 'istore_2', 'iload', 'iload_2', \ 'iadd', 'istore', 'iload_3', 'iload', 'iadd', 'dup', \ 'istore_3', 'iload', 'bipush', 'iushr', 'ixor', \ 'istore_3', 'iload', 'iload_3', 'iadd', 'istore', \ 'iload', 'iload', 'iadd', 'dup', 'istore', 'iload', \ 'bipush', 'ishl', 'ixor', 'istore', 'iload', 'iload', \ 'iadd', 'istore', 'iload', 'iload', 'iadd', 'dup', \ 'istore', 'iload', 'iconst_4', 'iushr', 'ixor', \ 'istore', 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'iadd', 'dup', 'istore', 'iload', 'bipush', \ 'ishl', 'ixor', 'istore', 'iload_1', 'iload', 'iadd', \ 'istore_1', 'iload', 'iload', 'iadd', 'dup', 'istore', \ 'iload', 'bipush', 'iushr', 'ixor', 'istore', \ 'iload_2', 'iload', 'iadd', 'istore_2', 'iload', \ 'iload_1', 'iadd', 'istore', 'aload_0', 'getfield', \ 'iload', 'iload', 'iastore', 'aload_0', 'getfield', \ 'iload', 'iconst_1', 'iadd', 'iload_1', 'iastore', \ 'aload_0', 'getfield', 'iload', 'iconst_2', 'iadd', \ 'iload_2', 'iastore', 'aload_0', 'getfield', 'iload', \ 'iconst_3', 'iadd', 'iload_3', 'iastore', 'aload_0', \ 'getfield', 'iload', 'iconst_4', 'iadd', 'iload', \ 'iastore', 'aload_0', 'getfield', 'iload', 'iconst_5', \ 'iadd', 'iload', 'iastore', 'aload_0', 'getfield', 
\ 'iload', 'bipush', 'iadd', 'iload', 'iastore', \ 'aload_0', 'getfield', 'iload', 'bipush', 'iadd', \ 'iload', 'iastore', 'iinc', 'iload', 'sipush', \ 'if_icmplt', 'iconst_0', 'istore', 'goto', 'iload', \ 'aload_0', 'getfield', 'iload', 'iaload', 'iadd', \ 'istore', 'iload_1', 'aload_0', 'getfield', 'iload', \ 'iconst_1', 'iadd', 'iaload', 'iadd', 'istore_1', \ 'iload_2', 'aload_0', 'getfield', 'iload', 'iconst_2', \ 'iadd', 'iaload', 'iadd', 'istore_2', 'iload_3', \ 'aload_0', 'getfield', 'iload', 'iconst_3', 'iadd', \ 'iaload', 'iadd', 'istore_3', 'iload', 'aload_0', \ 'getfield', 'iload', 'iconst_4', 'iadd', 'iaload', \ 'iadd', 'istore', 'iload', 'aload_0', 'getfield', \ 'iload', 'iconst_5', 'iadd', 'iaload', 'iadd', \ 'istore', 'iload', 'aload_0', 'getfield', 'iload', \ 'bipush', 'iadd', 'iaload', 'iadd', 'istore', 'iload', \ 'aload_0', 'getfield', 'iload', 'bipush', 'iadd', \ 'iaload', 'iadd', 'istore', 'iload', 'iload_1', \ 'bipush', 'ishl', 'ixor', 'istore', 'iload_3', \ 'iload', 'iadd', 'istore_3', 'iload_1', 'iload_2', \ 'iadd', 'dup', 'istore_1', 'iload_2', 'iconst_2', \ 'iushr', 'ixor', 'istore_1', 'iload', 'iload_1', \ 'iadd', 'istore', 'iload_2', 'iload_3', 'iadd', 'dup', \ 'istore_2', 'iload_3', 'bipush', 'ishl', 'ixor', \ 'istore_2', 'iload', 'iload_2', 'iadd', 'istore', \ 'iload_3', 'iload', 'iadd', 'dup', 'istore_3', \ 'iload', 'bipush', 'iushr', 'ixor', 'istore_3', \ 'iload', 'iload_3', 'iadd', 'istore', 'iload', \ 'iload', 'iadd', 'dup', 'istore', 'iload', 'bipush', \ 'ishl', 'ixor', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'iadd', 'dup', 'istore', \ 'iload', 'iconst_4', 'iushr', 'ixor', 'istore', \ 'iload', 'iload', 'iadd', 'istore', 'iload', 'iload', \ 'iadd', 'dup', 'istore', 'iload', 'bipush', 'ishl', \ 'ixor', 'istore', 'iload_1', 'iload', 'iadd', \ 'istore_1', 'iload', 'iload', 'iadd', 'dup', 'istore', \ 'iload', 'bipush', 'iushr', 'ixor', 'istore', \ 'iload_2', 'iload', 'iadd', 'istore_2', 'iload', \ 'iload_1', 
'iadd', 'istore', 'aload_0', 'getfield', \ 'iload', 'iload', 'iastore', 'aload_0', 'getfield', \ 'iload', 'iconst_1', 'iadd', 'iload_1', 'iastore', \ 'aload_0', 'getfield', 'iload', 'iconst_2', 'iadd', \ 'iload_2', 'iastore', 'aload_0', 'getfield', 'iload', \ 'iconst_3', 'iadd', 'iload_3', 'iastore', 'aload_0', \ 'getfield', 'iload', 'iconst_4', 'iadd', 'iload', \ 'iastore', 'aload_0', 'getfield', 'iload', 'iconst_5', \ 'iadd', 'iload', 'iastore', 'aload_0', 'getfield', \ 'iload', 'bipush', 'iadd', 'iload', 'iastore', \ 'aload_0', 'getfield', 'iload', 'bipush', 'iadd', \ 'iload', 'iastore', 'iinc', 'iload', 'sipush', \ 'if_icmplt', 'aload_0', 'invokespecial', \ 'aload_0', 'sipush', 'putfield', 'return'] initializeKeySet4 = ['ldc', 'dup', 'istore', 'dup', 'istore', 'dup', \ 'istore', 'dup', 'istore', 'dup', 'istore_3', 'dup', \ 'istore_2', 'dup', 'istore_1', 'istore', 'iconst_0', \ 'istore', 'iload', 'iconst_4', 'if_icmpge', \ 'iload', 'iload_1', 'bipush', 'ishl', 'ixor', \ 'istore', 'iload_3', 'iload', 'iadd', 'istore_3', \ 'iload_1', 'iload_2', 'iadd', 'istore_1', 'iload_1', \ 'iload_2', 'iconst_2', 'iushr', 'ixor', \ 'istore_1', 'iload', 'iload_1', 'iadd', 'istore', \ 'iload_2', 'iload_3', 'iadd', 'istore_2', 'iload_2', \ 'iload_3', 'bipush', 'ishl', 'ixor', 'istore_2', \ 'iload', 'iload_2', 'iadd', 'istore', 'iload_3', \ 'iload', 'iadd', 'istore_3', 'iload_3', 'iload', \ 'bipush', 'iushr', 'ixor', 'istore_3', 'iload', \ 'iload_3', 'iadd', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'bipush', 'ishl', 'ixor', \ 'istore', 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'iadd', 'istore', 'iload', 'iload', 'iconst_4', \ 'iushr', 'ixor', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'bipush', 'ishl', 'ixor', 'istore', 'iload_1', \ 'iload', 'iadd', 'istore_1', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'bipush', 'iushr', 'ixor', \ 'istore', 'iload_2', 'iload', 'iadd', 
'istore_2', \ 'iload', 'iload_1', 'iadd', 'istore', 'iinc', 'goto', \ 'iconst_0', 'istore', 'iload', 'sipush', 'if_icmpge', \ 'iload', 'aload_0', 'getfield', 'iload', 'iaload', \ 'iadd', 'istore', 'iload_1', 'aload_0', 'getfield', \ 'iload', 'iconst_1', 'iadd', 'iaload', 'iadd', \ 'istore_1', 'iload_2', 'aload_0', 'getfield', 'iload', \ 'iconst_2', 'iadd', 'iaload', 'iadd', 'istore_2', \ 'iload_3', 'aload_0', 'getfield', 'iload', 'iconst_3', \ 'iadd', 'iaload', 'iadd', 'istore_3', 'iload', \ 'aload_0', 'getfield', 'iload', 'iconst_4', 'iadd', \ 'iaload', 'iadd', 'istore', 'iload', 'aload_0', \ 'getfield', 'iload', 'iconst_5', 'iadd', 'iaload', \ 'iadd', 'istore', 'iload', 'aload_0', 'getfield', \ 'iload', 'bipush', 'iadd', 'iaload', 'iadd', 'istore', \ 'iload', 'aload_0', 'getfield', 'iload', 'bipush', \ 'iadd', 'iaload', 'iadd', 'istore', 'iload', 'iload_1', \ 'bipush', 'ishl', 'ixor', 'istore', 'iload_3', 'iload', \ 'iadd', 'istore_3', 'iload_1', 'iload_2', 'iadd', \ 'istore_1', 'iload_1', 'iload_2', 'iconst_2', 'iushr', \ 'ixor', 'istore_1', 'iload', 'iload_1', 'iadd', \ 'istore', 'iload_2', 'iload_3', 'iadd', 'istore_2', \ 'iload_2', 'iload_3', 'bipush', 'ishl', 'ixor', \ 'istore_2', 'iload', 'iload_2', 'iadd', 'istore', \ 'iload_3', 'iload', 'iadd', 'istore_3', 'iload_3', \ 'iload', 'bipush', 'iushr', 'ixor', 'istore_3', \ 'iload', 'iload_3', 'iadd', 'istore', 'iload', \ 'iload', 'iadd', 'istore', 'iload', 'iload', \ 'bipush', 'ishl', 'ixor', 'istore', 'iload', \ 'iload', 'iadd', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'iconst_4', 'iushr', \ 'ixor', 'istore', 'iload', 'iload', 'iadd', 'istore', \ 'iload', 'iload', 'iadd', 'istore', 'iload', 'iload', \ 'bipush', 'ishl', 'ixor', 'istore', 'iload_1', \ 'iload', 'iadd', 'istore_1', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'bipush', 'iushr', 'ixor', \ 'istore', 'iload_2', 'iload', 'iadd', 'istore_2', \ 'iload', 'iload_1', 'iadd', 'istore', 'aload_0', \ 'getfield', 'iload', 
'iload', 'iastore', 'aload_0', \ 'getfield', 'iload', 'iconst_1', 'iadd', 'iload_1', \ 'iastore', 'aload_0', 'getfield', 'iload', \ 'iconst_2', 'iadd', 'iload_2', 'iastore', 'aload_0', \ 'getfield', 'iload', 'iconst_3', 'iadd', 'iload_3', \ 'iastore', 'aload_0', 'getfield', 'iload', 'iconst_4', \ 'iadd', 'iload', 'iastore', 'aload_0', 'getfield', \ 'iload', 'iconst_5', 'iadd', 'iload', 'iastore', \ 'aload_0', 'getfield', 'iload', 'bipush', 'iadd', \ 'iload', 'iastore', 'aload_0', 'getfield', 'iload', \ 'bipush', 'iadd', 'iload', 'iastore', 'iinc', \ 'goto', 'iconst_0', 'istore', 'iload', 'sipush', \ 'if_icmpge', 'iload', 'aload_0', 'getfield', 'iload', \ 'iaload', 'iadd', 'istore', 'iload_1', 'aload_0', \ 'getfield', 'iload', 'iconst_1', 'iadd', 'iaload', \ 'iadd', 'istore_1', 'iload_2', 'aload_0', 'getfield', \ 'iload', 'iconst_2', 'iadd', 'iaload', 'iadd', \ 'istore_2', 'iload_3', 'aload_0', 'getfield', 'iload', \ 'iconst_3', 'iadd', 'iaload', 'iadd', 'istore_3', \ 'iload', 'aload_0', 'getfield', 'iload', 'iconst_4', \ 'iadd', 'iaload', 'iadd', 'istore', 'iload', \ 'aload_0', 'getfield', 'iload', 'iconst_5', 'iadd', \ 'iaload', 'iadd', 'istore', 'iload', 'aload_0', \ 'getfield', 'iload', 'bipush', 'iadd', 'iaload', \ 'iadd', 'istore', 'iload', 'aload_0', 'getfield', \ 'iload', 'bipush', 'iadd', 'iaload', 'iadd', 'istore', \ 'iload', 'iload_1', 'bipush', 'ishl', 'ixor', 'istore', \ 'iload_3', 'iload', 'iadd', 'istore_3', 'iload_1', \ 'iload_2', 'iadd', 'istore_1', 'iload_1', 'iload_2', \ 'iconst_2', 'iushr', 'ixor', 'istore_1', 'iload', \ 'iload_1', 'iadd', 'istore', 'iload_2', 'iload_3', \ 'iadd', 'istore_2', 'iload_2', 'iload_3', 'bipush', \ 'ishl', 'ixor', 'istore_2', 'iload', 'iload_2', \ 'iadd', 'istore', 'iload_3', 'iload', 'iadd', \ 'istore_3', 'iload_3', 'iload', 'bipush', 'iushr', \ 'ixor', 'istore_3', 'iload', 'iload_3', 'iadd', \ 'istore', 'iload', 'iload', 'iadd', 'istore', \ 'iload', 'iload', 'bipush', 'ishl', 'ixor', 'istore', \ 'iload', 'iload', 
'iadd', 'istore', 'iload', 'iload', \ 'iadd', 'istore', 'iload', 'iload', 'iconst_4', \ 'iushr', 'ixor', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'iadd', 'istore', \ 'iload', 'iload', 'bipush', 'ishl', 'ixor', \ 'istore', 'iload_1', 'iload', 'iadd', 'istore_1', \ 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'bipush', 'iushr', 'ixor', 'istore', \ 'iload_2', 'iload', 'iadd', 'istore_2', 'iload', \ 'iload_1', 'iadd', 'istore', 'aload_0', 'getfield', \ 'iload', 'iload', 'iastore', 'aload_0', 'getfield', \ 'iload', 'iconst_1', 'iadd', 'iload_1', 'iastore', \ 'aload_0', 'getfield', 'iload', 'iconst_2', 'iadd', \ 'iload_2', 'iastore', 'aload_0', 'getfield', 'iload', \ 'iconst_3', 'iadd', 'iload_3', 'iastore', 'aload_0', \ 'getfield', 'iload', 'iconst_4', 'iadd', 'iload', \ 'iastore', 'aload_0', 'getfield', 'iload', \ 'iconst_5', 'iadd', 'iload', 'iastore', 'aload_0', \ 'getfield', 'iload', 'bipush', 'iadd', 'iload', \ 'iastore', 'aload_0', 'getfield', 'iload', 'bipush', \ 'iadd', 'iload', 'iastore', 'iinc', 'goto', 'aload_0', \ 'invokespecial', 'aload_0', 'sipush', 'putfield', 'return'] initializeKeySet = ['ldc', 'dup', 'istore', 'dup', 'istore', 'dup', \ 'istore', 'dup', 'istore', 'dup', 'istore_3', \ 'dup', 'istore_2', 'dup', 'istore_1', 'istore', \ 'iconst_0', 'istore', 'goto', 'iload', 'iload_1', \ 'bipush', 'ishl', 'ixor', 'istore', 'iload_3', \ 'iload', 'iadd', 'istore_3', 'iload_1', \ 'iload_2', 'iadd', 'istore_1', 'iload_1', \ 'iload_2', 'iconst_2', 'iushr', 'ixor', \ 'istore_1', 'iload', 'iload_1', 'iadd', 'istore', \ 'iload_2', 'iload_3', 'iadd', 'istore_2', \ 'iload_2', 'iload_3', 'bipush', 'ishl', 'ixor', \ 'istore_2', 'iload', 'iload_2', 'iadd', 'istore', \ 'iload_3', 'iload', 'iadd', 'istore_3', \ 'iload_3', 'iload', 'bipush', 'iushr', 'ixor', \ 'istore_3', 'iload', 'iload_3', 'iadd', \ 'istore', 'iload', 'iload', 'iadd', 'istore', \ 'iload', 'iload', 'bipush', 'ishl', 'ixor', \ 'istore', 'iload', 'iload', 'iadd', 
'istore', \ 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'iconst_4', 'iushr', 'ixor', 'istore', \ 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'iadd', 'istore', 'iload', 'iload', \ 'bipush', 'ishl', 'ixor', 'istore', 'iload_1', \ 'iload', 'iadd', 'istore_1', 'iload', 'iload', \ 'iadd', 'istore', 'iload', 'iload', 'bipush', \ 'iushr', 'ixor', 'istore', 'iload_2', 'iload', \ 'iadd', 'istore_2', 'iload', 'iload_1', 'iadd', \ 'istore', 'iinc', 'iload', 'iconst_4', \ 'if_icmplt', 'iconst_0', 'istore', 'goto', \ 'iload', 'aload_0', 'getfield', 'iload', \ 'iaload', 'iadd', 'istore', 'iload_1', \ 'aload_0', 'getfield', 'iload', 'iconst_1', \ 'iadd', 'iaload', 'iadd', 'istore_1', \ 'iload_2', 'aload_0', 'getfield', 'iload', \ 'iconst_2', 'iadd', 'iaload', 'iadd', \ 'istore_2', 'iload_3', 'aload_0', \ 'getfield', 'iload', 'iconst_3', 'iadd', \ 'iaload', 'iadd', 'istore_3', 'iload', \ 'aload_0', 'getfield', 'iload', 'iconst_4', \ 'iadd', 'iaload', 'iadd', 'istore', \ 'iload', 'aload_0', 'getfield', 'iload', \ 'iconst_5', 'iadd', 'iaload', 'iadd', \ 'istore', 'iload', 'aload_0', 'getfield', \ 'iload', 'bipush', 'iadd', 'iaload', \ 'iadd', 'istore', 'iload', 'aload_0', \ 'getfield', 'iload', 'bipush', 'iadd', \ 'iaload', 'iadd', 'istore', 'iload', \ 'iload_1', 'bipush', 'ishl', 'ixor', \ 'istore', 'iload_3', 'iload', 'iadd', \ 'istore_3', 'iload_1', 'iload_2', 'iadd', \ 'istore_1', 'iload_1', 'iload_2', \ 'iconst_2', 'iushr', 'ixor', 'istore_1', \ 'iload', 'iload_1', 'iadd', 'istore', \ 'iload_2', 'iload_3', 'iadd', 'istore_2', \ 'iload_2', 'iload_3', 'bipush', 'ishl', \ 'ixor', 'istore_2', 'iload', 'iload_2', \ 'iadd', 'istore', 'iload_3', 'iload', \ 'iadd', 'istore_3', 'iload_3', 'iload', \ 'bipush', 'iushr', 'ixor', 'istore_3', \ 'iload', 'iload_3', 'iadd', 'istore', \ 'iload', 'iload', 'iadd', 'istore', \ 'iload', 'iload', 'bipush', 'ishl', \ 'ixor', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'iadd', 'istore', \ 'iload', 'iload', 
'iconst_4', \ 'iushr', 'ixor', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'bipush', 'ishl', 'ixor', 'istore', \ 'iload_1', 'iload', 'iadd', 'istore_1', 'iload', \ 'iload', 'iadd', 'istore', 'iload', 'iload', \ 'bipush', 'iushr', 'ixor', 'istore', 'iload_2', \ 'iload', 'iadd', 'istore_2', 'iload', 'iload_1', \ 'iadd', 'istore', 'aload_0', 'getfield', 'iload', \ 'iload', 'iastore', 'aload_0', 'getfield', \ 'iload', 'iconst_1', 'iadd', 'iload_1', 'iastore', \ 'aload_0', 'getfield', 'iload', 'iconst_2', \ 'iadd', 'iload_2', 'iastore', 'aload_0', \ 'getfield', 'iload', 'iconst_3', 'iadd', 'iload_3', \ 'iastore', 'aload_0', 'getfield', 'iload', \ 'iconst_4', 'iadd', 'iload', 'iastore', \ 'aload_0', 'getfield', 'iload', 'iconst_5', 'iadd', \ 'iload', 'iastore', 'aload_0', 'getfield', 'iload', \ 'bipush', 'iadd', 'iload', 'iastore', 'aload_0', \ 'getfield', 'iload', 'bipush', 'iadd', 'iload', \ 'iastore', 'iinc', 'iload', 'sipush', \ 'if_icmplt', 'iconst_0', 'istore', 'goto', \ 'iload', 'aload_0', 'getfield', 'iload', 'iaload', \ 'iadd', 'istore', 'iload_1', 'aload_0', \ 'getfield', 'iload', 'iconst_1', 'iadd', \ 'iaload', 'iadd', 'istore_1', 'iload_2', \ 'aload_0', 'getfield', 'iload', 'iconst_2', \ 'iadd', 'iaload', 'iadd', 'istore_2', \ 'iload_3', 'aload_0', 'getfield', 'iload', \ 'iconst_3', 'iadd', 'iaload', 'iadd', 'istore_3', \ 'iload', 'aload_0', 'getfield', 'iload', \ 'iconst_4', 'iadd', 'iaload', 'iadd', 'istore', \ 'iload', 'aload_0', 'getfield', 'iload', \ 'iconst_5', 'iadd', 'iaload', 'iadd', \ 'istore', 'iload', 'aload_0', 'getfield', \ 'iload', 'bipush', 'iadd', 'iaload', \ 'iadd', 'istore', 'iload', 'aload_0', \ 'getfield', 'iload', 'bipush', 'iadd', \ 'iaload', 'iadd', 'istore', 'iload', \ 'iload_1', 'bipush', 'ishl', 'ixor', 'istore', \ 'iload_3', 'iload', 'iadd', 'istore_3', \ 'iload_1', 'iload_2', 'iadd', 'istore_1', \ 'iload_1', 'iload_2', 'iconst_2', 'iushr', \ 'ixor', 'istore_1', 'iload', 
'iload_1', \ 'iadd', 'istore', 'iload_2', 'iload_3', \ 'iadd', 'istore_2', 'iload_2', 'iload_3', \ 'bipush', 'ishl', 'ixor', 'istore_2', \ 'iload', 'iload_2', 'iadd', 'istore', \ 'iload_3', 'iload', 'iadd', 'istore_3', \ 'iload_3', 'iload', 'bipush', 'iushr', \ 'ixor', 'istore_3', 'iload', 'iload_3', \ 'iadd', 'istore', 'iload', 'iload', \ 'iadd', 'istore', 'iload', 'iload', 'bipush', \ 'ishl', 'ixor', 'istore', 'iload', 'iload', \ 'iadd', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'iconst_4', 'iushr', \ 'ixor', 'istore', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'iadd', 'istore', 'iload', \ 'iload', 'bipush', 'ishl', 'ixor', 'istore', 'iload_1', \ 'iload', 'iadd', 'istore_1', 'iload', 'iload', 'iadd', \ 'istore', 'iload', 'iload', 'bipush', 'iushr', 'ixor', \ 'istore', 'iload_2', 'iload', \ 'iadd', 'istore_2', 'iload', 'iload_1', \ 'iadd', 'istore', 'aload_0', 'getfield', 'iload', \ 'iload', 'iastore', 'aload_0', 'getfield', \ 'iload', 'iconst_1', 'iadd', 'iload_1', 'iastore', \ 'aload_0', 'getfield', 'iload', 'iconst_2', 'iadd', \ 'iload_2', 'iastore', 'aload_0', 'getfield', \ 'iload', 'iconst_3', 'iadd', 'iload_3', 'iastore', \ 'aload_0', 'getfield', 'iload', 'iconst_4', 'iadd', \ 'iload', 'iastore', 'aload_0', 'getfield', 'iload', \ 'iconst_5', 'iadd', 'iload', 'iastore', 'aload_0', \ 'getfield', 'iload', 'bipush', 'iadd', 'iload', \ 'iastore', 'aload_0', 'getfield', 'iload', 'bipush', \ 'iadd', 'iload', 'iastore', 'iinc', 'iload', \ 'sipush', 'if_icmplt', 'aload_0', 'invokespecial', \ 'aload_0', 'sipush', 'putfield', 'return'] Generic_Methods = [ ['createFrame', createFrame], ['writeDWord', writeDWord], ['writeWordBigEndian', writeWordBigEndian], ['writeWord', writeWord], ['writeDWordBigEndian', writeDWordBigEndian], ['method403', method403], ['writeQWord', writeQWord], ['writeString', writeString], ['method424', method424], ['method425', method425], ['method431', method431], ['method432', method432], ['method433', 
method433], ['getNextKey', getNextKey], ['isaac', isaac], ['initializeKeySet', initializeKeySet], ['initializeKeySet3', initializeKeySet3], ['initializeKeySet4', initializeKeySet4], ['initializeKeySet2', initializeKeySet2] ]
injectnique/KnuckleHeadedMcSpazatron
GenericBytecode.py
Python
mit
46,794
0.013506
#!/usr/bin/env python
"""Countdown solver.

Each line of ``input.txt`` holds whitespace-separated numbers whose last
value is the target score.  For every permutation of the remaining numbers
joined by +, -, * and / (evaluated with normal operator precedence), the
formulas whose integer value equals the target are written to
``output.txt``.
"""

from itertools import permutations, product

# The four binary operators a formula may use.
OPERATORS = "+-*/"


def create_formula(combination, numbers):
    """Interleave ``numbers`` with the operator characters in ``combination``.

    ``combination`` supplies ``len(numbers) - 1`` operators; e.g. numbers
    ("1", "2", "3") with operators ("+", "*") yields "1+2*3".
    """
    parts = []
    for operand, op in zip(numbers, combination):
        parts.append(str(operand))
        parts.append(op)
    # The final operand has no trailing operator.
    parts.append(numbers[len(combination)])
    return "".join(parts)


def evaluate(form):
    """Evaluate a tokenised formula strictly left to right (no precedence).

    Uses floor division for '/'.  Kept for reference only: countdown()
    evaluates with eval(), which applies normal precedence and true
    division.  (The original version of this helper double-counted
    operands; fixed here.)
    """
    if not form:
        return 0
    result = int(form[0])
    # Tokens alternate operand/operator, so pair each operator with the
    # operand that follows it.
    for op, operand in zip(form[1::2], form[2::2]):
        value = int(operand)
        if op == "+":
            result += value
        elif op == "-":
            result -= value
        elif op == "*":
            result *= value
        else:  # "/"
            result //= value
    return result


def countdown(numbers):
    """Return every formula over permutations of ``numbers[:-1]`` whose
    value equals the final element (the target score).

    Note: ``numbers`` is consumed destructively (the target is popped),
    matching the original behaviour.
    """
    matches = []
    final_score = numbers.pop()
    combinations = returnAllCombinations(len(numbers) - 1)
    if not combinations:
        # Zero or one number left: no operator slots, hence no solutions
        # (the original behaved the same way and also tolerated blank lines).
        return matches
    target = int(final_score)
    perms = list(permutations(numbers))
    for combination in combinations:
        for permut in perms:
            formula = create_formula(combination, permut)
            # SECURITY NOTE: eval() runs whatever tokens came from the
            # input file; only feed this trusted, numeric input.
            try:
                value = eval(formula)
            except ZeroDivisionError:
                # e.g. "1/0" -- not a valid formula, just skip it
                # (the original crashed here).
                continue
            if int(value) == target:
                matches.append(formula)
    return matches


def returnAllCombinations(size):
    """Return all ``size``-length operator sequences drawn from +, -, * and /.

    Returns a list of tuples in the same order the original hand-rolled
    generator produced; callers only iterate the sequences, so tuples are
    interchangeable with the original strings/lists.
    """
    if size < 1:
        # Original behaviour: no zero-length combinations.
        return []
    return list(product(OPERATORS, repeat=size))


def main():
    """Solve each line of input.txt, writing the results to output.txt."""
    with open("input.txt", 'r') as infile, open("output.txt", 'w') as outfile:
        for line in infile:
            for formula in countdown(line.split(" ")):
                outfile.write(formula)
                outfile.write("\n")
            outfile.write("\n\n")


if __name__ == "__main__":
    main()
F0lha/UJunior-Projects
DailyProgrammer/Challenge#318/src.py
Python
mit
2,546
0.008641
#!/usr/bin/env python
# encoding: utf-8

import re

from tornado.web import UIModule

from conf.config import BT_PAGE_SIZE


# TODO: defining the pager UI module here scatters the pager logic; consider
# moving it next to the template it renders.
class Pagination(UIModule):
    """Tornado UI module that renders the ``pagination.html`` pager widget."""

    def render(self, page, uri, list_rows=BT_PAGE_SIZE):
        """Render the pagination template.

        :param page: current page number (1-based).
        :param uri: request URI used to build the per-page links.
        :param list_rows: rows per page (defaults to BT_PAGE_SIZE).
        """

        def gen_page_list(current_page=1, total_page=1, list_rows=BT_PAGE_SIZE):
            # TODO: add ajax pager support (e.g. a windowed page list).
            return range(1, total_page + 1)

        def build_uri(uri, param, value):
            """Return ``uri`` with query parameter ``param`` set to ``value``,
            replacing an existing occurrence or appending a new one."""
            # re.escape: `param` must not be interpreted as regex syntax
            # (the original interpolated it into the pattern raw).
            regx = re.compile(r"[\?&](%s=[^\?&]*)" % re.escape(param))
            find = regx.search(uri)
            split = "&" if "?" in uri else "?"
            if not find:
                return "%s%s%s=%s" % (uri, split, param, value)
            # Replace the literal matched text.  The original passed
            # find.group(1) to re.sub() as a *pattern* (and the new value as
            # a replacement template), which broke on regex metacharacters
            # and backslashes.
            return uri.replace(find.group(1), "%s=%s" % (param, value))

        return self.render_string("pagination.html",
                                  page=page,
                                  uri=uri,
                                  gen_page_list=gen_page_list,
                                  list_rows=list_rows,
                                  build_uri=build_uri)
wangjun/BT-Share
web/module/module.py
Python
mit
955
0.006283
import os
from os.path import abspath, basename, dirname, join, normpath
from sys import path
import dj_database_url
from .settings import *

# Heroku deployment settings: start from the base project settings (pulled in
# above via ``from .settings import *``) and override what differs on Heroku.

# NOTE(review): DEBUG is left enabled in this Heroku settings module --
# confirm this is intentional before using it for a production deployment.
DEBUG = True

# Absolute path of the directory containing this settings module.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# Directory into which ``collectstatic`` gathers static assets.
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')

# URL prefix under which static files are served.
STATIC_URL = '/static/'

# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(PROJECT_ROOT, 'static'),
)

# Heroku supplies the database connection via the DATABASE_URL environment
# variable; dj_database_url.config() parses it into Django's DATABASES format.
# (DATABASES itself comes from the base settings star-import above.)
DATABASES['default'] = dj_database_url.config()

ROOT_URLCONF = 'chnnlsdmo.chnnlsdmo.urls'
shearichard/django-channels-demo
chnnlsdmo/chnnlsdmo/settings_heroku.py
Python
bsd-3-clause
510
0.005882
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-08-17 17:37 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('boards', '0017_card_blocking_cards'), ] operations = [ migrations.AddField( model_name='list', name='position', field=models.PositiveIntegerField(default=0, verbose_name='Position of this list in the board'), ), ]
diegojromerolopez/djanban
src/djanban/apps/boards/migrations/0018_list_position.py
Python
mit
505
0.00198
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import warnings


def _find_warning(name):
    """Look up a urllib3 warning class by name.

    Prefers the copy vendored inside requests, then falls back to the
    standalone urllib3 package; returns None when neither provides it.
    """
    for module_path in ('requests.packages.urllib3.exceptions',
                        'urllib3.exceptions'):
        try:
            module = __import__(module_path, fromlist=[name])
            return getattr(module, name)
        except (ImportError, AttributeError):
            # Module missing, or this urllib3 is too old to define `name`.
            continue
    return None


# Module-level warning classes (None when unavailable), as in the original.
InsecurePlatformWarning = _find_warning('InsecurePlatformWarning')
InsecureRequestWarning = _find_warning('InsecureRequestWarning')
SubjectAltNameWarning = _find_warning('SubjectAltNameWarning')
SNIMissingWarning = _find_warning('SNIMissingWarning')


def squelch_warnings(insecure_requests=True):
    """Silence the noisy urllib3 certificate/platform warnings.

    InsecureRequestWarning is only suppressed when ``insecure_requests``
    is true.
    """
    categories = [SubjectAltNameWarning,
                  InsecurePlatformWarning,
                  SNIMissingWarning]
    if insecure_requests:
        categories.append(InsecureRequestWarning)
    for category in categories:
        if category:
            warnings.filterwarnings('ignore', category=category)
ctrlaltdel/neutrinator
vendor/requestsexceptions/__init__.py
Python
gpl-3.0
2,032
0
#!/usr/bin/python2.7
from nassl._nassl import SSL
from SslClient import SslClient


class DebugSslClient(SslClient):
    """ An SSL client with additional debug methods that no one should ever use (insecure renegotiation, etc.). """

    def get_secure_renegotiation_support(self):
        # Delegates to the underlying nassl SSL object.
        return self._ssl.get_secure_renegotiation_support()

    def get_current_compression_method(self):
        return self._ssl.get_current_compression_method()

    @staticmethod
    def get_available_compression_methods():
        """ Returns the list of SSL compression methods supported by SslClient. """
        return SSL.get_available_compression_methods()

    def do_renegotiate(self):
        """Initiate an SSL renegotiation."""
        # A renegotiation only makes sense on an already-established session.
        if not self._handshakeDone:
            raise IOError('SSL Handshake was not completed; cannot renegotiate.')
        self._ssl.renegotiate()
        return self.do_handshake()

    def get_session(self):
        """Get the SSL connection's Session object."""
        return self._ssl.get_session()

    def set_session(self, sslSession):
        """Set the SSL connection's Session object."""
        return self._ssl.set_session(sslSession)

    def set_options(self, options):
        # Passes the option bitmask straight through to the nassl layer.
        return self._ssl.set_options(options)

    def get_dh_param(self):
        """Retrieve the negotiated Ephemeral Diffie Helmann parameters."""
        d = self._openssl_str_to_dic(self._ssl.get_dh_param())
        # 'DH_Parameters' holds e.g. "(1024 bit)"; keep only the bit count.
        d['GroupSize'] = d.pop('DH_Parameters').strip('( bit)')
        d['Type'] = "DH"
        # Keep only the numeric part of the generator line.
        d['Generator'] = d.pop('generator').split(' ')[0]
        return d

    def get_ecdh_param(self):
        """Retrieve the negotiated Ephemeral EC Diffie Helmann parameters."""
        d = self._openssl_str_to_dic(self._ssl.get_ecdh_param(), ' ')
        d['GroupSize'] = d.pop('ECDSA_Parameters').strip('( bit)')
        d['Type'] = "ECDH"
        if 'Cofactor' in d :
            d['Cofactor'] = d['Cofactor'].split(' ')[0]
        # Normalize the generator entry: OpenSSL names it e.g.
        # "Generator_(uncompressed)"; extract the encoding into GeneratorType.
        for k in d.keys() :
            if k.startswith('Generator') :
                d['Generator'] = d.pop(k)
                d['GeneratorType'] = k.split('_')[1].strip('()')
                break
        else :
            # for/else: no Generator_* key was found.
            d['GeneratorType'] = 'Unknown'
        return d

    @staticmethod
    def _openssl_str_to_dic(s, param_tab=' ') :
        """EDH and ECDH parameters pretty-printing.

        Parses OpenSSL's textual parameter dump into a dict: "name: value"
        lines become one-line entries, while indented continuation lines
        (colon-separated hex bytes) are concatenated and prefixed with "0x".

        NOTE(review): the default indent here is a single space and the ECDH
        caller passes ' ' explicitly -- the source's whitespace appears
        mangled, so confirm the intended indent width against upstream nassl.
        """
        d = {}
        # "DH Parameters" / "Generator-..." -> "DH_Parameters" / "Generator_..."
        to_XML = lambda x : "_".join(m for m in x.replace('-', ' ').split(' '))
        current_arg = None
        for l in s.splitlines() :
            if not l.startswith(param_tab) :
                # New parameter line: first finalize any pending hex blob.
                if current_arg :
                    d[current_arg] = "0x"+d[current_arg].replace(':', '')
                    current_arg = None
                args = tuple(arg.strip() for arg in l.split(':') if arg.strip())
                if len(args) > 1 : # one line parameter
                    d[to_XML(args[0])] = args[1]
                else : # multi-line parameter
                    current_arg = to_XML(args[0])
                    d[current_arg] = ''
            else :
                # Continuation line: accumulate the hex fragments.
                d[current_arg] += l.strip()
        # Finalize a hex blob that ran to the end of the dump.
        if current_arg :
            d[current_arg] = "0x"+d[current_arg].replace(':', '')
        return d
ZenSecurity/nassl
src/DebugSslClient.py
Python
gpl-2.0
3,283
0.009138
import bounds
from py3D import Vector, Ray, Color, Body

class Sphere(Body):

    # Class-level defaults; every instance overwrites these in __init__.
    center = Vector()
    radius = 0.0
    R = 0.0               # radius squared, cached for the intersection test
    color = [0.01,0.01,0.01]

    def p(self):
        """Returns the name of the type of body this is."""
        return 'Sphere'

    def set_position(self, c):
        """Move the sphere center to c; returns self for chaining."""
        self.center = c
        return self

    def set_radius(self, r):
        """Set the radius (stored as its absolute value); caches r squared."""
        self.radius = abs(r)
        self.R = r ** 2.0
        return self

    def set_color(self, c):
        """Assign the sphere's surface color; returns self for chaining."""
        self.color = c
        return self

    def get_color(self, point):
        """Return a copy of the sphere's color at the given surface point."""
        return self.color.dup()

    def normal(self, point):
        """Return the outward unit normal at the given surface point."""
        offset = point - self.center
        return offset.scale(1/self.radius)

    def set_reflectivity(self, r):
        """Set specular reflectivity, clamped into [0.0, 1.0]."""
        clamped = min(1.0, r)
        self._r = max(0.0, clamped)
        return self

    def reflectivity(self, point):
        """Fraction of brightness contributed by specular reflection."""
        return self._r

    def __init__(self, center, radius, color = Color()):
        Body.__init__(self)
        self.set_position(center)
        self.set_radius(radius)
        self.set_color(color)
        self.set_reflectivity(0.2)

    # Ray/sphere intersection reduces to a quadratic: with S the vector
    # from sphere center to ray origin, D the ray direction, and R the
    # squared radius, the hit distances are
    #     -(S.D) +/- sqrt((S.D)^2 + R - S.S)
    def intersection(self, ray):
        """Return the distance along ray to the nearest intersection,
        or -1.0 when there is no acceptable hit."""
        offset = ray.o - self.center
        proj = offset.dot(ray.d)
        dist_sq = offset.dot(offset)
        # Sphere too far away to ever be hit.
        if dist_sq > bounds.too_far ** 2:
            return -1.0
        discriminant = proj ** 2 + self.R - dist_sq
        # Negative discriminant: the ray misses entirely.
        if discriminant < 0.0:
            return -1.0
        root = discriminant ** 0.5
        near = -1 * proj - root
        if near >= bounds.too_close:
            return near
        # Near root is behind/too close; try the far root instead.
        far = -1 * proj + root
        if far < bounds.too_small:
            return -1.0
        return far
dburggie/py3D
bodies/Sphere.py
Python
mit
2,470
0.011741
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """Auto-generated South schema migration: creates the content-item
    tables for the fluentcms_twitterfeed plugins (TwitterRecentEntriesItem
    and TwitterSearchItem)."""

    def forwards(self, orm):
        # Adding model 'TwitterRecentEntriesItem'
        db.create_table(u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem', (
            # Parent link to the polymorphic fluent_contents ContentItem.
            (u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
            ('twitter_user', self.gf('django.db.models.fields.CharField')(max_length=75)),
            ('amount', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5)),
            ('widget_id', self.gf('django.db.models.fields.CharField')(max_length=75)),
            ('footer_text', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
            ('include_replies', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'fluentcms_twitterfeed', ['TwitterRecentEntriesItem'])

        # Adding model 'TwitterSearchItem'
        db.create_table(u'contentitem_fluentcms_twitterfeed_twittersearchitem', (
            (u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
            ('query', self.gf('django.db.models.fields.CharField')(default='', max_length=200)),
            ('amount', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=5)),
            ('widget_id', self.gf('django.db.models.fields.CharField')(max_length=75)),
            ('footer_text', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
            ('include_replies', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'fluentcms_twitterfeed', ['TwitterSearchItem'])

    def backwards(self, orm):
        # Deleting model 'TwitterRecentEntriesItem'
        db.delete_table(u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem')

        # Deleting model 'TwitterSearchItem'
        db.delete_table(u'contentitem_fluentcms_twitterfeed_twittersearchitem')

    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in history; auto-generated, do not edit by hand.
    models = {
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'fluent_contents.contentitem': {
            'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'ContentItem'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '15', 'db_index': 'True'}),
            'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentitems'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['fluent_contents.Placeholder']"}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_contents.contentitem_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        'fluent_contents.placeholder': {
            'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}),
            'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        u'fluentcms_twitterfeed.twitterrecententriesitem': {
            'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'TwitterRecentEntriesItem', 'db_table': "u'contentitem_fluentcms_twitterfeed_twitterrecententriesitem'", '_ormbases': ['fluent_contents.ContentItem']},
            'amount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
            u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
            'footer_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'include_replies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
            'widget_id': ('django.db.models.fields.CharField', [], {'max_length': '75'})
        },
        u'fluentcms_twitterfeed.twittersearchitem': {
            'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'TwitterSearchItem', 'db_table': "u'contentitem_fluentcms_twitterfeed_twittersearchitem'", '_ormbases': ['fluent_contents.ContentItem']},
            'amount': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '5'}),
            u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
            'footer_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'include_replies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'query': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'widget_id': ('django.db.models.fields.CharField', [], {'max_length': '75'})
        }
    }

    complete_apps = ['fluentcms_twitterfeed']
bashu/fluentcms-twitterfeed
fluentcms_twitterfeed/south_migrations/0001_initial.py
Python
apache-2.0
7,116
0.00801
#!/usr/bin/env python """Base class for all FAUCET unit tests.""" # pylint: disable=missing-docstring # pylint: disable=too-many-arguments import collections import glob import ipaddress import json import os import random import re import shutil import subprocess import time import unittest import yaml import requests from requests.exceptions import ConnectionError # pylint: disable=import-error from mininet.log import error, output from mininet.net import Mininet from mininet.node import Intf from mininet.util import dumpNodeConnections, pmonitor from ryu.ofproto import ofproto_v1_3 as ofp import faucet_mininet_test_util import faucet_mininet_test_topo class FaucetTestBase(unittest.TestCase): """Base class for all FAUCET unit tests.""" ONE_GOOD_PING = '1 packets transmitted, 1 received, 0% packet loss' FAUCET_VIPV4 = ipaddress.ip_interface(u'10.0.0.254/24') FAUCET_VIPV4_2 = ipaddress.ip_interface(u'172.16.0.254/24') FAUCET_VIPV6 = ipaddress.ip_interface(u'fc00::1:254/64') FAUCET_VIPV6_2 = ipaddress.ip_interface(u'fc01::1:254/64') OFCTL = 'ovs-ofctl -OOpenFlow13' BOGUS_MAC = '01:02:03:04:05:06' FAUCET_MAC = '0e:00:00:00:00:01' LADVD = 'ladvd -e lo -f' ONEMBPS = (1024 * 1024) DB_TIMEOUT = 5 ACL_CONFIG = '' CONFIG = '' CONFIG_GLOBAL = '' GAUGE_CONFIG_DBS = '' N_UNTAGGED = 0 N_TAGGED = 0 NUM_DPS = 1 RUN_GAUGE = True REQUIRES_METERS = False PORT_ACL_TABLE = 0 VLAN_TABLE = 1 VLAN_ACL_TABLE = 2 ETH_SRC_TABLE = 3 IPV4_FIB_TABLE = 4 IPV6_FIB_TABLE = 5 VIP_TABLE = 6 FLOOD_TABLE = 8 ETH_DST_TABLE = 7 config = None dpid = None hardware = 'Open vSwitch' hw_switch = False gauge_controller = None gauge_of_port = None prom_port = None net = None of_port = None ctl_privkey = None ctl_cert = None ca_certs = None port_map = {'port_1': 1, 'port_2': 2, 'port_3': 3, 'port_4': 4} switch_map = {} tmpdir = None net = None topo = None cpn_intf = None config_ports = {} env = collections.defaultdict(dict) rand_dpids = set() def __init__(self, name, config, root_tmpdir, ports_sock, 
max_test_load): super(FaucetTestBase, self).__init__(name) self.config = config self.root_tmpdir = root_tmpdir self.ports_sock = ports_sock self.max_test_load = max_test_load def rand_dpid(self): reserved_range = 100 while True: dpid = random.randint(1, (2**32 - reserved_range)) + reserved_range if dpid not in self.rand_dpids: self.rand_dpids.add(dpid) return str(dpid) def _set_var(self, controller, var, value): self.env[controller][var] = value def _set_var_path(self, controller, var, path): self._set_var(controller, var, os.path.join(self.tmpdir, path)) def _set_prom_port(self, name='faucet'): self._set_var(name, 'FAUCET_PROMETHEUS_PORT', str(self.prom_port)) self._set_var(name, 'FAUCET_PROMETHEUS_ADDR', faucet_mininet_test_util.LOCALHOST) def _set_static_vars(self): self._set_var_path('faucet', 'FAUCET_CONFIG', 'faucet.yaml') self._set_var_path('faucet', 'FAUCET_ACL_CONFIG', 'faucet-acl.yaml') self._set_var_path('faucet', 'FAUCET_LOG', 'faucet.log') self._set_var_path('faucet', 'FAUCET_EXCEPTION_LOG', 'faucet-exception.log') self._set_var_path('gauge', 'GAUGE_CONFIG', 'gauge.yaml') self._set_var_path('gauge', 'GAUGE_LOG', 'gauge.log') self._set_var_path('gauge', 'GAUGE_EXCEPTION_LOG', 'gauge-exception.log') self.faucet_config_path = self.env['faucet']['FAUCET_CONFIG'] self.faucet_acl_config_path = self.env['faucet']['FAUCET_ACL_CONFIG'] self.gauge_config_path = self.env['gauge']['GAUGE_CONFIG'] self.debug_log_path = os.path.join( self.tmpdir, 'ofchannel.log') self.monitor_stats_file = os.path.join( self.tmpdir, 'ports.txt') self.monitor_state_file = os.path.join( self.tmpdir, 'state.txt') self.monitor_flow_table_file = os.path.join( self.tmpdir, 'flow.txt') if self.config is not None: if 'hw_switch' in self.config: self.hw_switch = self.config['hw_switch'] if self.hw_switch: self.dpid = self.config['dpid'] self.cpn_intf = self.config['cpn_intf'] self.hardware = self.config['hardware'] if 'ctl_privkey' in self.config: self.ctl_privkey = self.config['ctl_privkey'] 
if 'ctl_cert' in self.config: self.ctl_cert = self.config['ctl_cert'] if 'ca_certs' in self.config: self.ca_certs = self.config['ca_certs'] dp_ports = self.config['dp_ports'] self.port_map = {} self.switch_map = {} for i, switch_port in enumerate(dp_ports): test_port_name = 'port_%u' % (i + 1) self.port_map[test_port_name] = switch_port self.switch_map[test_port_name] = dp_ports[switch_port] def _set_vars(self): self._set_prom_port() def _write_faucet_config(self): faucet_config = '\n'.join(( self.get_config_header( self.CONFIG_GLOBAL.format(tmpdir=self.tmpdir), self.debug_log_path, self.dpid, self.hardware), self.CONFIG % self.port_map)) if self.config_ports: faucet_config = faucet_config % self.config_ports with open(self.faucet_config_path, 'w') as faucet_config_file: faucet_config_file.write(faucet_config) if self.ACL_CONFIG and self.ACL_CONFIG != '': with open(self.faucet_acl_config_path, 'w') as faucet_acl_config_file: faucet_acl_config_file.write(self.ACL_CONFIG % self.port_map) def _write_gauge_config(self): gauge_config = self.get_gauge_config( self.faucet_config_path, self.monitor_stats_file, self.monitor_state_file, self.monitor_flow_table_file) if self.config_ports: gauge_config = gauge_config % self.config_ports with open(self.gauge_config_path, 'w') as gauge_config_file: gauge_config_file.write(gauge_config) def _test_name(self): return faucet_mininet_test_util.flat_test_name(self.id()) def _tmpdir_name(self): tmpdir = os.path.join(self.root_tmpdir, self._test_name()) os.mkdir(tmpdir) return tmpdir def _controller_lognames(self): lognames = [] for controller in self.net.controllers: logname = controller.logname() if os.path.exists(logname) and os.path.getsize(logname) > 0: lognames.append(logname) return lognames def _wait_load(self, load_retries=120): for _ in range(load_retries): load = os.getloadavg()[0] time.sleep(random.randint(1, 7)) if load < self.max_test_load: return output('load average too high %f, waiting' % load) self.fail('load average 
%f consistently too high' % load) def _allocate_config_ports(self): for port_name in list(self.config_ports.keys()): self.config_ports[port_name] = None for config in (self.CONFIG, self.CONFIG_GLOBAL, self.GAUGE_CONFIG_DBS): if re.search(port_name, config): port = faucet_mininet_test_util.find_free_port( self.ports_sock, self._test_name()) self.config_ports[port_name] = port output('allocating port %u for %s' % (port, port_name)) def _allocate_faucet_ports(self): if self.hw_switch: self.of_port = self.config['of_port'] else: self.of_port = faucet_mininet_test_util.find_free_port( self.ports_sock, self._test_name()) self.prom_port = faucet_mininet_test_util.find_free_port( self.ports_sock, self._test_name()) def _allocate_gauge_ports(self): if self.hw_switch: self.gauge_of_port = self.config['gauge_of_port'] else: self.gauge_of_port = faucet_mininet_test_util.find_free_port( self.ports_sock, self._test_name()) def setUp(self): self.tmpdir = self._tmpdir_name() self._set_static_vars() if self.hw_switch: self.topo_class = faucet_mininet_test_topo.FaucetHwSwitchTopo self.dpid = faucet_mininet_test_util.str_int_dpid(self.dpid) else: self.topo_class = faucet_mininet_test_topo.FaucetSwitchTopo self.dpid = self.rand_dpid() def tearDown(self): """Clean up after a test.""" with open(os.path.join(self.tmpdir, 'prometheus.log'), 'w') as prom_log: prom_log.write(self.scrape_prometheus()) if self.net is not None: self.net.stop() self.net = None faucet_mininet_test_util.return_free_ports( self.ports_sock, self._test_name()) if 'OVS_LOGDIR' in os.environ: ovs_log_dir = os.environ['OVS_LOGDIR'] if ovs_log_dir and os.path.exists(ovs_log_dir): for ovs_log in glob.glob(os.path.join(ovs_log_dir, '*.log')): shutil.copy(ovs_log, self.tmpdir) # must not be any controller exception. 
self.verify_no_exception(self.env['faucet']['FAUCET_EXCEPTION_LOG']) for _, debug_log_name in self._get_ofchannel_logs(): with open(debug_log_name) as debug_log: self.assertFalse( re.search('OFPErrorMsg', debug_log.read()), msg='debug log has OFPErrorMsgs') def _attach_physical_switch(self): """Bridge a physical switch into test topology.""" switch = self.net.switches[0] mapped_base = max(len(self.switch_map), len(self.port_map)) for i, test_host_port in enumerate(sorted(self.switch_map)): port_i = i + 1 mapped_port_i = mapped_base + port_i phys_port = Intf(self.switch_map[test_host_port], node=switch) switch.cmd('ip link set dev %s up' % phys_port) switch.cmd( ('ovs-vsctl add-port %s %s -- ' 'set Interface %s ofport_request=%u') % ( switch.name, phys_port.name, phys_port.name, mapped_port_i)) for port_pair in ((port_i, mapped_port_i), (mapped_port_i, port_i)): port_x, port_y = port_pair switch.cmd('%s add-flow %s in_port=%u,actions=output:%u' % ( self.OFCTL, switch.name, port_x, port_y)) def start_net(self): """Start Mininet network.""" controller_intf = 'lo' if self.hw_switch: controller_intf = self.cpn_intf self._start_faucet(controller_intf) self.pre_start_net() if self.hw_switch: self._attach_physical_switch() self._wait_debug_log() for port_no in self._dp_ports(): self.set_port_up(port_no, wait=False) dumpNodeConnections(self.net.hosts) self.reset_all_ipv4_prefix(prefix=24) def _get_controller(self): """Return first controller.""" return self.net.controllers[0] def _start_gauge_check(self): return None def _start_check(self): if not self._wait_controllers_healthy(): return 'not all controllers healthy' if not self._wait_controllers_connected(): return 'not all controllers connected to switch' if not self._wait_ofctl_up(): return 'ofctl not up' if not self.wait_dp_status(1): return 'prometheus port not up' if self.config_ports: for port_name, port in list(self.config_ports.items()): if port is not None and not port_name.startswith('gauge'): if not 
self._get_controller().listen_port(port): return 'faucet not listening on %u (%s)' % ( port, port_name) return self._start_gauge_check() def _start_faucet(self, controller_intf): last_error_txt = '' for _ in range(3): faucet_mininet_test_util.return_free_ports( self.ports_sock, self._test_name()) self._allocate_config_ports() self._allocate_faucet_ports() self._set_vars() self._write_faucet_config() self.net = Mininet( self.topo, controller=faucet_mininet_test_topo.FAUCET( name='faucet', tmpdir=self.tmpdir, controller_intf=controller_intf, env=self.env['faucet'], ctl_privkey=self.ctl_privkey, ctl_cert=self.ctl_cert, ca_certs=self.ca_certs, ports_sock=self.ports_sock, port=self.of_port, test_name=self._test_name(), switch=self.topo.switches()[0])) if self.RUN_GAUGE: self._allocate_gauge_ports() self._write_gauge_config() self.gauge_controller = faucet_mininet_test_topo.Gauge( name='gauge', tmpdir=self.tmpdir, env=self.env['gauge'], controller_intf=controller_intf, ctl_privkey=self.ctl_privkey, ctl_cert=self.ctl_cert, ca_certs=self.ca_certs, port=self.gauge_of_port) self.net.addController(self.gauge_controller) self.net.start() self._wait_load() last_error_txt = self._start_check() if last_error_txt is None: self._config_tableids() self._wait_load() return self.net.stop() last_error_txt += '\n\n' + self._dump_controller_logs() error('%s: %s' % (self._test_name(), last_error_txt)) time.sleep(faucet_mininet_test_util.MIN_PORT_AGE) self.fail(last_error_txt) def _ofctl_rest_url(self, req): """Return control URL for Ryu ofctl module.""" return 'http://%s:%u/%s' % ( faucet_mininet_test_util.LOCALHOST, self._get_controller().ofctl_port, req) def _ofctl(self, req): try: ofctl_result = requests.get(req).text except ConnectionError: return None return ofctl_result def _ofctl_up(self): switches = self._ofctl(self._ofctl_rest_url('stats/switches')) return switches is not None and re.search(r'^\[[^\]]+\]$', switches) def _wait_ofctl_up(self, timeout=10): for _ in range(timeout): 
if self._ofctl_up(): return True time.sleep(1) return False def _ofctl_get(self, int_dpid, req, timeout): for _ in range(timeout): ofctl_result = self._ofctl(self._ofctl_rest_url(req)) try: ofmsgs = json.loads(ofctl_result)[int_dpid] return [json.dumps(ofmsg) for ofmsg in ofmsgs] except ValueError: # Didn't get valid JSON, try again time.sleep(1) continue return [] def _curl_portmod(self, int_dpid, port_no, config, mask): """Use curl to send a portmod command via the ofctl module.""" curl_format = ' '.join(( 'curl -X POST -d', '\'{"dpid": %s, "port_no": %u, "config": %u, "mask": %u}\'', self._ofctl_rest_url('stats/portdesc/modify'))) return curl_format % (int_dpid, port_no, config, mask) def _signal_proc_on_port(self, host, port, signal): tcp_pattern = '%s/tcp' % port fuser_out = host.cmd('fuser %s -k -%u' % (tcp_pattern, signal)) return re.search(r'%s:\s+\d+' % tcp_pattern, fuser_out) def _get_ofchannel_logs(self): with open(self.env['faucet']['FAUCET_CONFIG']) as config_file: config = yaml.load(config_file) ofchannel_logs = [] for dp_name, dp_config in config['dps'].items(): if 'ofchannel_log' in dp_config: debug_log = dp_config['ofchannel_log'] ofchannel_logs.append((dp_name, debug_log)) return ofchannel_logs def _dump_controller_logs(self): dump_txt = '' test_logs = glob.glob(os.path.join(self.tmpdir, '*.log')) for controller in self.net.controllers: for test_log_name in test_logs: basename = os.path.basename(test_log_name) if basename.startswith(controller.name): with open(test_log_name) as test_log: dump_txt += '\n'.join(( '', basename, '=' * len(basename), '', test_log.read())) break return dump_txt def _controllers_healthy(self): for controller in self.net.controllers: if not controller.healthy(): return False return True def _controllers_connected(self): for controller in self.net.controllers: if not controller.connected(): return False return True def _wait_controllers_healthy(self, timeout=30): for _ in range(timeout): if self._controllers_healthy(): 
return True time.sleep(1) return False def _wait_controllers_connected(self, timeout=30): for _ in range(timeout): if self._controllers_connected(): return True time.sleep(1) return False def _wait_debug_log(self): """Require all switches to have exchanged flows with controller.""" ofchannel_logs = self._get_ofchannel_logs() for _, debug_log in ofchannel_logs: for _ in range(60): if (os.path.exists(debug_log) and os.path.getsize(debug_log) > 0): return True time.sleep(1) return False def verify_no_exception(self, exception_log_name): if not os.path.exists(exception_log_name): return with open(exception_log_name) as exception_log: exception_contents = exception_log.read() self.assertEqual( '', exception_contents, msg='%s log contains %s' % ( exception_log_name, exception_contents)) def tcpdump_helper(self, tcpdump_host, tcpdump_filter, funcs=None, vflags='-v', timeout=10, packets=2, root_intf=False): intf = tcpdump_host.intf().name if root_intf: intf = intf.split('.')[0] tcpdump_cmd = faucet_mininet_test_util.timeout_soft_cmd( 'tcpdump -i %s -e -n -U %s -c %u %s' % ( intf, vflags, packets, tcpdump_filter), timeout) tcpdump_out = tcpdump_host.popen( tcpdump_cmd, stdin=faucet_mininet_test_util.DEVNULL, stderr=subprocess.STDOUT, close_fds=True) popens = {tcpdump_host: tcpdump_out} tcpdump_started = False tcpdump_txt = '' for host, line in pmonitor(popens): if host == tcpdump_host: if tcpdump_started: tcpdump_txt += line.strip() elif re.search('tcpdump: listening on ', line): # when we see tcpdump start, then call provided functions. 
tcpdump_started = True if funcs is not None: for func in funcs: func() else: error('tcpdump_helper: %s' % line) self.assertTrue(tcpdump_started, msg='%s did not start' % tcpdump_cmd) return tcpdump_txt def pre_start_net(self): """Hook called after Mininet initializtion, before Mininet started.""" return def get_config_header(self, config_global, debug_log, dpid, hardware): """Build v2 FAUCET config header.""" return """ %s dps: faucet-1: ofchannel_log: %s dp_id: 0x%x hardware: "%s" """ % (config_global, debug_log, int(dpid), hardware) def get_gauge_watcher_config(self): return """ port_stats: dps: ['faucet-1'] type: 'port_stats' interval: 5 db: 'stats_file' port_state: dps: ['faucet-1'] type: 'port_state' interval: 5 db: 'state_file' flow_table: dps: ['faucet-1'] type: 'flow_table' interval: 5 db: 'flow_file' """ def get_gauge_config(self, faucet_config_file, monitor_stats_file, monitor_state_file, monitor_flow_table_file): """Build Gauge config.""" return """ faucet_configs: - %s watchers: %s dbs: stats_file: type: 'text' file: %s state_file: type: 'text' file: %s flow_file: type: 'text' file: %s couchdb: type: gaugedb gdb_type: nosql nosql_db: couch db_username: couch db_password: 123 db_ip: 'localhost' db_port: 5001 driver: 'couchdb' views: switch_view: '_design/switches/_view/switch' match_view: '_design/flows/_view/match' tag_view: '_design/tags/_view/tags' switches_doc: 'switches_bak' flows_doc: 'flows_bak' db_update_counter: 2 %s """ % (faucet_config_file, self.get_gauge_watcher_config(), monitor_stats_file, monitor_state_file, monitor_flow_table_file, self.GAUGE_CONFIG_DBS) def get_exabgp_conf(self, peer, peer_config=''): return """ neighbor %s { router-id 2.2.2.2; local-address %s; connect %s; peer-as 1; local-as 2; %s } """ % (peer, peer, '%(bgp_port)d', peer_config) def get_all_groups_desc_from_dpid(self, dpid, timeout=2): int_dpid = faucet_mininet_test_util.str_int_dpid(dpid) return self._ofctl_get( int_dpid, 'stats/groupdesc/%s' % int_dpid, timeout) 
    def get_all_flows_from_dpid(self, dpid, timeout=10):
        """Return all flows from DPID."""
        int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
        return self._ofctl_get(
            int_dpid, 'stats/flow/%s' % int_dpid, timeout)

    def _port_stat(self, port_stats, port):
        """Return the decoded stat dict for port, or None if absent.

        port_stats is a list of JSON strings as returned by _ofctl_get.
        """
        if port_stats:
            for port_stat in port_stats:
                port_stat = json.loads(port_stat)
                if port_stat['port_no'] == port:
                    return port_stat
        return None

    def get_port_stats_from_dpid(self, dpid, port, timeout=2):
        """Return port stats for a port."""
        int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
        port_stats = self._ofctl_get(
            int_dpid, 'stats/port/%s' % int_dpid, timeout)
        return self._port_stat(port_stats, port)

    def get_port_desc_from_dpid(self, dpid, port, timeout=2):
        """Return port desc for a port."""
        int_dpid = faucet_mininet_test_util.str_int_dpid(dpid)
        port_stats = self._ofctl_get(
            int_dpid, 'stats/portdesc/%s' % int_dpid, timeout)
        return self._port_stat(port_stats, port)

    def wait_matching_in_group_table(self, action, group_id, timeout=10):
        """Poll the group table until group_id's first bucket contains action.

        Returns True on match within timeout seconds, False otherwise.
        Each poll also dumps the decoded group descriptions to a file in
        tmpdir for post-mortem debugging.
        """
        groupdump = os.path.join(self.tmpdir, 'groupdump-%s.txt' % self.dpid)
        for _ in range(timeout):
            # NOTE(review): literal 1 here is the timeout argument of
            # get_all_groups_desc_from_dpid (one retry per outer poll).
            group_dump = self.get_all_groups_desc_from_dpid(self.dpid, 1)
            with open(groupdump, 'w') as groupdump_file:
                for group_desc in group_dump:
                    group_dict = json.loads(group_desc)
                    groupdump_file.write(str(group_dict) + '\n')
                    if group_dict['group_id'] == group_id:
                        # Only the first bucket's actions are checked.
                        actions = set(group_dict['buckets'][0]['actions'])
                        if set([action]).issubset(actions):
                            return True
            time.sleep(1)
        return False

    def get_matching_flows_on_dpid(self, dpid, match, timeout=10, table_id=None,
                                   actions=None, match_exact=False):
        """Poll a DPID for flows matching the given criteria.

        Args:
            dpid: DPID to query via ofctl.
            match: dict of OF match fields, or None to skip match filtering.
            timeout: polling attempts (one per second) before giving up.
            table_id: if not None, restrict to this table.
            actions: if not None, flow must contain all of these actions.
            match_exact: require the match dict to equal the flow's match
                exactly, rather than be a subset of it.

        Returns:
            list of decoded flow dicts (empty if none matched in time).
        All polled flows are also dumped to a file in tmpdir for debugging.
        """
        flowdump = os.path.join(self.tmpdir, 'flowdump-%s.txt' % dpid)
        with open(flowdump, 'w') as flowdump_file:
            for _ in range(timeout):
                flow_dicts = []
                flow_dump = self.get_all_flows_from_dpid(dpid)
                for flow in flow_dump:
                    flow_dict = json.loads(flow)
                    flowdump_file.write(str(flow_dict) + '\n')
                    if (table_id is not None and
                            flow_dict['table_id'] != table_id):
                        continue
                    if actions is not None:
                        if not set(actions).issubset(
                                set(flow_dict['actions'])):
                            continue
                    if match is not None:
                        if match_exact:
                            if match.items() != flow_dict['match'].items():
                                continue
                        elif not set(match.items()).issubset(
                                set(flow_dict['match'].items())):
                            continue
                    flow_dicts.append(flow_dict)
                if flow_dicts:
                    return flow_dicts
                time.sleep(1)
        return flow_dicts

    def get_matching_flow_on_dpid(self, dpid, match, timeout=10, table_id=None,
                                  actions=None, match_exact=None):
        """Return the first flow matching the criteria on a DPID, or []."""
        flow_dicts = self.get_matching_flows_on_dpid(
            dpid, match, timeout=timeout, table_id=table_id,
            actions=actions, match_exact=match_exact)
        if flow_dicts:
            return flow_dicts[0]
        return []

    def get_matching_flow(self, match, timeout=10, table_id=None,
                          actions=None, match_exact=None):
        """Return the first matching flow on the default DPID, or []."""
        return self.get_matching_flow_on_dpid(
            self.dpid, match, timeout=timeout, table_id=table_id,
            actions=actions, match_exact=match_exact)

    def get_group_id_for_matching_flow(self, match, timeout=10, table_id=None):
        """Return the group_id of the first GROUP action on a matching flow.

        Fails the test if no matching flow with a GROUP action appears
        within timeout polls.
        """
        for _ in range(timeout):
            flow_dict = self.get_matching_flow(
                match, timeout=timeout, table_id=table_id)
            if flow_dict:
                for action in flow_dict['actions']:
                    # Actions are rendered as strings like 'GROUP:123'.
                    if action.startswith('GROUP'):
                        _, group_id = action.split(':')
                        return int(group_id)
            time.sleep(1)
        self.fail(
            'Cannot find group_id for matching flow %s' % match)

    def matching_flow_present_on_dpid(self, dpid, match, timeout=10,
                                      table_id=None, actions=None,
                                      match_exact=None):
        """Return True if matching flow is present on a DPID."""
        if self.get_matching_flow_on_dpid(
                dpid, match, timeout=timeout, table_id=table_id,
                actions=actions, match_exact=match_exact):
            return True
        return False

    def matching_flow_present(self, match, timeout=10, table_id=None,
                              actions=None, match_exact=None):
        """Return True if matching flow is present on default DPID."""
        return self.matching_flow_present_on_dpid(
            self.dpid, match, timeout=timeout, table_id=table_id,
            actions=actions, match_exact=match_exact)

    def wait_until_matching_flow(self, match, timeout=10, table_id=None,
                                 actions=None, match_exact=False):
        """Wait (require) for flow to be present on default DPID."""
        self.assertTrue(
            self.matching_flow_present(
                match, timeout=timeout, table_id=table_id,
                actions=actions, match_exact=match_exact),
            msg=match)

    def wait_until_controller_flow(self):
        """Require a flow that outputs to the controller to be present."""
        self.wait_until_matching_flow(None, actions=[u'OUTPUT:CONTROLLER'])

    def mac_learned(self, mac, timeout=10, in_port=None):
        """Return True if a MAC has been learned on default DPID."""
        # A MAC counts as learned only when present in BOTH the source
        # and destination eth tables.
        for eth_field, table_id in (
                (u'dl_src', self.ETH_SRC_TABLE),
                (u'dl_dst', self.ETH_DST_TABLE)):
            match = {eth_field: u'%s' % mac}
            if in_port is not None and table_id == self.ETH_SRC_TABLE:
                match[u'in_port'] = in_port
            if not self.matching_flow_present(
                    match, timeout=timeout, table_id=table_id):
                return False
        return True

    def host_learned(self, host, timeout=10, in_port=None):
        """Return True if a host has been learned on default DPID."""
        return self.mac_learned(host.MAC(), timeout, in_port)

    def get_host_intf_mac(self, host, intf):
        """Return the MAC address of a named interface on a host."""
        return host.cmd('cat /sys/class/net/%s/address' % intf).strip()

    def host_ip(self, host, family, family_re):
        """Return first address of the given family on host's default intf.

        family is 'inet' or 'inet6'; family_re is a regex for the
        address/prefixlen text to extract from `ip -o addr show` output.
        """
        host_ip_cmd = (
            r'ip -o -f %s addr show %s|'
            'grep -m 1 -Eo "%s %s"|cut -f2 -d " "' % (
                family, host.defaultIntf(), family, family_re))
        return host.cmd(host_ip_cmd).strip()

    def host_ipv4(self, host):
        """Return first IPv4/netmask for host's default interface."""
        return self.host_ip(host, 'inet', r'[0-9\\.]+\/[0-9]+')

    def host_ipv6(self, host):
        """Return first IPv6/netmask for host's default interface."""
        return self.host_ip(host, 'inet6', r'[0-9a-f\:]+\/[0-9]+')

    def reset_ipv4_prefix(self, host, prefix=24):
        """Re-apply host's current IPv4 address with the given prefix length."""
        host.setIP(host.IP(), prefixLen=prefix)

    def reset_all_ipv4_prefix(self, prefix=24):
        """Apply reset_ipv4_prefix to every host in the network."""
        for host in self.net.hosts:
            self.reset_ipv4_prefix(host, prefix)

    def require_host_learned(self, host, retries=8, in_port=None):
        """Require a host be learned on default DPID."""
        host_ip_net = self.host_ipv4(host)
        # Fall back to IPv6 if the host has no IPv4 address.
        if not host_ip_net:
            host_ip_net = self.host_ipv6(host)
        broadcast = ipaddress.ip_interface(
            unicode(host_ip_net)).network.broadcast_address
        broadcast_str = str(broadcast)
        packets = 1
        # ip_interface guarantees version is 4 or 6, so ping_cmd is
        # always bound below.
        if broadcast.version == 4:
            ping_cmd = 'ping -b'
        if broadcast.version == 6:
            ping_cmd = 'ping6'
            # IPv6 has no subnet broadcast; use the all-nodes multicast.
            broadcast_str = 'ff02::1'
        # Stimulate host learning with a broadcast ping.
        ping_cli = faucet_mininet_test_util.timeout_cmd(
            '%s -I%s -W1 -c%u %s' % (
                ping_cmd, host.defaultIntf().name, packets, broadcast_str), 3)
        for _ in range(retries):
            if self.host_learned(host, timeout=1, in_port=in_port):
                return
            ping_result = host.cmd(ping_cli)
            self.assertTrue(re.search(
                r'%u packets transmitted' % packets, ping_result),
                msg='%s: %s' % (
                    ping_cli, ping_result))
        self.fail('host %s (%s) could not be learned (%s: %s)' % (
            host, host.MAC(), ping_cli, ping_result))

    def get_prom_port(self):
        """Return FAUCET's Prometheus TCP port (int)."""
        return int(self.env['faucet']['FAUCET_PROMETHEUS_PORT'])

    def get_prom_addr(self):
        """Return FAUCET's Prometheus listen address."""
        return self.env['faucet']['FAUCET_PROMETHEUS_ADDR']

    def _prometheus_url(self, controller):
        """Return the Prometheus base URL for 'faucet' or 'gauge'.

        NOTE(review): implicitly returns None for any other controller
        name, which scrape_prometheus would then pass to requests.get().
        """
        if controller == 'faucet':
            return 'http://%s:%u' % (
                self.get_prom_addr(), self.get_prom_port())
        elif controller == 'gauge':
            return 'http://%s:%u' % (
                self.get_prom_addr(), self.config_ports['gauge_prom_port'])

    def scrape_prometheus(self, controller='faucet'):
        """Return the controller's Prometheus page, minus comment lines.

        Returns '' if the endpoint is not reachable.
        """
        url = self._prometheus_url(controller)
        try:
            prom_lines = requests.get(url).text.split('\n')
        except ConnectionError:
            return ''
        prom_vars = []
        for prom_line in prom_lines:
            # Skip '# HELP'/'# TYPE' exposition-format comments.
            if not prom_line.startswith('#'):
                prom_vars.append(prom_line)
        return '\n'.join(prom_vars)

    def scrape_prometheus_var(self, var, labels=None, any_labels=False,
                              default=None, dpid=True, multiple=False,
                              controller='faucet', retries=1):
        """Return the integer value of a Prometheus variable.

        Args:
            var: variable name (regex anchored at start of line).
            labels: dict of label name/value pairs to require.
            any_labels: if True, accept any label set (labels ignored).
            default: value returned if no sample matched after retries.
            dpid: if True, add this test's dp_id to the required labels.
            multiple: if True, return a list of (name, value) tuples
                for all matching samples instead of the first value.
            controller: which controller's endpoint to scrape.
            retries: scrape attempts, one second apart.
        """
        label_values_re = r''
        if any_labels:
            label_values_re = r'\{[^\}]+\}'
        else:
            if labels is None:
                labels = {}
            if dpid:
                # Python 2 long(); dp_id label is rendered in hex.
                labels.update({'dp_id': '0x%x' % long(self.dpid)})
            if labels:
                label_values = []
                for label, value in sorted(list(labels.items())):
                    label_values.append('%s="%s"' % (label, value))
                # \S+ between pairs tolerates labels we did not specify.
                label_values_re = r'\{%s\}' % r'\S+'.join(label_values)
        var_re = r'^%s%s$' % (var, label_values_re)
        for _ in range(retries):
            results = []
            prom_lines = self.scrape_prometheus(controller)
            for prom_line in prom_lines.splitlines():
                prom_var_data = prom_line.split(' ')
                self.assertEqual(
                    2, len(prom_var_data),
                    msg='invalid prometheus line in %s' % prom_lines)
                # NOTE(review): this rebinding shadows the 'var' parameter
                # for the rest of the loop; var_re was built beforehand so
                # behavior is unaffected.
                var, value = prom_var_data
                var_match = re.search(var_re, var)
                if var_match:
                    # Values are exposed as floats; truncate to int.
                    value_int = long(float(value))
                    results.append((var, value_int))
                    if not multiple:
                        break
            if results:
                if multiple:
                    return results
                return results[0][1]
            time.sleep(1)
        return default

    def gauge_smoke_test(self):
        """Verify Gauge is polling and writing all its watcher files."""
        watcher_files = set([
            self.monitor_stats_file,
            self.monitor_state_file,
            self.monitor_flow_table_file])
        found_watcher_files = set()
        for _ in range(60):
            for watcher_file in watcher_files:
                if (os.path.exists(watcher_file)
                        and os.path.getsize(watcher_file)):
                    found_watcher_files.add(watcher_file)
            if watcher_files == found_watcher_files:
                break
            self.verify_no_exception(self.env['gauge']['GAUGE_EXCEPTION_LOG'])
            time.sleep(1)
            # NOTE(review): resetting here means all watcher files must be
            # non-empty within a single pass for the loop to exit early.
            found_watcher_files = set()
        missing_watcher_files = watcher_files - found_watcher_files
        self.assertEqual(
            missing_watcher_files, set(),
            msg='Gauge missing logs: %s' % missing_watcher_files)
        self.hup_gauge()
        self.verify_no_exception(self.env['faucet']['FAUCET_EXCEPTION_LOG'])

    def prometheus_smoke_test(self):
        """Verify basic FAUCET Prometheus variables have sane values."""
        prom_out = self.scrape_prometheus()
        for nonzero_var in (
                r'of_packet_ins', r'of_flowmsgs_sent', r'of_dp_connections',
                r'faucet_config\S+name=\"flood\"',
                r'faucet_pbr_version\S+version='):
            self.assertTrue(
                re.search(r'%s\S+\s+[1-9]+' % nonzero_var, prom_out),
                msg='expected %s to be nonzero (%s)' % (
                    nonzero_var, prom_out))
        for zero_var in (
                'of_errors', 'of_dp_disconnections'):
            self.assertTrue(
                re.search(r'%s\S+\s+0' % zero_var, prom_out),
                msg='expected %s to be present and zero (%s)' % (
                    zero_var, prom_out))

    def get_configure_count(self):
        """Return the number of times FAUCET has processed a reload request."""
        for _ in range(3):
            count = self.scrape_prometheus_var(
                'faucet_config_reload_requests', default=None, dpid=False)
            if count is not None:
                return count
            time.sleep(1)
        self.fail('configure count stayed zero')

    def hup_faucet(self):
        """Send a HUP signal to the controller."""
        controller = self._get_controller()
        self.assertTrue(
            self._signal_proc_on_port(controller, controller.port, 1))

    def hup_gauge(self):
        """Send a HUP signal to the Gauge controller."""
        self.assertTrue(
            self._signal_proc_on_port(
                self.gauge_controller, int(self.gauge_of_port), 1))

    def verify_controller_fping(self, host, faucet_vip,
                                total_packets=100, packet_interval_ms=100):
        """fping the FAUCET VIP from a host and require at least one reply."""
        fping_bin = 'fping'
        if faucet_vip.version == 6:
            fping_bin = 'fping6'
        fping_cli = '%s -s -c %u -i %u -p 1 -T 1 %s' % (
            fping_bin, total_packets, packet_interval_ms, faucet_vip.ip)
        # Allow 1.5x the nominal duration of the fping run.
        timeout = int(((1000.0 / packet_interval_ms) * total_packets) * 1.5)
        fping_out = host.cmd(faucet_mininet_test_util.timeout_cmd(
            fping_cli, timeout))
        error('%s: %s' % (self._test_name(), fping_out))
        self.assertTrue(
            not re.search(r'\s+0 ICMP Echo Replies received', fping_out),
            msg=fping_out)

    def verify_vlan_flood_limited(self, vlan_first_host, vlan_second_host,
                                  other_vlan_host):
        """Verify that flooding doesn't cross VLANs."""
        for first_host, second_host in (
                (vlan_first_host, vlan_second_host),
                (vlan_second_host, vlan_first_host)):
            tcpdump_filter = 'ether host %s or ether host %s' % (
                first_host.MAC(), second_host.MAC())
            # Sniff on the other VLAN while the pair pings: nothing from
            # either MAC should be seen there.
            tcpdump_txt = self.tcpdump_helper(
                other_vlan_host, tcpdump_filter, [
                    lambda: first_host.cmd('arp -d %s' % second_host.IP()),
                    lambda: first_host.cmd('ping -c1 %s' % second_host.IP())],
                packets=1)
            self.assertTrue(
                re.search('0 packets captured', tcpdump_txt),
                msg=tcpdump_txt)

    def verify_ping_mirrored(self, first_host, second_host, mirror_host):
        """Verify ICMP between two hosts is mirrored to mirror_host."""
        self.net.ping((first_host, second_host))
        for host in (first_host, second_host):
            self.require_host_learned(host)
        self.retry_net_ping(hosts=(first_host, second_host))
        mirror_mac = mirror_host.MAC()
        # Exclude the mirror host's own traffic; capture echo req/reply.
        tcpdump_filter = (
            'not ether src %s and '
            '(icmp[icmptype] == 8 or icmp[icmptype] == 0)') % mirror_mac
        first_ping_second = 'ping -c1 %s' % second_host.IP()
        tcpdump_txt = self.tcpdump_helper(
            mirror_host, tcpdump_filter, [
                lambda: first_host.cmd(first_ping_second)])
        self.assertTrue(re.search(
            '%s: ICMP echo request' % second_host.IP(), tcpdump_txt),
            msg=tcpdump_txt)
        self.assertTrue(re.search(
            '%s: ICMP echo reply' % first_host.IP(), tcpdump_txt),
            msg=tcpdump_txt)

    def verify_eapol_mirrored(self, first_host, second_host, mirror_host):
        """Verify EAPOL frames from first_host are mirrored to mirror_host."""
        self.net.ping((first_host, second_host))
        for host in (first_host, second_host):
            self.require_host_learned(host)
        self.retry_net_ping(hosts=(first_host, second_host))
        mirror_mac = mirror_host.MAC()
        tmp_eap_conf = os.path.join(self.tmpdir, 'eap.conf')
        # 0x888e is the EAPOL ethertype.
        tcpdump_filter = (
            'not ether src %s and ether proto 0x888e' % mirror_mac)
        eap_conf_cmd = (
            'echo "eapol_version=2\nap_scan=0\nnetwork={\n'
            'key_mgmt=IEEE8021X\neap=MD5\nidentity=\\"login\\"\n'
            'password=\\"password\\"\n}\n" > %s' % tmp_eap_conf)
        wpa_supplicant_cmd = faucet_mininet_test_util.timeout_cmd(
            'wpa_supplicant -c%s -Dwired -i%s -d' % (
                tmp_eap_conf, first_host.defaultIntf().name), 5)
        tcpdump_txt = self.tcpdump_helper(
            mirror_host, tcpdump_filter, [
                lambda: first_host.cmd(eap_conf_cmd),
                lambda: first_host.cmd(wpa_supplicant_cmd)])
        self.assertTrue(
            re.search('01:80:c2:00:00:03, ethertype EAPOL', tcpdump_txt),
            msg=tcpdump_txt)

    def bogus_mac_flooded_to_port1(self):
        """Return True if traffic to BOGUS_MAC is flooded to port 1's host."""
        first_host, second_host, third_host = self.net.hosts[0:3]
        unicast_flood_filter = 'ether host %s' % self.BOGUS_MAC
        static_bogus_arp = 'arp -s %s %s' % (first_host.IP(), self.BOGUS_MAC)
        curl_first_host = 'curl -m 5 http://%s' % first_host.IP()
        tcpdump_txt = self.tcpdump_helper(
            first_host, unicast_flood_filter,
            [lambda: second_host.cmd(static_bogus_arp),
             lambda: second_host.cmd(curl_first_host),
             lambda: self.net.ping(hosts=(second_host, third_host))])
        return not re.search('0 packets captured', tcpdump_txt)

    def verify_port1_unicast(self, unicast_status):
        """Assert presence/absence of port 1 unicast flood rules.

        unicast_status: expected boolean presence of both rules.
        """
        # Unicast flooding rule for traffic from port 1.
        self.assertEqual(
            self.matching_flow_present(
                {u'dl_vlan': u'100',
                 u'in_port': int(self.port_map['port_1'])},
                table_id=self.FLOOD_TABLE,
                match_exact=True),
            unicast_status)
        # Unicast flood rule exists that outputs to port 1.
        self.assertEqual(
            self.matching_flow_present(
                {u'dl_vlan': u'100',
                 u'in_port': int(self.port_map['port_2'])},
                table_id=self.FLOOD_TABLE,
                actions=[u'OUTPUT:%u' % self.port_map['port_1']],
                match_exact=True),
            unicast_status)

    def verify_lldp_blocked(self):
        """Return True if LLDP sent by second host is NOT seen by first host."""
        first_host, second_host = self.net.hosts[0:2]
        # 0x88cc is the LLDP ethertype.
        lldp_filter = 'ether proto 0x88cc'
        ladvd_mkdir = 'mkdir -p /var/run/ladvd'
        send_lldp = '%s -L -o %s' % (
            faucet_mininet_test_util.timeout_cmd(self.LADVD, 30),
            second_host.defaultIntf())
        tcpdump_txt = self.tcpdump_helper(
            first_host, lldp_filter,
            [lambda: second_host.cmd(ladvd_mkdir),
             lambda: second_host.cmd(send_lldp),
             lambda: second_host.cmd(send_lldp),
             lambda: second_host.cmd(send_lldp)],
            timeout=20, packets=5)
        if re.search(second_host.MAC(), tcpdump_txt):
            return False
        return True

    def is_cdp_blocked(self):
        """Return True if CDP sent by second host is NOT seen by first host."""
        first_host, second_host = self.net.hosts[0:2]
        # CDP multicast destination plus CDP protocol ID in the SNAP header.
        cdp_filter = 'ether host 01:00:0c:cc:cc:cc and ether[20:2]==0x2000'
        ladvd_mkdir = 'mkdir -p /var/run/ladvd'
        send_cdp = '%s -C -o %s' % (
            faucet_mininet_test_util.timeout_cmd(self.LADVD, 30),
            second_host.defaultIntf())
        tcpdump_txt = self.tcpdump_helper(
            first_host, cdp_filter,
            [lambda: second_host.cmd(ladvd_mkdir),
             lambda: second_host.cmd(send_cdp),
             lambda: second_host.cmd(send_cdp),
             lambda: second_host.cmd(send_cdp)],
            timeout=20, packets=5)
        if re.search(second_host.MAC(), tcpdump_txt):
            return False
        return True

    def verify_hup_faucet(self, timeout=3):
        """HUP and verify the HUP was processed."""
        start_configure_count = self.get_configure_count()
        self.hup_faucet()
        for _ in range(timeout):
            configure_count = self.get_configure_count()
            if configure_count > start_configure_count:
                return
            time.sleep(1)
        self.fail('HUP not processed by FAUCET')

    def force_faucet_reload(self, new_config):
        """Force FAUCET to reload by adding new line to config file."""
        with open(self.env['faucet']['FAUCET_CONFIG'], 'a') as config_file:
            config_file.write(new_config)
        self.verify_hup_faucet()

    def get_host_port_stats(self, hosts_switch_ports):
        """Return {host: port stats dict} for (host, switch_port) pairs."""
        port_stats = {}
        for host, switch_port in hosts_switch_ports:
            port_stats[host] = self.get_port_stats_from_dpid(
                self.dpid, switch_port)
        return port_stats

    def of_bytes_mbps(self, start_port_stats, end_port_stats, var, seconds):
        """Return mbps implied by an OF byte counter delta over seconds."""
        return (end_port_stats[var] - start_port_stats[var]) * 8 / seconds / self.ONEMBPS

    def verify_iperf_min(self, hosts_switch_ports, min_mbps, server_ip,
                         iperf_port):
        """Verify minimum performance and OF counters match iperf approximately."""
        seconds = 5
        # Allowed relative difference between iperf and OF counter rates.
        prop = 0.1
        start_port_stats = self.get_host_port_stats(hosts_switch_ports)
        hosts = []
        for host, _ in hosts_switch_ports:
            hosts.append(host)
        client_host, server_host = hosts
        iperf_mbps = self.iperf(
            client_host, server_host, server_ip, iperf_port, seconds)
        self.assertTrue(iperf_mbps > min_mbps)
        # TODO: account for drops.
        for _ in range(3):
            end_port_stats = self.get_host_port_stats(hosts_switch_ports)
            approx_match = True
            for host in hosts:
                of_rx_mbps = self.of_bytes_mbps(
                    start_port_stats[host], end_port_stats[host],
                    'rx_bytes', seconds)
                of_tx_mbps = self.of_bytes_mbps(
                    start_port_stats[host], end_port_stats[host],
                    'tx_bytes', seconds)
                output(of_rx_mbps, of_tx_mbps)
                max_of_mbps = float(max(of_rx_mbps, of_tx_mbps))
                iperf_to_max = iperf_mbps / max_of_mbps
                msg = 'iperf: %fmbps, of: %fmbps (%f)' % (
                    iperf_mbps, max_of_mbps, iperf_to_max)
                output(msg)
                if ((iperf_to_max < (1.0 - prop)) or
                        (iperf_to_max > (1.0 + prop))):
                    approx_match = False
            if approx_match:
                return
            time.sleep(1)
        self.fail(msg=msg)

    def wait_port_status(self, port_no, expected_status, timeout=10):
        """Wait for the Prometheus port_status var to reach expected_status."""
        for _ in range(timeout):
            port_status = self.scrape_prometheus_var(
                'port_status', {'port': port_no}, default=None)
            if port_status is not None and port_status == expected_status:
                return
            time.sleep(1)
        self.fail('port %s status %s != expected %u' % (
            port_no, port_status, expected_status))

    def set_port_status(self, port_no, status, wait):
        """Set a port's config bits via an ofctl portmod and optionally wait.

        The portmod mask is always OFPPC_PORT_DOWN, so only the
        up/down bit of status takes effect.
        """
        self.assertEqual(
            0,
            os.system(self._curl_portmod(
                self.dpid, port_no, status, ofp.OFPPC_PORT_DOWN)))
        if wait:
            expected_status = 1
            if status == ofp.OFPPC_PORT_DOWN:
                expected_status = 0
            self.wait_port_status(port_no, expected_status)

    def set_port_down(self, port_no, wait=True):
        """Administratively disable a port."""
        self.set_port_status(port_no, ofp.OFPPC_PORT_DOWN, wait)

    def set_port_up(self, port_no, wait=True):
        """Administratively enable a port."""
        self.set_port_status(port_no, 0, wait)

    def wait_dp_status(self, expected_status, controller='faucet',
                       timeout=60):
        """Return True when dp_status reaches expected_status in time."""
        for _ in range(timeout):
            dp_status = self.scrape_prometheus_var(
                'dp_status', {}, controller=controller, default=None)
            if dp_status is not None and dp_status == expected_status:
                return True
            time.sleep(1)
        return False

    def _get_tableid(self, name):
        """Return the OF table ID for a named FAUCET table, via Prometheus."""
        return self.scrape_prometheus_var(
            'faucet_config_table_names', {'name': name})

    def _config_tableids(self):
        """Cache all FAUCET table IDs as attributes for later assertions."""
        self.PORT_ACL_TABLE = self._get_tableid('port_acl')
        self.VLAN_TABLE = self._get_tableid('vlan')
        self.VLAN_ACL_TABLE = self._get_tableid('vlan_acl')
        self.ETH_SRC_TABLE = self._get_tableid('eth_src')
        self.IPV4_FIB_TABLE = self._get_tableid('ipv4_fib')
        self.IPV6_FIB_TABLE = self._get_tableid('ipv6_fib')
        self.VIP_TABLE = self._get_tableid('vip')
        self.ETH_DST_TABLE = self._get_tableid('eth_dst')
        self.FLOOD_TABLE = self._get_tableid('flood')

    def _dp_ports(self):
        """Return the switch ports actually in use by this test's topology."""
        port_count = self.N_TAGGED + self.N_UNTAGGED
        return list(sorted(self.port_map.values()))[:port_count]

    def flap_all_switch_ports(self, flap_time=1):
        """Flap all ports on switch."""
        for port_no in self._dp_ports():
            self.set_port_down(port_no)
            time.sleep(flap_time)
            self.set_port_up(port_no)

    def add_macvlan(self, host, macvlan_intf):
        """Create and bring up a macvlan subinterface on a host."""
        host.cmd('ip link add link %s %s type macvlan' % (
            host.defaultIntf(), macvlan_intf))
        host.cmd('ip link set dev %s up' % macvlan_intf)

    def add_host_ipv6_address(self, host, ip_v6, intf=None):
        """Add an IPv6 address to a Mininet host."""
        if intf is None:
            intf = host.intf()
        self.assertEqual(
            '', host.cmd('ip -6 addr add %s dev %s' % (ip_v6, intf)))

    def add_host_route(self, host, ip_dst, ip_gw):
        """Add an IP route to a Mininet host."""
        # Remove any existing route first so the add cannot conflict.
        host.cmd('ip -%u route del %s' % (
            ip_dst.version, ip_dst.network.with_prefixlen))
        add_cmd = 'ip -%u route add %s via %s' % (
            ip_dst.version, ip_dst.network.with_prefixlen, ip_gw)
        results = host.cmd(add_cmd)
        self.assertEqual(
            '', results, msg='%s: %s' % (add_cmd, results))

    def _one_ip_ping(self, host, ping_cmd, retries, require_host_learned):
        """Run ping_cmd on host until ONE_GOOD_PING matches, else fail."""
        if require_host_learned:
            self.require_host_learned(host)
        for _ in range(retries):
            ping_result = host.cmd(ping_cmd)
            # NOTE(review): looks like leftover debug output — consider
            # removing or routing through the test logger.
            print(ping_result)
            if re.search(self.ONE_GOOD_PING, ping_result):
                return
        self.assertTrue(
            re.search(self.ONE_GOOD_PING, ping_result),
            msg='%s: %s' % (ping_cmd, ping_result))

    def one_ipv4_ping(self, host, dst, retries=3, require_host_learned=True,
                      intf=None, netns=None):
        """Ping an IPv4 destination from a host."""
        if intf is None:
            intf = host.defaultIntf()
        ping_cmd = 'ping -c1 -I%s %s' % (intf, dst)
        if netns is not None:
            ping_cmd = 'ip netns exec %s %s' % (netns, ping_cmd)
        return self._one_ip_ping(
            host, ping_cmd, retries, require_host_learned)

    def one_ipv4_controller_ping(self, host):
        """Ping the controller from a host with IPv4."""
        self.one_ipv4_ping(host, self.FAUCET_VIPV4.ip)
        self.verify_ipv4_host_learned_mac(
            host, self.FAUCET_VIPV4.ip, self.FAUCET_MAC)

    def one_ipv6_ping(self, host, dst, retries=3):
        """Ping an IPv6 destination from a host."""
        ping_cmd = 'ping6 -c1 %s' % dst
        return self._one_ip_ping(
            host, ping_cmd, retries, require_host_learned=True)

    def one_ipv6_controller_ping(self, host):
        """Ping the controller from a host with IPv6."""
        self.one_ipv6_ping(host, self.FAUCET_VIPV6.ip)
        self.verify_ipv6_host_learned_mac(
            host, self.FAUCET_VIPV6.ip, self.FAUCET_MAC)

    def retry_net_ping(self, hosts=None, required_loss=0, retries=3):
        """pingAll (or ping a host subset) until loss <= required_loss."""
        loss = None
        for _ in range(retries):
            if hosts is None:
                loss = self.net.pingAll()
            else:
                loss = self.net.ping(hosts)
            if loss <= required_loss:
                return
            time.sleep(1)
        self.fail('ping %f loss > required loss %f' % (loss, required_loss))
    def tcp_port_free(self, host, port, ipv=4):
        """Return listener details if something is bound to port on host, else None.

        NOTE: despite the name, a truthy return means the port is NOT free
        (the return value is the listing command's output for the listener).
        """
        listen_out = host.cmd(
            faucet_mininet_test_util.tcp_listening_cmd(port, ipv))
        if listen_out:
            return listen_out
        return None

    def wait_for_tcp_free(self, host, port, timeout=10, ipv=4):
        """Wait for a TCP port to become free (no listener) on a host."""
        for _ in range(timeout):
            listen_out = self.tcp_port_free(host, port, ipv)
            if listen_out is None:
                return
            time.sleep(1)
        self.fail('%s busy on port %u (%s)' % (host, port, listen_out))

    def wait_for_tcp_listen(self, host, port, timeout=10, ipv=4):
        """Wait for a host to start listening on a port."""
        for _ in range(timeout):
            listen_out = self.tcp_port_free(host, port, ipv)
            if listen_out is not None:
                return
            time.sleep(1)
        self.fail('%s never listened on port %u' % (host, port))

    def serve_hello_on_tcp_port(self, host, port):
        """Serve 'hello' on a TCP port on a host."""
        # Backgrounded netcat one-shot server, killed after 10s if unused.
        host.cmd(faucet_mininet_test_util.timeout_cmd(
            'echo hello | nc -l %s %u &' % (host.IP(), port), 10))
        self.wait_for_tcp_listen(host, port)

    def wait_nonzero_packet_count_flow(self, match, timeout=10, table_id=None,
                                       actions=None):
        """Wait for a flow to be present and have a non-zero packet_count."""
        for _ in range(timeout):
            flow = self.get_matching_flow(
                match, timeout=1, table_id=table_id, actions=actions)
            if flow and flow['packet_count'] > 0:
                return
            time.sleep(1)
        # Distinguish "flow never matched" from "flow matched but unused".
        if flow:
            self.fail('flow %s matching %s had zero packet count' % (flow, match))
        else:
            self.fail('no flow matching %s' % match)

    def verify_tp_dst_blocked(self, port, first_host, second_host,
                              table_id=0, mask=None):
        """Verify that a TCP port on a host is blocked from another host."""
        self.serve_hello_on_tcp_port(second_host, port)
        # Blocked: client netcat should produce no output at all.
        self.assertEqual(
            '', first_host.cmd(faucet_mininet_test_util.timeout_cmd(
                'nc %s %u' % (second_host.IP(), port), 10)))
        if table_id is not None:
            if mask is None:
                match_port = int(port)
            else:
                # Masked match is expressed as "port/mask" in the flow dump.
                match_port = '/'.join((str(port), str(mask)))
            self.wait_nonzero_packet_count_flow(
                {u'tp_dst': match_port}, table_id=table_id)

    def verify_tp_dst_notblocked(self, port, first_host, second_host,
                                 table_id=0, mask=None):
        """Verify that a TCP port on a host is NOT blocked from another host."""
        self.serve_hello_on_tcp_port(second_host, port)
        self.assertEqual(
            'hello\r\n',
            first_host.cmd('nc -w 5 %s %u' % (second_host.IP(), port)))
        if table_id is not None:
            self.wait_nonzero_packet_count_flow(
                {u'tp_dst': int(port)}, table_id=table_id)

    def swap_host_macs(self, first_host, second_host):
        """Swap the MAC addresses of two Mininet hosts."""
        first_host_mac = first_host.MAC()
        second_host_mac = second_host.MAC()
        first_host.setMAC(second_host_mac)
        second_host.setMAC(first_host_mac)

    def start_exabgp(self, exabgp_conf, timeout=30):
        """Start exabgp process on controller host.

        exabgp_conf is a template with a %(bgp_port)s placeholder.
        Returns (exabgp_log, exabgp_err) paths once the log file appears.
        """
        exabgp_conf_file_name = os.path.join(self.tmpdir, 'exabgp.conf')
        exabgp_log = os.path.join(self.tmpdir, 'exabgp.log')
        exabgp_err = os.path.join(self.tmpdir, 'exabgp.err')
        exabgp_env = ' '.join((
            'exabgp.daemon.user=root',
            'exabgp.log.all=true',
            'exabgp.log.level=DEBUG',
            'exabgp.log.destination=%s' % exabgp_log,
        ))
        bgp_port = self.config_ports['bgp_port']
        exabgp_conf = exabgp_conf % {'bgp_port': bgp_port}
        with open(exabgp_conf_file_name, 'w') as exabgp_conf_file:
            exabgp_conf_file.write(exabgp_conf)
        controller = self._get_controller()
        # Hard 600s cap so a wedged exabgp cannot outlive the test run.
        exabgp_cmd = faucet_mininet_test_util.timeout_cmd(
            'exabgp %s -d 2> %s > /dev/null &' % (
                exabgp_conf_file_name, exabgp_err), 600)
        exabgp_cli = 'env %s %s' % (exabgp_env, exabgp_cmd)
        controller.cmd(exabgp_cli)
        # Log file existence is the readiness signal.
        for _ in range(timeout):
            if os.path.exists(exabgp_log):
                return (exabgp_log, exabgp_err)
            time.sleep(1)
        self.fail('exabgp (%s) did not start' % exabgp_cli)

    def wait_bgp_up(self, neighbor, vlan, exabgp_log, exabgp_err):
        """Wait for BGP to come up."""
        label_values = {
            'neighbor': neighbor,
            'vlan': vlan,
        }
        for _ in range(60):
            uptime = self.scrape_prometheus_var(
                'bgp_neighbor_uptime', label_values, default=0)
            if uptime > 0:
                return
            time.sleep(1)
        # Peering failed: include whatever exabgp logged for diagnosis.
        exabgp_log_content = []
        for log_name in (exabgp_log, exabgp_err):
            if os.path.exists(log_name):
                with open(log_name) as log:
                    exabgp_log_content.append(log.read())
        self.fail('exabgp did not peer with FAUCET: %s' % '\n'.join(
            exabgp_log_content))

    def exabgp_updates(self, exabgp_log):
        """Verify that exabgp process has received BGP updates."""
        controller = self._get_controller()
        # exabgp should have received our BGP updates
        for _ in range(60):
            updates = controller.cmd(
                r'grep UPDATE %s |grep -Eo "\S+ next-hop \S+"' % exabgp_log)
            if updates:
                return updates
            time.sleep(1)
        self.fail('exabgp did not receive BGP updates')

    def wait_exabgp_sent_updates(self, exabgp_log_name):
        """Verify that exabgp process has sent BGP updates."""
        for _ in range(60):
            # Re-read the whole log each poll; exabgp appends over time.
            with open(exabgp_log_name) as exabgp_log:
                exabgp_log_content = exabgp_log.read()
            if re.search(r'>> [1-9]+[0-9]* UPDATE', exabgp_log_content):
                return
            time.sleep(1)
        self.fail('exabgp did not send BGP updates')

    def ping_all_when_learned(self, retries=3):
        """Verify all hosts can ping each other once FAUCET has learned all."""
        # Cause hosts to send traffic that FAUCET can use to learn them.
        for _ in range(retries):
            loss = self.net.pingAll()
            # we should have learned all hosts now, so should have no loss.
            for host in self.net.hosts:
                self.require_host_learned(host)
            if loss == 0:
                return
        self.assertEqual(0, loss)

    def wait_for_route_as_flow(self, nexthop, prefix, vlan_vid=None,
                               timeout=10, with_group_table=False,
                               nonzero_packets=False):
        """Verify a route has been added as a flow.

        nexthop is the next hop's MAC; prefix is an ipaddress network.
        """
        exp_prefix = u'%s/%s' % (
            prefix.network_address, prefix.netmask)
        if prefix.version == 6:
            nw_dst_match = {u'ipv6_dst': exp_prefix}
            table_id = self.IPV6_FIB_TABLE
        else:
            nw_dst_match = {u'nw_dst': exp_prefix}
            table_id = self.IPV4_FIB_TABLE
        # Routed traffic is rewritten to the next hop's MAC.
        nexthop_action = u'SET_FIELD: {eth_dst:%s}' % nexthop
        if vlan_vid is not None:
            nw_dst_match[u'dl_vlan'] = unicode(vlan_vid)
        if with_group_table:
            # Next hop action lives in the group bucket, not the flow itself.
            group_id = self.get_group_id_for_matching_flow(
                nw_dst_match)
            self.wait_matching_in_group_table(
                nexthop_action, group_id, timeout)
        else:
            if nonzero_packets:
                self.wait_nonzero_packet_count_flow(
                    nw_dst_match, timeout=timeout, table_id=table_id,
                    actions=[nexthop_action])
            else:
                self.wait_until_matching_flow(
                    nw_dst_match, timeout=timeout, table_id=table_id,
                    actions=[nexthop_action])

    def host_ipv4_alias(self, host, alias_ip, intf=None):
        """Add an IPv4 alias address to a host."""
        if intf is None:
            intf = host.intf()
        # Delete first so re-adding the same alias is idempotent.
        del_cmd = 'ip addr del %s dev %s' % (
            alias_ip.with_prefixlen, intf)
        add_cmd = 'ip addr add %s dev %s label %s:1' % (
            alias_ip.with_prefixlen, intf, intf)
        host.cmd(del_cmd)
        self.assertEqual('', host.cmd(add_cmd))

    def _ip_neigh(self, host, ipa, ip_ver):
        """Return the resolved MAC for ipa from the host's neighbor table, or None."""
        neighbors = host.cmd('ip -%u neighbor show %s' % (ip_ver, ipa))
        neighbors_fields = neighbors.split()
        # Field 5 of 'ip neighbor show' output is the lladdr (MAC).
        if len(neighbors_fields) >= 5:
            return neighbors.split()[4]
        return None

    def _verify_host_learned_mac(self, host, ipa, ip_ver, mac, retries):
        """Poll until host's neighbor table resolves ipa to mac."""
        for _ in range(retries):
            if self._ip_neigh(host, ipa, ip_ver) == mac:
                return
            time.sleep(1)
        self.fail(
            'could not verify %s resolved to %s' % (ipa, mac))

    def verify_ipv4_host_learned_mac(self, host, ipa, mac, retries=3):
        """Verify host has learned mac for IPv4 address ipa."""
        self._verify_host_learned_mac(host, ipa, 4, mac, retries)

    def verify_ipv4_host_learned_host(self, host, learned_host):
        """Verify host has learned learned_host's IPv4/MAC binding."""
        learned_ip = ipaddress.ip_interface(unicode(self.host_ipv4(learned_host)))
        self.verify_ipv4_host_learned_mac(host, learned_ip.ip, learned_host.MAC())

    def verify_ipv6_host_learned_mac(self, host, ip6, mac, retries=3):
        """Verify host has learned mac for IPv6 address ip6."""
        self._verify_host_learned_mac(host, ip6, 6, mac, retries)

    def verify_ipv6_host_learned_host(self, host, learned_host):
        """Verify host has learned learned_host's IPv6/MAC binding."""
        learned_ip6 = ipaddress.ip_interface(unicode(self.host_ipv6(learned_host)))
        self.verify_ipv6_host_learned_mac(host, learned_ip6.ip, learned_host.MAC())

    def iperf_client(self, client_host, iperf_client_cmd):
        """Run the iperf client and return measured bandwidth in Mbps.

        Expects iperf CSV report mode (-y c): 9 comma-separated fields with
        bits/sec last.
        """
        for _ in range(3):
            iperf_results = client_host.cmd(iperf_client_cmd)
            iperf_csv = iperf_results.strip().split(',')
            if len(iperf_csv) == 9:
                return int(iperf_csv[-1]) / self.ONEMBPS
            time.sleep(1)
        self.fail('%s: %s' % (iperf_client_cmd, iperf_results))

    def iperf(self, client_host, server_host, server_ip, port, seconds):
        """Run iperf between two hosts and return client-side Mbps."""
        iperf_base_cmd = 'iperf -f M -p %u' % port
        if server_ip.version == 6:
            iperf_base_cmd += ' -V'
        iperf_server_cmd = '%s -s -B %s' % (iperf_base_cmd, server_ip)
        # Give the server room to outlive the client, but never hang forever.
        iperf_server_cmd = faucet_mininet_test_util.timeout_cmd(
            iperf_server_cmd, (seconds * 3) + 5)
        iperf_client_cmd = faucet_mininet_test_util.timeout_cmd(
            '%s -y c -c %s -t %u' % (iperf_base_cmd, server_ip, seconds),
            seconds + 5)
        server_start_exp = r'Server listening on TCP port %u' % port
        for _ in range(3):
            server_out = server_host.popen(
                iperf_server_cmd,
                stdin=faucet_mininet_test_util.DEVNULL,
                stderr=subprocess.STDOUT,
                close_fds=True)
            popens = {server_host: server_out}
            lines = []
            # Only start the client once the server reports it is listening.
            for host, line in pmonitor(popens):
                if host == server_host:
                    lines.append(line)
                    if re.search(server_start_exp, line):
                        self.wait_for_tcp_listen(
                            server_host, port, ipv=server_ip.version)
                        iperf_mbps = self.iperf_client(
                            client_host, iperf_client_cmd)
                        # SIGKILL the server so the port is freed for reuse.
                        self._signal_proc_on_port(server_host, port, 9)
                        return iperf_mbps
            time.sleep(1)
        self.fail('%s never started (%s, %s)' % (
            iperf_server_cmd, server_start_exp, ' '.join(lines)))

    def verify_ipv4_routing(self, first_host, first_host_routed_ip,
                            second_host, second_host_routed_ip,
                            iperf_port, with_group_table=False):
        """Verify one host can IPV4 route to another via FAUCET."""
        self.host_ipv4_alias(first_host, first_host_routed_ip)
        self.host_ipv4_alias(second_host, second_host_routed_ip)
        self.add_host_route(
            first_host, second_host_routed_ip, self.FAUCET_VIPV4.ip)
        self.add_host_route(
            second_host, first_host_routed_ip, self.FAUCET_VIPV4.ip)
        # Generate traffic so FAUCET learns/resolves the hosts.
        self.net.ping(hosts=(first_host, second_host))
        self.wait_for_route_as_flow(
            first_host.MAC(), first_host_routed_ip.network,
            with_group_table=with_group_table)
        self.wait_for_route_as_flow(
            second_host.MAC(), second_host_routed_ip.network,
            with_group_table=with_group_table)
        self.one_ipv4_ping(first_host, second_host_routed_ip.ip)
        self.one_ipv4_ping(second_host, first_host_routed_ip.ip)
        self.verify_ipv4_host_learned_host(first_host, second_host)
        self.verify_ipv4_host_learned_host(second_host, first_host)
        # verify at least 1M iperf
        for client_host, server_host, server_ip in (
                (first_host, second_host, second_host_routed_ip.ip),
                (second_host, first_host, first_host_routed_ip.ip)):
            iperf_mbps = self.iperf(
                client_host, server_host, server_ip, iperf_port, 5)
            error('%s: %u mbps to %s\n' % (
                self._test_name(), iperf_mbps, server_ip))
            self.assertGreater(iperf_mbps, 1)
        # verify packets matched routing flows
        self.wait_for_route_as_flow(
            first_host.MAC(), first_host_routed_ip.network,
            with_group_table=with_group_table,
            nonzero_packets=True)
        self.wait_for_route_as_flow(
            second_host.MAC(), second_host_routed_ip.network,
            with_group_table=with_group_table,
            nonzero_packets=True)

    def verify_ipv4_routing_mesh(self, iperf_port, with_group_table=False):
        """Verify hosts can route to each other via FAUCET."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_routed_ip = ipaddress.ip_interface(u'10.0.1.1/24')
        second_host_routed_ip = ipaddress.ip_interface(u'10.0.2.1/24')
        second_host_routed_ip2 = ipaddress.ip_interface(u'10.0.3.1/24')
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip,
            iperf_port,
            with_group_table=with_group_table)
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip2,
            iperf_port,
            with_group_table=with_group_table)
        # Swap MACs to force relearning, then repeat the same checks.
        self.swap_host_macs(first_host, second_host)
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip,
            iperf_port,
            with_group_table=with_group_table)
        self.verify_ipv4_routing(
            first_host, first_host_routed_ip,
            second_host, second_host_routed_ip2,
            iperf_port,
            with_group_table=with_group_table)

    def host_drop_all_ips(self, host):
        """Flush all IPv4 and IPv6 addresses from a host's default interface."""
        for ipv in (4, 6):
            host.cmd('ip -%u addr flush dev %s' % (ipv, host.defaultIntf()))

    def setup_ipv6_hosts_addresses(self, first_host, first_host_ip,
                                   first_host_routed_ip, second_host,
                                   second_host_ip, second_host_routed_ip):
        """Configure host IPv6 addresses for testing."""
        for host in first_host, second_host:
            host.cmd('ip -6 addr flush dev %s' % host.intf())
        self.add_host_ipv6_address(first_host, first_host_ip)
        self.add_host_ipv6_address(second_host, second_host_ip)
        self.add_host_ipv6_address(first_host, first_host_routed_ip)
        self.add_host_ipv6_address(second_host, second_host_routed_ip)
        for host in first_host, second_host:
            self.require_host_learned(host)

    def verify_ipv6_routing(self, first_host, first_host_ip,
                            first_host_routed_ip, second_host,
                            second_host_ip, second_host_routed_ip,
                            iperf_port, with_group_table=False):
        """Verify one host can IPV6 route to another via FAUCET."""
        self.one_ipv6_ping(first_host, second_host_ip.ip)
        self.one_ipv6_ping(second_host, first_host_ip.ip)
        self.add_host_route(
            first_host, second_host_routed_ip, self.FAUCET_VIPV6.ip)
        self.add_host_route(
            second_host, first_host_routed_ip, self.FAUCET_VIPV6.ip)
        self.wait_for_route_as_flow(
            first_host.MAC(), first_host_routed_ip.network,
            with_group_table=with_group_table)
        self.wait_for_route_as_flow(
            second_host.MAC(), second_host_routed_ip.network,
            with_group_table=with_group_table)
        self.one_ipv6_controller_ping(first_host)
        self.one_ipv6_controller_ping(second_host)
        self.one_ipv6_ping(first_host, second_host_routed_ip.ip)
        # verify at least 1M iperf
        for client_host, server_host, server_ip in (
                (first_host, second_host, second_host_routed_ip.ip),
                (second_host, first_host, first_host_routed_ip.ip)):
            iperf_mbps = self.iperf(
                client_host, server_host, server_ip, iperf_port, 5)
            error('%s: %u mbps to %s\n' % (
                self._test_name(), iperf_mbps, server_ip))
            self.assertGreater(iperf_mbps, 1)
        self.one_ipv6_ping(first_host, second_host_ip.ip)
        self.verify_ipv6_host_learned_mac(
            first_host, second_host_ip.ip, second_host.MAC())
        self.one_ipv6_ping(second_host, first_host_ip.ip)
        self.verify_ipv6_host_learned_mac(
            second_host, first_host_ip.ip, first_host.MAC())

    def verify_ipv6_routing_pair(self, first_host, first_host_ip,
                                 first_host_routed_ip, second_host,
                                 second_host_ip, second_host_routed_ip,
                                 iperf_port, with_group_table=False):
        """Verify hosts can route IPv6 to each other via FAUCET."""
        self.setup_ipv6_hosts_addresses(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip)
        self.verify_ipv6_routing(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip,
            iperf_port,
            with_group_table=with_group_table)

    def verify_ipv6_routing_mesh(self, iperf_port, with_group_table=False):
        """Verify IPv6 routing between hosts and multiple subnets."""
        host_pair = self.net.hosts[:2]
        first_host, second_host = host_pair
        first_host_ip = ipaddress.ip_interface(u'fc00::1:1/112')
        second_host_ip = ipaddress.ip_interface(u'fc00::1:2/112')
        first_host_routed_ip = ipaddress.ip_interface(u'fc00::10:1/112')
        second_host_routed_ip = ipaddress.ip_interface(u'fc00::20:1/112')
        second_host_routed_ip2 = ipaddress.ip_interface(u'fc00::30:1/112')
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip,
            iperf_port,
            with_group_table=with_group_table)
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip2,
            iperf_port,
            with_group_table=with_group_table)
        # Swap MACs to force relearning, then repeat the same checks.
        self.swap_host_macs(first_host, second_host)
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip,
            iperf_port,
            with_group_table=with_group_table)
        self.verify_ipv6_routing_pair(
            first_host, first_host_ip, first_host_routed_ip,
            second_host, second_host_ip, second_host_routed_ip2,
            iperf_port,
            with_group_table=with_group_table)

    def verify_invalid_bgp_route(self, pattern):
        """Check if we see the pattern in Faucet's log"""
        controller = self._get_controller()
        # NOTE(review): cmd() returns a string; assertGreater(str, 0) only
        # behaves as a count comparison under Python 2's cross-type
        # ordering (this file uses unicode(), so it is Python 2) -- it
        # would also pass when the grep count is 0. Confirm intended.
        count = controller.cmd(
            'grep -c "%s" %s' % (pattern, self.env['faucet']['FAUCET_LOG']))
        self.assertGreater(count, 0)
Bairdo/faucet
tests/faucet_mininet_test_base.py
Python
apache-2.0
72,009
0.000667
import os

from PyQt4.QtCore import pyqtSignal
from PyQt4.QtGui import QComboBox, QDoubleValidator

from configmanager.editorwidgets.core import ConfigWidget
from configmanager.editorwidgets.uifiles.ui_numberwidget_config import Ui_Form


class NumberWidgetConfig(Ui_Form, ConfigWidget):
    """Configuration editor for the number entry widget.

    Exposes min/max bounds and prefix/suffix text.  Any edit to one of the
    four fields triggers the inherited ``widgetchanged`` handler.
    """

    description = 'Number entry widget'

    def __init__(self, parent=None):
        super(NumberWidgetConfig, self).__init__(parent)
        self.setupUi(self)
        # Bounds must be numeric; restrict input with a double validator.
        self.minEdit.setValidator(QDoubleValidator())
        self.maxEdit.setValidator(QDoubleValidator())
        self.minEdit.textChanged.connect(self.widgetchanged)
        self.maxEdit.textChanged.connect(self.widgetchanged)
        self.prefixEdit.textChanged.connect(self.widgetchanged)
        self.suffixEdit.textChanged.connect(self.widgetchanged)

    def getconfig(self):
        """Return the current settings as a dict of strings.

        Keys: 'max', 'min', 'prefix', 'suffix'.  Values are the raw field
        texts (may be empty strings).
        """
        config = {}
        config['max'] = self.maxEdit.text()
        config['min'] = self.minEdit.text()
        config['prefix'] = self.prefixEdit.text()
        config['suffix'] = self.suffixEdit.text()
        return config

    def setconfig(self, config):
        """Load settings from *config* into the editor fields.

        Missing keys default to the empty string.
        """
        self.blockSignals(True)
        # NOTE(review): blockSignals(True) silences only this widget's own
        # signals; the child line edits still emit textChanged, so
        # widgetchanged may still fire while loading -- confirm intended.
        # Renamed locals: the original shadowed the builtins min/max.
        max_text = config.get('max', '')
        min_text = config.get('min', '')
        prefix = config.get('prefix', '')
        suffix = config.get('suffix', '')
        self.minEdit.setText(min_text)
        self.maxEdit.setText(max_text)
        self.prefixEdit.setText(prefix)
        self.suffixEdit.setText(suffix)
        self.blockSignals(False)
HeatherHillers/RoamMac
src/configmanager/editorwidgets/numberwidget.py
Python
gpl-2.0
1,472
0.004076
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from datetime import datetime, date

from optionaldict import optionaldict

from wechatpy.client.api.base import BaseWeChatAPI


class WeChatWiFi(BaseWeChatAPI):
    """Client for the WeChat WiFi ("bizwifi") APIs: shops, devices,
    QR-code material, shop homepage and usage statistics."""

    API_BASE_URL = 'https://api.weixin.qq.com/bizwifi/'

    def list_shops(self, page_index=1, page_size=20):
        """
        Get the shop list.

        Reference:
        http://mp.weixin.qq.com/wiki/15/bcfb5d4578ea818b89913472cf2bbf8f.html

        :param page_index: Optional. Page index, starting from 1.
        :param page_size: Optional. Items per page; default 20, maximum 20.
        :return: The ``data`` section of the decoded JSON response.
        """
        res = self._post(
            'shop/list',
            data={
                'pageindex': page_index,
                'pagesize': page_size,
            }
        )
        return res['data']

    def add_device(self, shop_id, ssid, password, bssid):
        """
        Add a WiFi device.

        Reference:
        http://mp.weixin.qq.com/wiki/10/6232005bdc497f7cf8e19d4e843c70d2.html

        :param shop_id: Shop ID.
        :param ssid: SSID of the wireless device. For non-verified official
            accounts the SSID must start with uppercase "WX"; verified
            accounts and third-party platforms have no such restriction.
            SSIDs must not contain Chinese characters.
        :param password: Device password; longer than 8 characters and must
            not contain Chinese characters.
        :param bssid: Wireless MAC address of the device, colon-separated,
            17 characters long, lowercase letters.
        :return: The decoded JSON response.
        """
        return self._post(
            'device/add',
            data={
                'shop_id': shop_id,
                'ssid': ssid,
                'password': password,
                'bssid': bssid,
            }
        )

    def list_devices(self, shop_id=None, page_index=1, page_size=20):
        """
        Query WiFi devices.

        Reference:
        http://mp.weixin.qq.com/wiki/10/6232005bdc497f7cf8e19d4e843c70d2.html

        :param shop_id: Optional. Shop ID.
        :param page_index: Optional. Page index, starting from 1.
        :param page_size: Optional. Items per page; default 20, maximum 20.
        :return: The ``data`` section of the decoded JSON response.
        """
        # optionaldict drops None values so shop_id is omitted when unset.
        data = optionaldict(
            shop_id=shop_id,
            pageindex=page_index,
            pagesize=page_size
        )
        res = self._post('device/list', data=data)
        return res['data']

    def delete_device(self, bssid):
        """
        Delete a WiFi device.

        Reference:
        http://mp.weixin.qq.com/wiki/10/6232005bdc497f7cf8e19d4e843c70d2.html

        :param bssid: Wireless MAC address of the device, colon-separated,
            17 characters long, lowercase letters.
        :return: The decoded JSON response.
        """
        return self._post('device/delete', data={'bssid': bssid})

    def get_qrcode_url(self, shop_id, img_id):
        """
        Get the URL of the promotional QR-code image.

        Reference:
        http://mp.weixin.qq.com/wiki/7/fcd0378ef00617fc276be2b3baa80973.html

        :param shop_id: Shop ID.
        :param img_id: Material style number: 0 - bare QR code, usable in
            free-form promotional material; 1 - table sticker (QR code),
            100mm x 100mm (width x height), ready to stick on.
        :return: The QR-code image URL.
        """
        res = self._post(
            'qrcode/get',
            data={
                'shop_id': shop_id,
                'img_id': img_id,
            }
        )
        return res['data']['qrcode_url']

    def set_homepage(self, shop_id, template_id, url=None):
        """
        Set the merchant homepage.

        Reference:
        http://mp.weixin.qq.com/wiki/6/2732f3cf83947e0e4971aa8797ee9d6a.html

        :param shop_id: Shop ID.
        :param template_id: Template ID: 0 - default template,
            1 - custom URL.
        :param url: Custom link; required when template_id is 1.
        :return: The decoded JSON response.
        """
        data = {
            'shop_id': shop_id,
            'template_id': template_id,
        }
        if url:
            data['struct'] = {'url': url}
        return self._post('homepage/set', data=data)

    def get_homepage(self, shop_id):
        """
        Query the merchant homepage.

        Reference:
        http://mp.weixin.qq.com/wiki/6/2732f3cf83947e0e4971aa8797ee9d6a.html

        :param shop_id: Shop ID.
        :return: The ``data`` section of the decoded JSON response.
        """
        res = self._post('homepage/get', data={'shop_id': shop_id})
        return res['data']

    def list_statistics(self, begin_date, end_date, shop_id=-1):
        """
        Get WiFi usage statistics.

        Reference:
        http://mp.weixin.qq.com/wiki/8/dfa2b756b66fca5d9b1211bc18812698.html

        :param begin_date: Start date; the range may span at most 30 days.
        :param end_date: End date; the range may span at most 30 days.
        :param shop_id: Optional. Shop ID to filter by; -1 means overall
            totals.
        :return: The ``data`` section of the decoded JSON response.
        """
        # Accept date/datetime objects and normalize to the API's format.
        if isinstance(begin_date, (datetime, date)):
            begin_date = begin_date.strftime('%Y-%m-%d')
        if isinstance(end_date, (datetime, date)):
            end_date = end_date.strftime('%Y-%m-%d')
        res = self._post(
            'statistics/list',
            data={
                'begin_date': begin_date,
                'end_date': end_date,
                'shop_id': shop_id
            }
        )
        return res['data']
chenjiancan/wechatpy
wechatpy/client/api/wifi.py
Python
mit
5,576
0
#!/usr/bin/env python """ conference.py -- Udacity conference server-side Python App Engine API; uses Google Cloud Endpoints $Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $ created by wesc on 2014 apr 21 """ __author__ = 'wesc+api@google.com (Wesley Chun)' from datetime import datetime import json import os import time from utils import getUserId from utils import validate_websafe_key from utils import ndb_to_message from utils import message_to_ndb import endpoints from protorpc import messages from protorpc import message_types from protorpc import remote from google.appengine.api import urlfetch from google.appengine.ext import ndb from google.appengine.api import memcache from google.appengine.api import taskqueue from models import Profile from models import ProfileMiniForm from models import ProfileForm from models import TeeShirtSize from models import Conference from models import ConferenceForm from models import ConferenceForms from models import ConferenceQueryForm from models import ConferenceQueryForms from models import BooleanMessage from models import ConflictException from models import StringMessage from models import Session from models import SessionForm from models import SessionCreateForm from models import SessionForms from models import SessionType from models import Speaker from models import SpeakerForm from models import SpeakerCreateForm from models import SpeakerForms from models import WishList from models import WishListForm from settings import WEB_CLIENT_ID EMAIL_SCOPE = endpoints.EMAIL_SCOPE API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID # - - - Conference Defaults - - - - - - - - - - - - - - - - - DEFAULTS = { "city": "Default City", "maxAttendees": 0, "seatsAvailable": 0, "topics": [ "Default", "Topic" ], } OPERATORS = { 'EQ': '=', 'GT': '>', 'GTEQ': '>=', 'LT': '<', 'LTEQ': '<=', 'NE': '!=' } FIELDS = { 'CITY': 'city', 'TOPIC': 'topics', 'MONTH': 'month', 'MAX_ATTENDEES': 'maxAttendees', } 
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT ANNOUNCEMENTS" CONF_POST_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage, websafeConferenceKey=messages.StringField(1), register=messages.BooleanField(2)) CONF_GET_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage, websafeConferenceKey=messages.StringField(1)) SESSION_POST_REQUEST = endpoints.ResourceContainer(SessionCreateForm, websafeConferenceKey=messages.StringField(1)) SESSIONS_GET_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage, websafeConferenceKey=messages.StringField(1), sessionType=messages.StringField(2)) SPEAKER_GET_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage, websafeSpeakerKey=messages.StringField(1)) WISHLIST_PUT_REQUEST = endpoints.ResourceContainer(message_types.VoidMessage, add=messages.StringField(1), remove=messages.StringField(2)) SPEAKER_QUERY_BY_NAME = endpoints.ResourceContainer(message_types.VoidMessage, firstName=messages.StringField(1), lastName=messages.StringField(2)) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @endpoints.api(name='conference', version='v1', allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID], scopes=[EMAIL_SCOPE]) class ConferenceApi(remote.Service): """Conference API v0.1""" # - - - Profile objects - - - - - - - - - - - - - - - - - - - def _copyProfileToForm(self, prof): """Copy relevant fields from Profile to ProfileForm.""" # copy relevant fields from Profile to ProfileForm pf = ProfileForm() for field in pf.all_fields(): if hasattr(prof, field.name): # convert t-shirt string to Enum; just copy others if field.name == 'teeShirtSize': setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name))) else: setattr(pf, field.name, getattr(prof, field.name)) pf.check_initialized() return pf def _getProfileFromUser(self): """Return user Profile from datastore, creating new one if non-existent.""" ## TODO 2 ## step 1: make sure user is authed ## uncomment the following lines: user = 
endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required') user_id = getUserId(user) p_key = ndb.Key(Profile, user_id) profile = p_key.get() ## step 2: create a new Profile from logged in user data ## you can use user.nickname() to get displayName ## and user.email() to get mainEmail if not profile: profile = Profile(userId=None, key=p_key, displayName=user.nickname(), mainEmail=user.email(), teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED), ) profile.put() return profile # return Profile def _doProfile(self, save_request=None): """Get user Profile and return to user, possibly updating it first.""" # get user Profile prof = self._getProfileFromUser() # if saveProfile(), process user-modifyable fields if save_request: for field in ('displayName', 'teeShirtSize'): if hasattr(save_request, field): val = getattr(save_request, field) if val: setattr(prof, field, str(val)) prof.put() # return ProfileForm return self._copyProfileToForm(prof) @endpoints.method(message_types.VoidMessage, ProfileForm, path='profile', http_method='GET', name='getProfile') def getProfile(self, request): """Return user profile.""" return self._doProfile() # TODO 1 # 1. change request class # 2. 
pass request to _doProfile function @endpoints.method(ProfileMiniForm, ProfileForm, path='profile', http_method='POST', name='saveProfile') def saveProfile(self, request): """Update & return user profile.""" return self._doProfile(request) # - - - Conference objects - - - - - - - - - - - - - - - - - def _copyConferenceToForm(self, conf, displayName): """Copy relevant fields from Conference to ConferenceForm.""" cf = ConferenceForm() for field in cf.all_fields(): if hasattr(conf, field.name): # convert Date to date string; just copy others if field.name.endswith('Date'): setattr(cf, field.name, str(getattr(conf, field.name))) else: setattr(cf, field.name, getattr(conf, field.name)) elif field.name == "websafeKey": setattr(cf, field.name, conf.key.urlsafe()) if displayName: setattr(cf, 'organizerDisplayName', displayName) cf.check_initialized() return cf def _createConferenceObject(self, request): """Create or update Conference object, returning ConferenceForm/request.""" # preload necessary data items user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required') user_id = getUserId(user) if not request.name: raise endpoints.BadRequestException( "Conference 'name' field required") # copy ConferenceForm/ProtoRPC Message into dict data = {field.name: getattr(request, field.name) for field in request.all_fields()} del data['websafeKey'] del data['organizerDisplayName'] # add default values for those missing (both data model & outbound # Message) for df in DEFAULTS: if data[df] in (None, []): data[df] = DEFAULTS[df] setattr(request, df, DEFAULTS[df]) # convert dates from strings to Date objects; set month based on # start_date if data['startDate']: data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date() data['month'] = data['startDate'].month else: data['month'] = 0 if data['endDate']: data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date() # set seatsAvailable to be same as 
maxAttendees on creation # both for data model & outbound Message if data["maxAttendees"] > 0: data["seatsAvailable"] = data["maxAttendees"] setattr(request, "seatsAvailable", data["maxAttendees"]) # make Profile Key from user ID p_key = ndb.Key(Profile, user_id) # allocate new Conference ID with Profile key as parent c_id = Conference.allocate_ids(size=1, parent=p_key)[0] # make Conference key from ID c_key = ndb.Key(Conference, c_id, parent=p_key) data['key'] = c_key data['organizerUserId'] = request.organizerUserId = user_id # Look for TODO 2 # create Conference, send email to organizer confirming # creation of Conference & return (modified) ConferenceForm Conference(**data).put() taskqueue.add( params={'email': user.email(), 'conferenceInfo': repr(request)}, url='/tasks/send_confirmation_email') return request @endpoints.method(ConferenceForm, ConferenceForm, path='conference', http_method='POST', name='createConference') def createConference(self, request): """Create new conference.""" return self._createConferenceObject(request) @endpoints.method(CONF_GET_REQUEST, ConferenceForm, path='conference/{websafeConferenceKey}', http_method='GET', name='getConference') def getConference(self, request): """ Gets details about the specified conference """ conf_key = ndb.Key(urlsafe=request.websafeConferenceKey) conference = conf_key.get() return self._copyConferenceToForm(conference, "") def _getQuery(self, request): """Return formatted query from the submitted filters.""" q = Conference.query() inequality_filter, filters = self._formatFilters(request.filters) # If exists, sort on inequality filter first if not inequality_filter: q = q.order(Conference.name) else: q = q.order(ndb.GenericProperty(inequality_filter)) q = q.order(Conference.name) for filtr in filters: if filtr["field"] in ["month", "maxAttendees"]: filtr["value"] = int(filtr["value"]) formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"]) q = q.filter(formatted_query) 
return q def _formatFilters(self, filters): """Parse, check validity and format user supplied filters.""" formatted_filters = [] inequality_field = None for f in filters: filtr = {field.name: getattr(f, field.name) for field in f.all_fields()} try: filtr["field"] = FIELDS[filtr["field"]] filtr["operator"] = OPERATORS[filtr["operator"]] except KeyError: raise endpoints.BadRequestException( "Filter contains invalid field or operator.") # Every operation except "=" is an inequality if filtr["operator"] != "=": # check if inequality operation has been used in previous # filters # disallow the filter if inequality was performed on a # different field before # track the field on which the inequality operation is # performed if inequality_field and inequality_field != filtr["field"]: raise endpoints.BadRequestException( "Inequality filter is allowed on only one field.") else: inequality_field = filtr["field"] formatted_filters.append(filtr) return (inequality_field, formatted_filters) @endpoints.method(ConferenceQueryForms, ConferenceForms, path='queryConferences', http_method='POST', name='queryConferences') def queryConferences(self, request): """Query for conferences.""" conferences = self._getQuery(request) # return individual ConferenceForm object per Conference return ConferenceForms( items=[self._copyConferenceToForm(conf, "") for conf in conferences]) @endpoints.method(message_types.VoidMessage, ConferenceForms, path='getConferencesCreated', http_method='GET', name='getConferencesCreated') def getConferencesCreated(self, request): """ Get a list of all the conferences created by the logged in user """ user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required') user_id = getUserId(user) profile_key = ndb.Key(Profile, user_id) conferences = Conference.query(ancestor=profile_key) profile = profile_key.get() display_name = getattr(profile, 'displayName') return ConferenceForms( 
items=[self._copyConferenceToForm(conf, display_name) for conf in conferences]) # - - - Registration - - - - - - - - - - - - - - - - - - - - @ndb.transactional(xg=True) def _conferenceRegistration(self, request, reg=True): """Register or unregister user for selected conference.""" retval = None prof = self._getProfileFromUser() # get user Profile # check if conf exists given websafeConfKey # get conference; check that it exists wsck = request.websafeConferenceKey conf = ndb.Key(urlsafe=wsck).get() if not conf: raise endpoints.NotFoundException( 'No conference found with key: %s' % wsck) # register if reg: # check if user already registered otherwise add if wsck in prof.conferenceKeysToAttend: raise ConflictException( "You have already registered for this conference") # check if seats avail if conf.seatsAvailable <= 0: raise ConflictException("There are no seats available.") # register user, take away one seat prof.conferenceKeysToAttend.append(wsck) conf.seatsAvailable -= 1 retval = True # unregister else: # check if user already registered if wsck in prof.conferenceKeysToAttend: # unregister user, add back one seat prof.conferenceKeysToAttend.remove(wsck) conf.seatsAvailable += 1 retval = True else: retval = False # write things back to the datastore & return prof.put() conf.put() return BooleanMessage(data=retval) @endpoints.method(CONF_POST_REQUEST, BooleanMessage, path='conference/{websafeConferenceKey}', http_method='POST', name='registerForConference') def registerForConference(self, request): """Register user for selected conference.""" if request.register == False: return self._conferenceRegistration(request, False) else: return self._conferenceRegistration(request) @endpoints.method(message_types.VoidMessage, ConferenceForms, path='conferences/attending', http_method='GET', name='getConferencesToAttend') def getConferencesToAttend(self, request): """Get list of conferences that user has registered for.""" # TODO: # step 1: get user profile user = 
endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorizaton required.') profile_key = ndb.Key(Profile, getUserId(user)) profile = profile_key.get() # step 2: get conferenceKeysToAttend from profile. # to make a ndb key from websafe key you can use: # ndb.Key(urlsafe=my_websafe_key_string) conferenceWsKeysToAttend = profile.conferenceKeysToAttend conferenceKeysToAttend = [] for wsKey in conferenceWsKeysToAttend: key = ndb.Key(urlsafe=wsKey) conferenceKeysToAttend.append(key) # step 3: fetch conferences from datastore. # Use get_multi(array_of_keys) to fetch all keys at once. # Do not fetch them one by one! conferences = ndb.get_multi(conferenceKeysToAttend) # return set of ConferenceForm objects per Conference return ConferenceForms( items=[self._copyConferenceToForm(conf, "") for conf in conferences]) # - - - Announcements - - - - - - - - - - - - - - - - - - - - @staticmethod def _cacheAnnouncement(): """Create Announcement & assign to memcache; used by memcache cron job & putAnnouncement(). """ confs = Conference.query(ndb.AND(Conference.seatsAvailable <= 5, Conference.seatsAvailable > 0)).fetch( projection=[Conference.name]) if confs: # If there are almost sold out conferences, # format announcement and set it in memcache announcement = '%s %s' % ( 'Last chance to attend! The following conferences ' 'are nearly sold out:', ', '.join(conf.name for conf in confs)) memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement) else: # If there are no sold out conferences, # delete the memcache announcements entry announcement = "" memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY) return announcement @endpoints.method(message_types.VoidMessage, StringMessage, path='conference/announcement/get', http_method='GET', name='getAnnouncement') def getAnnouncement(self, request): """Return Announcement from memcache.""" # TODO 1 # return an existing announcement from Memcache or an empty string. 
announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) if announcement is None: announcement = "" return StringMessage(data=announcement) # - - - Sessions - - - - - - - - - - - - - - - - - - - - - - - - - def _getSessionFormsFromWsKeys(self, ws_session_keys): """ Returns a list of sessions as a session_forms message given an array of websafe session keys :param ws_session_keys: String, websafe session keys :return: session_forms """ session_keys = [] for ws_session_key in ws_session_keys: session_key = ndb.Key(urlsafe=ws_session_key) session_keys.append(session_key) sessions = ndb.get_multi(session_keys) session_forms = SessionForms( items=[self._copySessionToForm(session) for session in sessions]) return session_forms def _copySessionToForm(self, session): """ Converts a session object into a session_form message :param session: A session object :return: session_form """ session_form = SessionForm() exclusions = ['typeOfSession', 'speaker'] session_form = ndb_to_message(session, session_form, exclusions) if session.typeOfSession: session_form.typeOfSession = SessionType(session.typeOfSession) if session.speaker: session_form.speaker = self._getSpeakerFormFromWsKey( session.speaker) session_form.check_initialized() return session_form def _createSessionObject(self, request): """ :param request: the endpoint request :return: session_form, message of the newly created session """ user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required.') user_id = getUserId(user) # make sure we're given a websafe conference key conference_key = validate_websafe_key(request.websafeConferenceKey, 'Conference') # if we're given a websafe speaker key, make sure it's valid if request.speaker: validate_websafe_key(request.speaker, 'Speaker') # get the conference conference = conference_key.get() # make sure the user can edit this conference if conference.organizerUserId != user_id: raise endpoints.BadRequestException( 'You cannot edit 
this conference.') # create a session object session = Session() # list the fields we want to exclude exclusions = ['websafeConferenceKey', 'typeOfSession'] # use our handy copy function to copy the other fields session = message_to_ndb(request, session, exclusions) # deal with typeOfSession and get the enum value if request.typeOfSession: session.typeOfSession = str(SessionType(request.typeOfSession)) else: session.typeOfSession = str(SessionType.NOT_SPECIFIED) # allocate an id and create the key session_id = Session.allocate_ids(size=1, parent=conference_key)[0] session.key = ndb.Key(Session, session_id, parent=conference_key) # save the session to ndb session.put() # kick off the featured speaker task taskqueue.add(url='/tasks/set_featured_speaker', params={'conference_key': conference_key.urlsafe()}) # return the newly created session return self._copySessionToForm(session) @endpoints.method(SESSION_POST_REQUEST, SessionForm, path='conference/{websafeConferenceKey}/session', http_method='POST', name='createConferenceSession') def createConferenceSession(self, request): """ Create a session for a conference. """ return self._createSessionObject(request) @endpoints.method(SESSIONS_GET_REQUEST, SessionForms, path='conference/{websafeConferenceKey}/session', http_method='GET', name='getConferenceSessions') def getConferenceSessions(self, request): """ Get the list of sessions for a conference. 
""" conference_key = validate_websafe_key(request.websafeConferenceKey, 'Conference') # Get all the sessions where the provided Conference is the ancestor q = Session.query(ancestor=conference_key) # If sessionType is provided as a query string, apply as a filter if request.sessionType: session_type = request.sessionType.upper() if hasattr(SessionType, session_type): q = q.filter(Session.typeOfSession == session_type) # return the list of sessions q = q.order(Session.startTime) return SessionForms( items=[self._copySessionToForm(session) for session in q]) # - - - - SPEAKERS - - - - - - - - - - - - - - - - - - - - - - - - - - - - def _copySpeakerToForm(self, speaker): """ Copies the fields from a Speaker object to a Speaker message """ speaker_form = SpeakerForm() speaker_form = ndb_to_message(speaker, speaker_form) speaker_form.check_initialized() return speaker_form def _getSpeakerFormFromWsKey(self, ws_speaker_key): """ Returns a Speaker message given a websafe Speaker key. :param ws_speaker_key: String, websafe Speaker key :return: speaker_form """ # if ndb.Key(urlsafe=ws_speaker_key).kind() != 'Speaker': # raise endpoints.BadRequestException('Invalid speaker provided.') speaker_key = ndb.Key(urlsafe=ws_speaker_key) speaker = speaker_key.get() speaker_form = self._copySpeakerToForm(speaker) return speaker_form @endpoints.method(SpeakerCreateForm, SpeakerForm, path='speaker', http_method='POST', name='createSpeaker') def createSpeaker(self, request): """ Create a speaker """ # Make sure the user is logged in. 
user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required.') user_id = getUserId(user) user_key = ndb.Key(Profile, user_id) # Create the Speaker object speaker = Speaker() # Copy the fields from the request to the Speaker speaker = message_to_ndb(request, speaker) # Allocate the Speaker id and set the key with the User as parent speaker_id = speaker.allocate_ids(size=1, parent=user_key)[0] speaker.key = ndb.Key(Speaker, speaker_id, parent=user_key) # Write the speaker to the db speaker.put() # Create a SpeakerForm and copy the fields from the request speaker_form = SpeakerForm() speaker_form = ndb_to_message(speaker, speaker_form) # Send back the SpeakerForm including the websafe key return speaker_form @endpoints.method(SPEAKER_GET_REQUEST, SpeakerForm, path='speaker/{websafeSpeakerKey}', http_method='GET', name='getSpeaker') def getSpeaker(self, request): """ Get all the information about a speaker. """ # validate the websafe speaker key and retrieve the entity key speaker_key = validate_websafe_key(request.websafeSpeakerKey, 'Speaker') # get the speaker from the db speaker = speaker_key.get() # return a message object with the speaker info return self._copySpeakerToForm(speaker) @endpoints.method(message_types.VoidMessage, SpeakerForms, path='speaker', http_method='GET', name='getSpeakerList') def getSpeakerList(self, request): """ List all of the speakers. """ q = Speaker.query() # Order the speakers by last name then first name q = q.order(Speaker.lastName) q = q.order(Speaker.firstName) speakers = q.fetch() # return the list of speakers return SpeakerForms( items=[self._copySpeakerToForm(speaker) for speaker in speakers]) @endpoints.method(SPEAKER_GET_REQUEST, SessionForms, path='speakers/{websafeSpeakerKey}/sessions', http_method='GET', name='getSessionsBySpeaker') def getSessionsBySpeaker(self, request): """ Get the sessions at which a speaker is speaker across all Conferences. 
""" # Validate the websafe speaker key and retrieve the entity key speaker_key = validate_websafe_key(request.websafeSpeakerKey, 'Speaker') # query sessions where the speaker is the requested speaker q = Session.query() q = q.filter(Session.speaker == speaker_key.urlsafe()) sessions = q.fetch() # return the list of sessions return SessionForms( items=[self._copySessionToForm(session) for session in sessions]) # - - - - Wish List - - - - - - - - - - - - - - - - - - - - - - - - - - - - def _createWishlist(self, user_key): ''' Creates a wishlist for a user ''' wishlist_id = WishList.allocate_ids(size=1, parent=user_key)[0] wishlist_key = ndb.Key(WishList, wishlist_id, parent=user_key) wishlist = WishList() wishlist.key = wishlist_key return wishlist def _copyWishListToForm(self, wishlist): ''' Creates a message from a wishlist ''' wishlist_form = WishListForm() wishlist_form.sessions = self._getSessionFormsFromWsKeys( wishlist.sessions) return wishlist_form @endpoints.method(CONF_GET_REQUEST, WishListForm, path='user/wishlist/{websafeConferenceKey}', http_method='GET', name='getSessionsInWishlistByConference') def getSessionsInWishlistByConference(self, request): """ List the wishlist items for the specified conference. 
""" # validate the websafe conference key conference_key = validate_websafe_key(request.websafeConferenceKey, 'Conference') # confirm the user user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required.') user_id = getUserId(user) user_key = ndb.Key(Profile, user_id) # get the user's wishlist sessions as a projection q_wishlist = WishList.query(ancestor=user_key) # wl_sessions = q_wishlist.fetch(1, projection=[WishList.sessions]) wishlist = q_wishlist.fetch(1)[0] wishlist_session_keys = [] # for session in wl_sessions: for session in wishlist.sessions: wishlist_session_keys.append(ndb.Key(urlsafe=session)) # query Sessions where the specified Conference is the ancestor session_q = Session.query(ancestor=conference_key) # filter the Sessions to include only the sessions in the wishlist session_q = session_q.filter(Session.key.IN(wishlist_session_keys)) # get the keys of those sessions, which are the ones we're looking for conf_session_keys = session_q.fetch(keys_only=True) # create a wishlist short_wishlist = WishList() # copy the found Session keys into the wishlist as websafe keys for key in conf_session_keys: short_wishlist.sessions.append(key.urlsafe()) # return the reduced wishlist as a message return self._copyWishListToForm(short_wishlist) @endpoints.method(WISHLIST_PUT_REQUEST, WishListForm, path='user/wishlist', http_method='PUT', name='updateWishlist') def updateWishlist(self, request): """ Add or remove sessions to the logged in user's wishlist """ user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required.') # Validate the websafe session key to add ws_add_key = None ws_remove_key = None if request.add: ws_add_key = validate_websafe_key(request.add, 'Session', False) # Validate the websafe session key to remove if request.remove: ws_remove_key = validate_websafe_key(request.remove, 'Session', False) # Get the user wishlist user_id = getUserId(user) 
user_key = ndb.Key(Profile, user_id) wishlist = WishList.query(ancestor=user_key).fetch(1) # If there wasn't previously a wishlist, create it if not wishlist: wishlist = self._createWishlist(user_key) else: wishlist = wishlist[0] # If there is a session to add, add it if ws_add_key: if ws_add_key not in wishlist.sessions: wishlist.sessions.append(ws_add_key) # If there is a session to remove, remove it if ws_remove_key: if ws_remove_key in wishlist.sessions: wishlist.sessions.remove(ws_remove_key) # Save the wishlist to db wishlist.put() # Create a message of the newly created wishlist wishlist_form = self._copyWishListToForm(wishlist) return wishlist_form @endpoints.method(message_types.VoidMessage, WishListForm, path='user/wishlist', http_method='GET', name='getSessionsInWishlist') def getSessionsInWishlist(self, request): """ List all of the sessions in the logged in user's wishlist """ user = endpoints.get_current_user() if not user: raise endpoints.UnauthorizedException('Authorization required.') user_id = getUserId(user) user_key = ndb.Key(Profile, user_id) # Get the user's wishlist wishlist = WishList.query(ancestor=user_key).fetch(1) if wishlist: wishlist = wishlist[0] # Return the wishlist return self._copyWishListToForm(wishlist) # - - - - FEATURED SPEAKER - - - - - - - - - - - - - - - - - - - - - @staticmethod def _get_conf_featured_speaker(conference_key): # Get all the sessions for a Conference ordered by created datetime q = Session.query(ancestor=conference_key) q = q.order(-Session.created_at) # Just need the speaker websafe key sessions = q.fetch(projection=Session.speaker) # Count the sessions for each speaker speaker_counter = {} for session in sessions: if session.speaker: if session.speaker not in speaker_counter: speaker_counter[session.speaker] = 1 else: speaker_counter[session.speaker] += 1 # Find the maximum number of times a speaker is speaking if not speaker_counter: return None max_speaker_count = max(speaker_counter.values()) # Get all 
the speakers who are speaking the max number of times max_speakers = [key for key in speaker_counter.keys() if speaker_counter[key] == max_speaker_count] # featured_speaker_ws_key = '' featured_speaker_ws_key = None # If there is only one speaker, that's our featured speaker if len(max_speakers) == 1: featured_speaker_ws_key = max_speakers[0] # Else, cycle through the sessions and get the first speaker who # is in the set of speakers speaking the max number of times else: for session in sessions: if session.speaker in max_speakers: featured_speaker_ws_key = session.speaker break if featured_speaker_ws_key: return featured_speaker_ws_key else: return None @staticmethod def _featured_speaker_memcache_key(conference_key): # Create and return a memcache key for the featured speaker memcache_key = '-'.join(("feature-speaker", str(conference_key))) return memcache_key @classmethod def _cache_featured_speaker(cls, ws_conference_key): # Get the conference key from the websafe key conference_key = ndb.Key(urlsafe=ws_conference_key) # Get the featured speaker featured_speaker = cls._get_conf_featured_speaker(conference_key) # Get the memcache key memcache_key = cls._featured_speaker_memcache_key(ws_conference_key) # If there is a featured speaker, set the message and save it to cache if featured_speaker: speaker_key = ndb.Key(urlsafe=featured_speaker) speaker = speaker_key.get() speaker_name = ' '.join((speaker.firstName, speaker.lastName)) message = '%s %s' % (speaker_name, 'is the featured speaker.') memcache.set(memcache_key, message) else: message = "" memcache.delete(memcache_key) return message @classmethod def _cache_featured_speakers(cls): # Get the keys for all conferences conferences = Conference.query().fetch(keys_only=True) # For each Conference websafe key, cache the featured speaker for conference in conferences: if conference: cls._cache_featured_speaker(conference.urlsafe()) return @endpoints.method(CONF_GET_REQUEST, StringMessage, 
path='conference/{websafeConferenceKey}/featuredSpeaker', http_method='GET', name='getConferenceFeaturedSpeaker') def getConferenceFeaturedSpeaker(self, request): """ Gets the featured speaker for a conference """ ws_conference_key = validate_websafe_key(request.websafeConferenceKey, 'Conference', False) # Get the memcache key we're looking for memcache_key = self._featured_speaker_memcache_key(ws_conference_key) # retrieve the message from memcache message = memcache.get(memcache_key) # If there is a message, return it if message is None: message = "" return StringMessage(data=message) # - - - - Query Problem - - - - - - - - - - - - - - - - - - - - - - - - - @endpoints.method(CONF_GET_REQUEST, SessionForms, path='conference/{websafeConferenceKey}/queryProblem', http_method='GET', name='conferenceQueryProblem') def conferenceQueryProblem(self, request): """ Returns sessions before 7pm that are not workshops """ # Validate the websafe conference key and retrieve the entity key conference_key = validate_websafe_key(request.websafeConferenceKey, 'Conference') # Query for all sessions which are children of the conference q = Session.query(ancestor=conference_key) # Filter for startTime less than 7pm (19:00) startTimeFilter = datetime.strptime('19:00:00', '%H:%M:%S').time() q = q.filter(Session.startTime < startTimeFilter) q = q.filter(Session.startTime != None) q = q.order(Session.startTime) # Get the result with a projection of typeOfSession earlySessions = q.fetch(projection=[Session.typeOfSession]) # Iterate through the results and keep only non-workshop results keys = [s.key for s in earlySessions if s.typeOfSession != 'WORKSHOP'] # Get the db results for the reduced set of keys sessions = ndb.get_multi(keys) # Return the result as a list of sessions return SessionForms( items=[self._copySessionToForm(session) for session in sessions]) # - - - - Additional Queries - - - - - - - - - - - - - - - - - - - - - - @endpoints.method(message_types.VoidMessage, 
ConferenceForms, path='conference/query/seatsAvailable', http_method='GET', name='getConferencesWithSpace') def getConferencesWithSpace(self, request): ''' Get a list of conferences with seats available. ''' q = Conference.query() q = q.filter(Conference.seatsAvailable > 0) q = q.order(Conference.seatsAvailable) return ConferenceForms( items=[self._copyConferenceToForm(conf, "") for conf in q]) @endpoints.method(SPEAKER_QUERY_BY_NAME, SessionForms, path='session/query/byName', http_method='POST', name='getSessionsBySpeakerName') def getSessionsBySpeakerName(self, request): """ Get a list of sessions by speaker first and/or last name. """ first_name = request.firstName last_name = request.lastName if not first_name and not last_name: return SessionForms(items=[]) # query speakers for first name and last name, if provided speaker_q = Speaker.query() if first_name: speaker_q = speaker_q.filter(Speaker.firstName == first_name) if last_name: speaker_q = speaker_q.filter(Speaker.lastName == last_name) speaker_keys = speaker_q.fetch(keys_only=True) # convert the speaker keys to websafe speaker keys ws_speaker_keys = [] for key in speaker_keys: ws_speaker_keys.append(key.urlsafe()) # query the sessions that have one of these websafe speaker keys session_q = Session.query() session_q = session_q.filter(Session.speaker.IN(ws_speaker_keys)) # return the sessions return SessionForms(items=[self._copySessionToForm(session) for session in session_q]) # registers API api = endpoints.api_server([ConferenceApi])
kirklink/udacity-fullstack-p4
conference.py
Python
apache-2.0
40,994
0.000488
# -*- coding: utf-8 -*-

"""
***************************************************************************
    ExportGeometryInfo.py
    ---------------------
    Date                 : August 2012
    Copyright            : (C) 2012 by Victor Olaya
    Email                : volayaf at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'

# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'

import os

from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsCoordinateTransform,
                       QgsField,
                       QgsFields,
                       QgsWkbTypes,
                       QgsFeatureSink,
                       QgsDistanceArea,
                       QgsProcessingUtils,
                       QgsProcessingParameterFeatureSource,
                       QgsProcessingParameterEnum,
                       QgsProcessingParameterFeatureSink)

from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from processing.tools import vector

pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]


class ExportGeometryInfo(QgisAlgorithm):
    """Processing algorithm that copies every input feature and appends
    geometry-derived attribute columns:

    * polygon layers -> ``area`` and ``perimeter``
    * line layers    -> ``length``
    * point layers   -> ``xcoord``/``ycoord`` (plus ``zcoord`` and/or
      ``mvalue`` when the layer stores Z or M values)

    Measurements are taken in the layer CRS, the project CRS or on the
    project ellipsoid, selected through the ``CALC_METHOD`` enum
    parameter.
    """

    INPUT = 'INPUT'
    METHOD = 'CALC_METHOD'
    OUTPUT = 'OUTPUT'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'export_geometry.png'))

    def tags(self):
        return self.tr('export,add,information,measurements,areas,lengths,perimeters,latitudes,longitudes,x,y,z,extract,points,lines,polygons').split(',')

    def group(self):
        return self.tr('Vector geometry')

    def __init__(self):
        super().__init__()
        # Per-run state; (re)initialised at the top of processAlgorithm().
        self.export_z = False
        self.export_m = False
        self.distance_area = None
        self.calc_methods = [self.tr('Layer CRS'),
                             self.tr('Project CRS'),
                             self.tr('Ellipsoidal')]

    def initAlgorithm(self, config=None):
        self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
                                                              self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterEnum(self.METHOD,
                                                     self.tr('Calculate using'), options=self.calc_methods, defaultValue=0))
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Added geom info')))

    def name(self):
        return 'exportaddgeometrycolumns'

    def displayName(self):
        return self.tr('Export geometry columns')

    def processAlgorithm(self, parameters, context, feedback):
        source = self.parameterAsSource(parameters, self.INPUT, context)
        method = self.parameterAsEnum(parameters, self.METHOD, context)

        wkb_type = source.wkbType()
        fields = source.fields()

        # Fix: reset per-run flags. These were only ever set to True, so a
        # reused algorithm instance that first processed a Z/M point layer
        # would wrongly emit Z/M attributes for a later layer without them.
        self.export_z = False
        self.export_m = False

        new_fields = QgsFields()
        geometry_type = QgsWkbTypes.geometryType(wkb_type)
        if geometry_type == QgsWkbTypes.PolygonGeometry:
            new_fields.append(QgsField('area', QVariant.Double))
            new_fields.append(QgsField('perimeter', QVariant.Double))
        elif geometry_type == QgsWkbTypes.LineGeometry:
            new_fields.append(QgsField('length', QVariant.Double))
        else:
            new_fields.append(QgsField('xcoord', QVariant.Double))
            new_fields.append(QgsField('ycoord', QVariant.Double))
            if QgsWkbTypes.hasZ(wkb_type):
                self.export_z = True
                new_fields.append(QgsField('zcoord', QVariant.Double))
            if QgsWkbTypes.hasM(wkb_type):
                self.export_m = True
                new_fields.append(QgsField('mvalue', QVariant.Double))

        fields = QgsProcessingUtils.combineFields(fields, new_fields)
        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
                                               fields, wkb_type, source.sourceCrs())

        coordTransform = None

        # Calculate with:
        # 0 - layer CRS
        # 1 - project CRS
        # 2 - ellipsoidal
        self.distance_area = QgsDistanceArea()
        if method == 2:
            self.distance_area.setSourceCrs(source.sourceCrs())
            self.distance_area.setEllipsoid(context.project().ellipsoid())
        elif method == 1:
            coordTransform = QgsCoordinateTransform(source.sourceCrs(),
                                                    context.project().crs())

        features = source.getFeatures()
        # Guard against division by zero on an empty layer.
        total = 100.0 / source.featureCount() if source.featureCount() else 0
        for current, f in enumerate(features):
            if feedback.isCanceled():
                break

            outFeat = f
            attrs = f.attributes()
            inGeom = f.geometry()

            if inGeom:
                if coordTransform is not None:
                    inGeom.transform(coordTransform)

                if inGeom.type() == QgsWkbTypes.PointGeometry:
                    attrs.extend(self.point_attributes(inGeom))
                elif inGeom.type() == QgsWkbTypes.PolygonGeometry:
                    attrs.extend(self.polygon_attributes(inGeom))
                else:
                    attrs.extend(self.line_attributes(inGeom))

            outFeat.setAttributes(attrs)
            sink.addFeature(outFeat, QgsFeatureSink.FastInsert)

            feedback.setProgress(int(current * total))

        return {self.OUTPUT: dest_id}

    def point_attributes(self, geometry):
        """Return ``[x, y(, z)(, m)]`` for *geometry*.

        For a multi-point geometry only the first part is exported; an
        empty list is returned when no point can be extracted.
        """
        pt = None
        if not geometry.isMultipart():
            pt = geometry.geometry()
        else:
            if geometry.numGeometries() > 0:
                pt = geometry.geometryN(0)
        attrs = []
        if pt:
            attrs.append(pt.x())
            attrs.append(pt.y())
            # Z/M only when the layer declared them (flags set per run).
            if self.export_z:
                attrs.append(pt.z())
            if self.export_m:
                attrs.append(pt.m())
        return attrs

    def line_attributes(self, geometry):
        """Return ``[length]`` measured with the configured QgsDistanceArea."""
        return [self.distance_area.measureLength(geometry)]

    def polygon_attributes(self, geometry):
        """Return ``[area, perimeter]`` measured with the configured QgsDistanceArea."""
        area = self.distance_area.measureArea(geometry)
        perimeter = self.distance_area.measurePerimeter(geometry)
        return [area, perimeter]
GeoCat/QGIS
python/plugins/processing/algs/qgis/ExportGeometryInfo.py
Python
gpl-2.0
6,865
0.001165
#***************************************************************************** # Copyright (C) 2017 Lee Worden <worden dot lee at gmail dot com> # # Distributed under the terms of the GNU General Public License (GPL) v.2 # http://www.gnu.org/licenses/ #***************************************************************************** import graph_latex_patched from sage.all import * import dynamicalsystems from sage.misc.latex import _latex_file_ #from sage.symbolic.relation import solve from sage.symbolic.function_factory import function # constant 'enum' values for use with indexing class deps: index, sumover = range(0,2) def plot_boxmodel_graph( g, filename=None, inline=False, figsize=(6,6), empty_vertices=(), ellipsis_vertices=(), **options ): import itertools #print 'ellipsis vertices:', ellipsis_vertices lopts = { 'graphic_size': figsize, 'edge_labels': True, 'edge_thickness' : 0.02, #'edge_fills': True, #'edge_color': 'white', #'edge_thickness': 0.05 'vertex_shape': 'rectangle', 'vertices_empty': { x:True for x in empty_vertices }, 'vertex_colors': { x:'white' for x in ellipsis_vertices }, #'vertex_label_colors': { x:'white' for x in self._sources | self._sinks } } graph_latex_patched.setup_latex_preamble() gop = graph_latex_patched.GraphLatex(g) if inline: lopts['margins'] = (0.5,0.5,0.5,0.5) lopts.update( options ) #print 'lopts:',lopts if 'latex_options' in options: g.set_latex_options( **(options['latex_options']) ) gop.set_options( **lopts ) gl = gop.latex() xp = '' if inline: #LT = '\n\\vspace{24pt}\n' + gl + '\n\\vspace{24pt}\n' LT = gl else: if figsize[0] > 6.75 or figsize[1] > 9: latex.add_package_to_preamble_if_available('geometry') xp = '\\geometry{papersize={' + str(figsize[0] + 10) + 'cm,' + str(figsize[1] + 20) + 'cm}}\n' LT = _latex_file_( dynamicalsystems.wrap_latex( gl ), title='', extra_preamble=xp ) if filename is not None: #print 'plot to', filename LF = open( filename, 'w' ) LF.write( LT ) LF.close() return LT ## see BoxModel.plot_boxes() 
method below ## this is a transformation that supports plotting a box model ## graph using per capita flow rates rather than absolute rates def per_capita_rates(g): def to_per_capita(r,s): if s in r.variables(): return (r/s).collect_common_factors().expand() else: print 'Warning: rate ', str(r), 'not converted to per capita' return r return DiGraph( [ (v,w,to_per_capita(e,v)) for v,w,e in g.edge_iterator() ], multiedges=True, pos = g.get_pos() ) class BoxModel(SageObject): """Parent class for all kinds of box models. Note that since this gets its variables from a graph's vertices, rather than from indexers, it can't be used in adaptive dynamics. Subclasses that generate their boxes, maybe can. """ def __init__(self, graph, vars=None, parameters=None, parameter_dependencies={}, sources=(), sinks=(), aggregate_names=(), bindings=dynamicalsystems.Bindings()): # we are given a directed graph whose vertex labels are state # variables, representing fractions of total population, # and whose edge labels are flow rates. 
try: graph.edge_iterator() except AttributeError: try: self.__init__( graph._graph, graph._vars, sources=graph._sources, sinks=graph._sinks, aggregate_names=graph._aggregate_names, bindings=graph._bindings ) return except AttributeError: graph = DiGraph(graph) self._graph = graph self._graph.set_latex_options( edge_labels=True ) self._sources = Set( sources ) self._sinks = Set( sinks ) self._aggregate_names = aggregate_names if vars is None: vars = Set( graph.vertices() ) - self._sources - self._sinks self._vars = list(vars) print 'vars', self._vars, 'sources', self._sources, 'sinks', self._sinks def getvars(r): try: return r.variables() except AttributeError: return [] if parameters is None: # avoid namespace confusion with product.union #print 'make parameters'; sys.stdout.flush() parameters = sorted( list( reduce( lambda x,y: x.union(y), (set(getvars(r)) for f,t,r in graph.edges()), set() ).difference( self._vars, self._sources, self._sinks, self._aggregate_names ) ), key=str ) #print 'made parameters'; sys.stdout.flush() self._parameters = parameters print 'parameters:', parameters if False: self._parameter_dependencies = parameter_dependencies for p in self._parameters: if p not in self._parameter_dependencies: # infer connections between parameters and compartmentalization # for now, simple rule: # just connect it to the source variable of its arrow # TODO: inference including defined quantities like N #print 'infer dependencies for parameter', p for v,w,e in self._graph.edges(): try: vs = getvars(e) except AttributeError: vs = [] if p in vs: pd = [ v ] #print 'found', p, 'in arrow', e #print 'infer dependency on', v if p in self._parameter_dependencies and self._parameter_dependencies[p] != pd: #print 'but already inferred', self._parameter_dependencies[p] #print 'dependencies of parameter', p, 'are unclear, inferring no dependencies' pd = [] self._parameter_dependencies[p] = pd for p, pd in self._parameter_dependencies.items(): try: [ d[0] for d in pd ] 
except: self._parameter_dependencies[p] = [ (d,deps.index) for d in pd ] #print 'parameter dependencies:', self._parameter_dependencies self._bindings = bindings if self._graph.get_pos() is None: pos = { v:(i,0) for i,v in enumerate(self._vars) } pos.update( { v:(-1,i) for i,v in enumerate(self._sources) } ) pos.update( { v:(xx,i) for i,v in enumerate(self._sinks) for xx in (max(x for x,y in pos.itervalues()),) } ) self._graph.set_pos( pos ) def bind(self, *args, **vargs): bindings = dynamicalsystems.Bindings( *args, **vargs ) bound_graph = DiGraph( [ (bindings(v),bindings(w),bindings(e)) for v,w,e in self._graph.edge_iterator() ], multiedges=True, pos = { bindings(v):p for v,p in self._graph.get_pos().items() } if self._graph.get_pos() is not None else None ) return BoxModel( bound_graph, vars = [ bindings(v) for v in self._vars ], sources = Set( bindings(v) for v in self._sources ), sinks = Set( bindings(v) for v in self._sinks ), parameters = [ bindings(p) for p in self._parameters ], parameter_dependencies = { bindings(p):[(bindings(d),t) for d,t in pd] for p,pd in self._parameter_dependencies.items() }, aggregate_names = self._aggregate_names, bindings = self._bindings + bindings ) def add_transitions( self, trs ): # We take BoxModel to be an immutable object, so this operation # returns a new BoxModel. 
trs is a list of (source,target,rate) # tuples suitable for adding to self._graph #print 'add_transitions', trs #print 'parameters before', self._parameters nbm = deepcopy(self) nbm._graph.add_edges( trs ) #print self._vars for f,t,r in trs: try: #print r #print r.variables() #print Set( r.variables() ).difference( Set( self._vars ) ) nbm._parameters.update( Set( r.variables() ) - self._vars - self._aggregate_names ) except AttributeError: pass #print 'parameters after', nbm._parameters return nbm def reorder_latex_variables( self, ex ): #return ex # Sage likes to write "I S \beta" in unicode or whatever order - # we want "\beta S I", and more generally, first parameters and # then compartment names, in a sort of order given by the flow # of the transitions. Here we use left-to-right, top-to-bottom # order based on the positions given for compartments. # this function returns a sort of pseudo-expression that's only # suitable for printing, not for doing math with try: self._sorter except AttributeError: from collections import defaultdict sort_order_map = dict( ## parameters first, Greek letters before Roman [ (latex(v),(T,T)) for v in self._parameters for T in [-1e+10 if latex(v)[0] == '\\' or latex(v)[0:2] == '{\\' else -0.9e+10] ] + ## then compartment names, in order of the graph layout [ (latex(vv),(pp[0],-pp[1])) for vv,pp in self._graph.get_pos().items() ] + ## then any aggregate names [ (latex(v),(1e+10,1e+10)) for v in self._aggregate_names ] ) # this converter is defined later in this file self._sorter = sort_latex_variables( ## parameters then compartments sort_order_map, ## numbers before anything order_numbers_as=(-1e+12,-1e+12), ## other expressions just after numbers order_unknown_as=(-1e+11,-1e+11) ) #print 'use', self._sorter._map, 'on', latex(ex) try: return self._sorter( ex ) except AttributeError: # ex is not an expression return ex def __repr__(self): try: return '(BoxModel with compartments ' + str(tuple(self._vars)) + ')' except 
AttributeError: # _vars not assigned yet return '(BoxModel)' def plot_boxes( self, filename=None, inline=False, figsize=(6,6), transform_graph=None, ellipsis_vertices=(), **options ): g = self._graph ## apply the user-supplied transform if any ## for example, use transform_graph=per_capita_rates to ## plot using per capita rather than absolute flow rates if transform_graph is not None: g = transform_graph(g) try: ## free_product may assign this member ellipsis_vertices = Set( self._ellipsis_vars ) def ellipsize( g ): def ellipsize_vertex( v ): if v in ellipsis_vertices: return SR.symbol( str(v), latex_name='\ldots' ) else: return v return DiGraph( [ ( ellipsize_vertex(v), ellipsize_vertex(w), r ) for v,w,r in g.edge_iterator() ], pos = { ellipsize_vertex(v):p for v, p in g.get_pos().iteritems() } ) g = ellipsize(g) except AttributeError: ellipsis_vertices = () ## tweak the latex representation of the rates g = DiGraph( [ g.vertices(), [ (v,w,self.reorder_latex_variables(e)) for v,w,e in g.edge_iterator() ] ], format='vertices_and_edges', multiedges=True, pos = g.get_pos() ) #print 'plot_boxes, sources', self._sources, ', sinks', self._sinks return plot_boxmodel_graph( g, filename=filename, inline=inline, figsize=figsize, empty_vertices=self._sources | self._sinks, ellipsis_vertices=ellipsis_vertices, **options ) def plot( self, *args, **aargs ): def lx(s): return '$%s$'%latex(s) lfg = DiGraph( [[lx(s) for s in tup] for tup in self._graph.edge_iterator() ], multiedges=True ) vargs = { 'edge_labels' : True, 'talk' : True } if 'pos' not in aargs and self._graph.get_pos() is not None: vargs['pos'] = { lx(v) : p for v,p in self._graph.get_pos().items() } vargs.update( aargs ) #print 'plot vargs:', vargs return lfg.plot( *args, **vargs ) def transpose_graph_in_place( self ): self._graph.set_pos( { v:(-y,-x) for v,(x,y) in self._graph.get_pos().iteritems() } ) def transpose_graph( self ): nm = deepcopy( self ) nm.transpose_graph_in_place() return nm def 
aggregate_compartments( self, compartment_aggregation ): aggregate = {} for vt in self._graph.vertex_iterator(): ## what if vt is simple and doesn't have operands aggregate.setdefault( tuple( compartment_aggregation( vt.operands() ) ), [] ).append( vt.operands() ) ## aggregate is { new vertex: [old vertices], ... } print 'aggregate:', aggregate flow_sums = {} for v in self._graph.vertex_iterator(): av = compartment_aggregation( v ) if av not in flow_sums: flow_sums[av] = {} for _,w,e in self._graph.outgoing_edge_iterator(v): aw = compartment_aggregation( w ) flow_sums[av].setdefault( aw, SR(0) ) flow_sums[av][aw] += e ## flow_sums[av][aw] is sum of all transitions from ## (aggregated vertex) av to aw ## transitions are in terms of old vertex names ## now do substitutions to transform the transition sums agg_eqns, agg_symbols = [], [] agg_subs = dynamicalsystems.Bindings() for newt,oldts in aggregate.items(): print 'will combine', sum( oldts ), '==', newt agg_symbols.append( oldts[0] ) agg_eqns.append( oldts[0] == newt - sum( oldts[1:] ) ) agg_graph_dict = {} for av, ve in flow_sums.iteritems(): agg_graph_dict[av] = {} for aw, e in ve.iteritems(): sym = SR.symbol() print e, solns = solve( [ sym == e ] + agg_eqns, sym, *agg_symbols, solution_dict=True ) #print 'solve', [ sym == e ] + agg_eqns, ',', [sym] + agg_symbols, '\n ', solns if len(solns) == 1: #print ' ', maxima(sym), [str(k) == str(sym) for k in solns[0].keys()] el = [ex for k,ex in solns[0].items() if str(k) == str(sym)] print '==>', el[0] agg_graph_dict[av][aw] = el[0] else: raise RuntimeError, 'Could not simplify expression ' + str(e) + ':' + str(solns) print 'agg_graph_dict', agg_graph_dict #self._vc_eqns = vc_eqns ## make list of transformed variables ## they are in those dicts, but we want the order agg_vars = [] for v in self._vars: av = compartment_aggregation( v ) if av not in agg_vars: agg_vars.append(av) print 'agg_vars', agg_vars ## position the aggregates by matching them to a subset of original 
## compartments apos = {} for t,p in self._graph.get_pos().iteritems(): at = compartment_aggregation( t ) if at not in apos: apos[at] = p print 'apos', apos return boxmodel.BoxModel( DiGraph( agg_graph_dict, pos=apos ), agg_vars ) def combine_arrows( self ): #return self.aggregate_compartments( lambda x:x ) d = {} for v,w,r in self._graph.edge_iterator(): d[(v,w)] = d.get( (v,w), 0 ) + r ee = [ (v,w,r) for (v,w),r in d.iteritems() ] b = BoxModel( DiGraph( ee, pos=self._graph.get_pos() ), self._vars ) return b def separate_arrows( self ): plus = SR('x+1').operator() def terms_iterator( e ): e = e.expand() if e.operator() == plus: for t in e.operands(): for tt in terms_iterator(t): yield t else: yield e return BoxModel( DiGraph( [ (v,w,ee) for v,w,e in self._graph.edge_iterator() for ee in terms_iterator(e) ], pos = self._graph.get_pos(), multiedges=True ), self._vars ) def jump_process(self): try: self._jump_process except AttributeError: #print 'making BoxModel JumpProcess' nvars = self._sources | self._sinks vars = [ v for v in self._vars if v not in nvars ] var_index = { v:i for i,v in enumerate(vars) } #var_index.update( { v:None for v in nvars } ) #for x in self._sources.union( self._sinks ): # var_index[x] = None #print 'var_index:',var_index def to_r( s, t ): r = [ 0 for v in vars ] if s in var_index: r[var_index[s]] = -1 if t in var_index: r[var_index[t]] = 1 return r self._jump_process = dynamicalsystems.JumpProcess( vars, [ (to_r(s,t),rate) for s,t,rate in self._graph.edges() ], bindings=self._bindings ) return self._jump_process ## for forward_equations see boxkolmogorov.py def backward_equations(self, N, q_name='q'): return self.jump_process().backward_equations(N,q_name) def generator_matrix( self, N, rate_ring=QQ ): return self.jump_process().generator_matrix(N, rate_ring) def ode_flow(self): return self.jump_process().deterministic_flow() def ode(self, time_variable=SR.symbol('t'), bindings=dynamicalsystems.Bindings()): return 
self.jump_process().deterministic_ode(time_variable, bindings) def difference_equation(self, step=1, time_variable=SR.symbol('t'), bindings=dynamicalsystems.Bindings()): return self.jump_process().approximate_deterministic_difference_equation( step=step, time_variable=time_variable, bindings=bindings ) def micro_transitions( self ): # This could produce micro transitions but it isn't right so far # TODO: move this to JumpProcess # (in addition to making it work) ltx = dynamicalsystems.latex_output_base( dynamicalsystems.write_to_string() ) lines = [] for source, target, rate in self._graph.edge_iterator(): mu = MakeMicro( self, source ) ut = mu( rate ) #print str(ut); sys.stdout.flush() lines += [ r' & ' + latex(mu.sigma_fn(SR('x'))) + r'\to' + latex(target) + r' \quad\text{ at rate } ' + latex( ut ) ] ltx.write_align( *lines ) return ltx._output._str # useful parent class: expression converter that doesn't # do anything from sage.symbolic.expression_conversions import SubstituteFunction class IdentityConverter(SubstituteFunction): def __init__(self): pass def composition(self, ex, operator): # override the parent class's function replacing step return operator(*map(self, ex.operands())) class MakeMicro(IdentityConverter): _mul = SR('a*b').operator() from sage.symbolic.function_factory import function delta_fn = function('delta', latex_name=r'\delta') sigma_fn = function('sigma', print_latex_func=lambda self, x:r'\sigma_{%s}' % latex(x)) bm_sum = function( 'sum', print_latex_func=lambda self, x, s, ex:r'\sum_{%s\in %s}%s' %( latex(x), latex(s), latex(ex) ) ) bm_indicator = function( 'indicator', print_latex_func=lambda self, ev:r'\mathbb{1}\left(%s\right)' % latex(ev) ) bm_index_param = function( 'bm_index_param' ) def __init__(self, model, source): self._model = model self._source = source self._working = False self._tags = { s : SR.symbol( 'text'+str(s), latex_name=r'\texttt{%s}'%str(s) ) for s in self._model._vars } def __call__(self, ex): if self._working: 
return super(MakeMicro,self).__call__(ex) self._working = True tx = super(MakeMicro,self).__call__( ex / self._source ) self._working = False return ( self.bm_indicator( self.sigma_fn( SR.symbol('x') ) == self._tags[self._source] ) * tx.subs( { s : self.bm_sum( SR.symbol('y'), SR.symbol('X'), 1 / SR('N') * self.bm_indicator( self.sigma_fn( SR('y') ) == self._tags[s] ) ) for s in self._model._vars } ) ) def arithmetic(self, ex, operator): # do special handling to products of things, before processing the # things, to catch inner products if operator == self._mul: return self.do_inner_product( *ex.operands() ) else: return reduce( operator, *map(self, ex.operands()) ) def symbol(self, s): return self.do_inner_product( s ) # just in case def do_inner_product(self, *args): # leave multiplications as is, except in the case of a # parameter dependency marked "sumover": convert that from # a regular multiplication to an inner product. #print 'processing product', args margs = list(args) sumover = [] dummy_list = ['y', 'z', 'u', 'v', 'w', 's', 't', 'p', 'q', 'r'] for p,pd in self._model._parameter_dependencies.items(): if p in margs: #print 'found', p, 'in factors:', args if all( d in margs + [self._source] for d,x in pd ): #print 'found all of its deps', [d for d,x in pd], 'as well' indices_for_p = [] p_times = SR(1) for d,ss in pd: if ss == deps.sumover: dummy_var = SR.symbol( dummy_list.pop(0) ) indices_for_p.append( dummy_var ) sumover.append( dummy_var ) #print 'will sum over', dummy_var, 'in', d; sys.stdout.flush() margs[margs.index(d)] = 1 p_times *= self.bm_indicator( self.sigma_fn( dummy_var ) == self._tags[d] ) #print 'made it through equality'; sys.stdout.flush() elif d == self._source: indices_for_p += [SR('x')] else: raise ValueError, 'I am confused about dependence on ' + str(d) index_of_p = margs.index(p) margs[index_of_p] = self.bm_index_param( p, *indices_for_p ) * p_times for dv in reversed(sumover): margs[index_of_p] = self.bm_sum( dv, SR.symbol('X'), 1 
/ SR('N') * margs[index_of_p] ) margs[index_of_p] = margs[index_of_p].substitute_function( self.bm_index_param, lambda *args: dynamicalsystems.subscriptedsymbol( *args ) ) #print margs else: raise RuntimeError, ( "Missing parameter dependencies in expression " + str( reduce( self._mul, args ) ) ) expr = reduce( self._mul, margs ) #print 'becomes', expr return expr class sort_latex_variables(sage.symbolic.expression_conversions.ExpressionTreeWalker): def __init__(self, sort_order_map, order_numbers_as=-oo, order_unknown_as=oo): #print 'sort_order_map is', sort_order_map self._map = sort_order_map self._number_order = order_numbers_as self._unknown_order = order_unknown_as return super(sort_latex_variables,self).__init__(SR(0)) def arithmetic(self, ex, operator): if operator == (2*SR.symbol('x')).operator(): #print 'reorder latex product of', ex.operands() ## sort the factors in a multiplication def keyfn(x): try: return self._map[latex(x)] except KeyError: if x.is_numeric(): return self._number_order else: return self._unknown_order ll = sorted( ex.operands(), key=keyfn ) minusop = (SR.symbol('x')-1).operator() # it's actually + ## special case: a factor is -(x-1) : ## we will write that as (1-x) ## if there's a factor of -1, look for a subtraction rev = [ e for e in ll if e.operator() == minusop ] if -1 in ll else [] if len( rev ) > 0: ## there will only be one -1 ll = [ e for e in ll if e != -1 ] rev = rev[:1] #print 'will reverse', rev ## if there are factors of y^-1 ## we will put those as y in a denominator denom = [ d for d in ll if d.operator()==(1/SR.symbol('x')).operator() and d.operands()[1] == SR(-1) ] ll = [ n for n in ll if n not in denom ] denom = [ 1/d for d in denom ] ## function to render each factor in latex def to_lx( ex, within ): ## subtractions if ex.operator() == minusop: ## if reversed, write backwards if ex in rev: return r'\left({}-{}\right)'.format(latex(-ex.operands()[1]),latex(ex.operands()[0])) ## otherwise, write forwards #else: 
#return ''.join( (r'\left(',latex(ex),r'\right)') ) ## write additions if ex.operator() == (SR.symbol('x')+1).operator() and within: #print 'add () to', ex return r'\left({}\right)'.format(latex(ex)) ## if it's a compound symbol, put it in parens if ex.is_symbol(): lx = latex(ex) lxinner = lx while lxinner[0] == '{' and lxinner[-1] == '}': lxinner = lxinner[1:-1] if len(lxinner) > 1 and '_' not in lxinner and '^' not in lxinner and not( lxinner[0] == '\\' and lxinner[1:].isalpha() ): #print 'add () to', lxinner return r'\left({}\right)'.format(lxinner) else: #print 'a symbol:', lx return lx ## anything else, use default latex rendering #print ' default latex,', latex(ex) return latex(ex) ## combine the factors in the numerator #print ll lname = ' '.join(to_lx(v, len(ll)>1) for v in ll) ## if any factors in denominator, combine them and make fraction if len(denom) > 0: #print '/', denom lden = ' '.join(to_lx(d, len(denom)>1) for d in denom) lname = r'\frac{'+lname+'}{'+lden+'}' #print latex(ex), ' ==> ', lname Msym = SR.symbol( 'M_{}'.format( ZZ.random_element(1e+10) ), latex_name=lname ) return Msym elif ( operator == (2+SR.symbol('x')).operator() and ex.operands()[0].operator() == (2*SR.symbol('x')).operator() and SR(-1) in ex.operands()[0].operands() and ex.operands()[1] == 1): #print 'normalise', latex(ex), 'to 1-x form' lname = latex(ex.operands()[1])+'-'+latex(-ex.operands()[0]) Msym = SR.symbol( 'M_{}'.format( ZZ.random_element(1e+10) ), latex_name=lname ) return Msym #print 'typeset', latex(ex), 'as is' #print 'operator is', str(ex.operator()) return super(sort_latex_variables,self).arithmetic(ex,operator)
tcporco/SageBoxModels
boxmodel/boxmodel.py
Python
gpl-2.0
26,560
0.041679
#!/usr/bin/env python # -*- coding: utf-8 -*- from django.contrib.sitemaps import Sitemap from . import models class BlogSitemap(Sitemap): changefreq = "daily" priority = 0.5 def items(self): return models.Post.objects.filter(is_draft=False) def lastmod(self, obj): return obj.update_time class PageSitemap(Sitemap): changefreq = "monthly" priority = 0.5 def items(self): return models.Page.objects.filter(is_draft=False) def lastmod(self, obj): return obj.update_time # class CategorySitemap(Sitemap): # changefreq = "weekly" # priority = 0.6 # def items(self): # return models.Category.objects.all() # class TagSitemap(Sitemap): # changefreq = "weekly" # priority = 0.6 # def items(self): # return models.Tag.objects.all() sitemaps = { 'blog': BlogSitemap, 'page': PageSitemap, # 'category': CategorySitemap, # 'tag': TagSitemap, }
flyhigher139/mayblog
blog/main/sitemaps.py
Python
gpl-2.0
968
0.004132
from django import forms from django.conf import settings from django.contrib.admin.helpers import normalize_fieldsets, AdminReadonlyField, AdminField from django.contrib.admin.templatetags.admin_static import static from django.utils.safestring import mark_safe class AdminForm(object): def __init__(self, form, fieldsets, readonly_fields=None): self.form, self.fieldsets = form, normalize_fieldsets(fieldsets) if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for name, options in self.fieldsets: yield Fieldset(self.form, name, readonly_fields=self.readonly_fields, **options ) def first_field(self): try: fieldset_name, fieldset_options = self.fieldsets[0] field_name = fieldset_options['fields'][0] if not isinstance(field_name, basestring): field_name = field_name[0] return self.form[field_name] except (KeyError, IndexError): pass try: return iter(self.form).next() except StopIteration: return None def _media(self): media = self.form.media for fs in self: media = media + fs.media return media media = property(_media) class Fieldset(object): def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(), description=None): self.form = form self._name, self.fields = name, fields self.classes = u' '.join(classes) self._description = description self.readonly_fields = readonly_fields @property def name(self): return self._name if self._name else self.form.title @property def description(self): return self._description if self._description else self.form.description def _media(self): if 'collapse' in self.classes: extra = '' if settings.DEBUG else '.min' js = ['jquery%s.js' % extra, 'jquery.init.js', 'collapse%s.js' % extra] return forms.Media(js=[static('admin/js/%s' % url) for url in js]) return forms.Media() media = property(_media) def __iter__(self): for field in self.fields: yield Fieldline(self.form, field, self.readonly_fields) class Fieldline(object): def __init__(self, form, field, 
readonly_fields=None, model_admin=None): self.form = form # A django.forms.Form instance if not hasattr(field, "__iter__"): self.fields = [field] else: self.fields = field self.model_admin = model_admin if readonly_fields is None: readonly_fields = () self.readonly_fields = readonly_fields def __iter__(self): for i, field in enumerate(self.fields): if field in self.readonly_fields: yield SingleReadonlyField(self.form, field, is_first=(i == 0)) else: yield SingleField(self.form, field, is_first=(i == 0)) def errors(self): return mark_safe(u'\n'.join([self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields]).strip('\n')) class SingleField(AdminField): pass # def __init__(self, form, field, is_first): # super(SingleField, self).__init__(form, field, is_first) # self.field = form[field] # A django.forms.BoundField instance # self.is_first = is_first # Whether this field is first on the line # self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput) class SingleReadonlyField(AdminReadonlyField): def __init__(self, form, field, is_first): # Make self.field look a little bit like a field. This means that # {{ field.name }} must be a useful class name to identify the field. # For convenience, store other field-related data here too. if callable(field): class_name = field.__name__ != '<lambda>' and field.__name__ or '' else: class_name = field self.field = { 'name': class_name, 'label': form[field].label, 'field': field, 'help_text': form[field].help_text } self.form = form self.is_first = is_first self.is_checkbox = False self.is_readonly = True def contents(self): return self.form[self.field]
joke2k/django-options
django_options/formset.py
Python
bsd-3-clause
4,546
0.00264
import codecs f = codecs.open("/Users/hjp/Downloads/task/data/dev.txt", 'r', 'utf-8') for line in f.readlines(): print(line) sents = line.split('\t') print(sents[1] + "\t" + sents[3]) for i in range(len(sents)): print(sents[i]) f.close()
hjpwhu/Python
src/hjp.edu.nlp.data.task/semeval.py
Python
mit
269
0.003717
__author__ = "Johannes Köster" __copyright__ = "Copyright 2015, Johannes Köster" __email__ = "koester@jimmy.harvard.edu" __license__ = "MIT" import os import traceback from tokenize import TokenError from snakemake.logging import logger def format_error(ex, lineno, linemaps=None, snakefile=None, show_traceback=False): if linemaps is None: linemaps = dict() msg = str(ex) if linemaps and snakefile and snakefile in linemaps: lineno = linemaps[snakefile][lineno] if isinstance(ex, SyntaxError): msg = ex.msg location = (" in line {} of {}".format(lineno, snakefile) if lineno and snakefile else "") tb = "" if show_traceback: tb = "\n".join(format_traceback(cut_traceback(ex), linemaps=linemaps)) return '{}{}{}{}'.format(ex.__class__.__name__, location, ":\n" + msg if msg else ".", "\n{}".format(tb) if show_traceback and tb else "") def get_exception_origin(ex, linemaps): for file, lineno, _, _ in reversed(traceback.extract_tb(ex.__traceback__)): if file in linemaps: return lineno, file def cut_traceback(ex): snakemake_path = os.path.dirname(__file__) for line in traceback.extract_tb(ex.__traceback__): dir = os.path.dirname(line[0]) if not dir: dir = "." if not os.path.isdir(dir) or not os.path.samefile(snakemake_path, dir): yield line def format_traceback(tb, linemaps): for file, lineno, function, code in tb: if file in linemaps: lineno = linemaps[file][lineno] if code is not None: yield ' File "{}", line {}, in {}'.format(file, lineno, function) def print_exception(ex, linemaps): """ Print an error message for a given exception. Arguments ex -- the exception linemaps -- a dict of a dict that maps for each snakefile the compiled lines to source code lines in the snakefile. 
""" tb = "Full " + "".join(traceback.format_exception(type(ex), ex, ex.__traceback__)) logger.debug(tb) if isinstance(ex, SyntaxError) or isinstance(ex, IndentationError): logger.error(format_error(ex, ex.lineno, linemaps=linemaps, snakefile=ex.filename, show_traceback=True)) return origin = get_exception_origin(ex, linemaps) if origin is not None: lineno, file = origin logger.error(format_error(ex, lineno, linemaps=linemaps, snakefile=file, show_traceback=True)) return elif isinstance(ex, TokenError): logger.error(format_error(ex, None, show_traceback=False)) elif isinstance(ex, MissingRuleException): logger.error(format_error(ex, None, linemaps=linemaps, snakefile=ex.filename, show_traceback=False)) elif isinstance(ex, RuleException): for e in ex._include + [ex]: if not e.omit: logger.error(format_error(e, e.lineno, linemaps=linemaps, snakefile=e.filename, show_traceback=True)) elif isinstance(ex, WorkflowError): logger.error(format_error(ex, ex.lineno, linemaps=linemaps, snakefile=ex.snakefile, show_traceback=True)) elif isinstance(ex, KeyboardInterrupt): logger.info("Cancelling snakemake on user request.") else: traceback.print_exception(type(ex), ex, ex.__traceback__) class WorkflowError(Exception): @staticmethod def format_args(args): for arg in args: if isinstance(arg, str): yield arg else: yield "{}: {}".format(arg.__class__.__name__, str(arg)) def __init__(self, *args, lineno=None, snakefile=None, rule=None): super().__init__("\n".join(self.format_args(args))) if rule is not None: self.lineno = rule.lineno self.snakefile = rule.snakefile else: self.lineno = lineno self.snakefile = snakefile self.rule = rule class WildcardError(WorkflowError): pass class RuleException(Exception): """ Base class for exception occuring withing the execution or definition of rules. """ def __init__(self, message=None, include=None, lineno=None, snakefile=None, rule=None): """ Creates a new instance of RuleException. 
Arguments message -- the exception message include -- iterable of other exceptions to be included lineno -- the line the exception originates snakefile -- the file the exception originates """ super(RuleException, self).__init__(message) self._include = set() if include: for ex in include: self._include.add(ex) self._include.update(ex._include) if rule is not None: if lineno is None: lineno = rule.lineno if snakefile is None: snakefile = rule.snakefile self._include = list(self._include) self.lineno = lineno self.filename = snakefile self.omit = not message @property def messages(self): return map(str, (ex for ex in self._include + [self] if not ex.omit)) class InputFunctionException(WorkflowError): pass class MissingOutputException(RuleException): pass class IOException(RuleException): def __init__(self, prefix, rule, files, include=None, lineno=None, snakefile=None): message = ("{} for rule {}:\n{}".format(prefix, rule, "\n".join(files)) if files else "") super().__init__(message=message, include=include, lineno=lineno, snakefile=snakefile, rule=rule) class MissingInputException(IOException): def __init__(self, rule, files, include=None, lineno=None, snakefile=None): super().__init__("Missing input files", rule, files, include, lineno=lineno, snakefile=snakefile) class PeriodicWildcardError(RuleException): pass class ProtectedOutputException(IOException): def __init__(self, rule, files, include=None, lineno=None, snakefile=None): super().__init__("Write-protected output files", rule, files, include, lineno=lineno, snakefile=snakefile) class UnexpectedOutputException(IOException): def __init__(self, rule, files, include=None, lineno=None, snakefile=None): super().__init__("Unexpectedly present output files " "(accidentally created by other rule?)", rule, files, include, lineno=lineno, snakefile=snakefile) class AmbiguousRuleException(RuleException): def __init__(self, filename, job_a, job_b, lineno=None, snakefile=None): super().__init__( "Rules {job_a} and 
{job_b} are ambiguous for the file {f}.\n" "Expected input files:\n" "\t{job_a}: {job_a.input}\n" "\t{job_b}: {job_b.input}".format(job_a=job_a, job_b=job_b, f=filename), lineno=lineno, snakefile=snakefile) self.rule1, self.rule2 = job_a.rule, job_b.rule class CyclicGraphException(RuleException): def __init__(self, repeatedrule, file, rule=None): super().__init__("Cyclic dependency on rule {}.".format(repeatedrule), rule=rule) self.file = file class MissingRuleException(RuleException): def __init__(self, file, lineno=None, snakefile=None): super().__init__( "No rule to produce {} (if you use input functions make sure that they don't raise unexpected exceptions).".format( file), lineno=lineno, snakefile=snakefile) class UnknownRuleException(RuleException): def __init__(self, name, prefix="", lineno=None, snakefile=None): msg = "There is no rule named {}.".format(name) if prefix: msg = "{} {}".format(prefix, msg) super().__init__(msg, lineno=lineno, snakefile=snakefile) class NoRulesException(RuleException): def __init__(self, lineno=None, snakefile=None): super().__init__("There has to be at least one rule.", lineno=lineno, snakefile=snakefile) class IncompleteFilesException(RuleException): def __init__(self, files): super().__init__( "The files below seem to be incomplete. " "If you are sure that certain files are not incomplete, " "mark them as complete with\n\n" " snakemake --cleanup-metadata <filenames>\n\n" "To re-generate the files rerun your command with the " "--rerun-incomplete flag.\nIncomplete files:\n{}".format( "\n".join(files))) class IOFileException(RuleException): def __init__(self, msg, lineno=None, snakefile=None): super().__init__(msg, lineno=lineno, snakefile=snakefile) class ClusterJobException(RuleException): def __init__(self, job, jobid, jobscript): super().__init__( "Error executing rule {} on cluster (jobid: {}, jobscript: {}). 
" "For detailed error see the cluster log.".format(job.rule.name, jobid, jobscript), lineno=job.rule.lineno, snakefile=job.rule.snakefile) class CreateRuleException(RuleException): pass class TerminatedException(Exception): pass
vangalamaheshh/snakemake
snakemake/exceptions.py
Python
mit
10,433
0.000192
import maya.cmds as cmds import maya.utils as utils import threading import time import sys from PyQt4 import QtCore, QtGui pumpedThread = None app = None def pumpQt(): global app def processor(): app.processEvents() while 1: time.sleep(0.01) utils.executeDeferred( processor ) def initializePumpThread(): global pumpedThread global app if pumpedThread == None: app = QtGui.QApplication(sys.argv) pumpedThread = threading.Thread( target = pumpQt, args = () ) pumpedThread.start()
lordtangent/arsenalsuite
cpp/apps/absubmit/maya/pumpThread.py
Python
gpl-2.0
504
0.049603