prefix
stringlengths
0
918k
middle
stringlengths
0
812k
suffix
stringlengths
0
962k
ss_vals = [] for line in rfile: taw_rss = line.split(',') taw = int(taw_rss[0]) rss = float(taw_rss[1]) taw_vals.append(taw) rss_vals.append(rss) # get the average daily rss in mm rss_vals_avg_daily = [((rss / 11.0) / 365.0) for rss in rss_vals] print 'the rss avg daily error \n', rss_vals_avg_daily error_reduced_lst = [] for i in range(len(rss_vals_avg_daily)): # print 'i', i if i == 0: error_reduced_lst.append('') elif i > 0: # calculate the error reduced by each taw step error_reduced = rss_vals_avg_daily[i] - rss_vals_avg_daily[i-1] error_reduced_lst.append(error_reduced) # elif i == len(rss_vals_avg_daily) print 'the error reduced list \n', error_reduced_lst # set the first value of the list to the second value error_reduced_lst[0] = error_reduced_lst[1] print 'the error reduced list \n', error_reduced_lst # round the values to the 2nd decimal place error_reduced_lst= [round(i, 2) for i in error_reduced_lst] # # select the TAW after which error reduced is no longer greater than 0.01 # for taw, reduced_error in zip(taw_vals, error_reduced_lst): # print 'taw {}, re {}'.format(taw, reduced_error) indx_lst = [] for i, re in enumerate(error_reduced_lst): if abs(re) <= 0.01: indx_lst.append(i) print 'the index list\n', indx_lst consecutives = [] for i in range(len(indx_lst)+1): if i > 0 and i < (len(indx_lst)-1): print i if indx_lst[i + 1] == indx_lst[i] + 1: consecutives.append(indx_lst[i]) elif i == len(indx_lst)-1: if indx_lst[i] -1 == indx_lst[i-1]: consecutives.append(indx_lst[i-1]) consecutives.append(indx_lst[i]) print 'consecutives \n', consecutives # take the first index after which the reduced error is consistently less than or equal to 0.01 target_index = consecutives[0] # taw at the target index is the optimum taw optimum_taw = taw_vals[target_index] print 'optimum taw', optimum_taw else: print 'running' # open rss dict from yml file for testing with open(rss_path, 'r') as rfile: rss = yaml.load(rfile) print 'optimizing taw' # get taw, rss arrays out. 
taw_vals = rss['taw'] rss_arrs = rss['rss'] # # slice the array for testing so you can see it change or not... # rss_arrs = [rss[200:220, 200:220] for rss in rss_arrs] print 'len of rss arrs', len(rss_arrs) # get the average daily rss in mm for an 11 year time period todo - these outputs look strange rss_vals_avg_daily = [((rss / 11.0) / 365.0) for rss in rss_arrs] # output average daily rss as images for better visualization geotiff_output(taw_vals, rss_vals_avg_daily, geo_info, namekey='daily_rss', outpath=output_path) print 'the rss avg daily error \n', len(rss_vals_avg_daily) error_reduced_lst = [] for i in range(len(rss_vals_avg_daily)): print 'i', i if i == 0: error_reduced_lst.append('') elif i > 0: # calculate the error reduced by each taw step todo - these should be positive if error is DECREASING error_reduced = rss_vals_avg_daily[i] - rss_vals_avg_daily[i - 1] error_reduced_lst.append(error_reduced) # elif i == len(rss_vals_avg_daily) print 'the error reduced list \n', error_reduced_lst # set the first value of the list to the second value error_reduced_lst[0] = error_reduced_lst[1] print 'the error reduced list \n', error_reduced_lst # output ERROR_REDUCED as images geotiff_output(taw_vals, error_reduced_lst, geo_info, namekey='error_reduced', outpath=output_path) # make all errors positive by taking the absolute value todo - what are the implications of taking the absolute value? 
It may mess up the algorithm error_reduced_lst = [np.absolute(i) for i in error_reduced_lst] # output ERROR_REDUCED as images geotiff_output(taw_vals, error_reduced_lst, geo_info, namekey='error_reduced_positive', outpath=output_path) # round the values to the 2nd decimal place FOR AN ARRAY error_reduced_lst = [np.round(i, 2) for i in error_reduced_lst] # output ERROR_REDUCED as images geotiff_output(taw_vals, error_reduced_lst, geo_info, namekey='error_reduced_positive_rounded', outpath=output_path) # # select the TAW after which error reduced is no longer greater than 0.01 # prepare to store three dimensional arrays with dstack value_shape = rss_arrs[0].shape three_d_shape = (value_shape[0], value_shape[1], 0) # for storing the boolean for the expression: rss value < 0.01 # todo - should this be np.zeros or is np.empty better? # reduced_error_tab = np.zeros(three_d_shape, dtype=bool) reduced_error_tab = np.empty(three_d_shape) # for storing the minimum taw taw_tab = np.empty(three_d_shape) smaller_than_list = [] for taw, error_array in zip(taw_vals, error_reduced_lst): print 'checking rss for taw: {}'.format(taw) # make each taw into an array so we can index it taw_arr = np.full(error_array.shape, taw, dtype='float64') # # we only want to store values that
are less than or equal to 0.01 when rounded (rounding handled earlier) # smaller_than = error_array <= 0.01 # get the boolean where error array is less than 0.05 smaller_than = error_array <= 0.05 # print'smaller than array \n', smaller_than # we append the smaller_than array as an int for testing smaller_than_list.append(smaller_than.asty
pe(int)) # append the smaller than array to reduced error tab with dstack reduced_error_tab = np.dstack((reduced_error_tab, smaller_than)) # append the taw array to a 3d array taw_tab = np.dstack((taw_tab, taw_arr)) # print '3d array True for error values less than or equal to 0.01 otherwise, False \n', reduced_error_tab geotiff_output(taw_vals, smaller_than_list, geo_info, namekey='smaller_than', outpath=output_path) # 1) go through the 3d array of true false from start to finish, extract true/false as list along 3rd dimension # 2) go through that list and get the indices of the true values # 3) get the indices that are consecutive # 4) take the first of the consecutive indices and grab the corresponding TAW. # 5) put the TAW back in a 2d array where it belongs. # This will hold the optimized TAW (2d array) optimum_taw_disagg = np.zeros(rss_arrs[0].shape) # iterate through the 3d array cols, rows, vals = reduced_error_tab.shape # print 'cols {}, rows {}, vals {}'.format(cols, rows, vals) for i in range(cols): for j in range(rows): true_indices = [] taw_lst = [] for k in range(vals): taw = taw_tab[i, j, k] # print 'taw is ', taw taw_lst.append(taw) if reduced_error_tab[i, j, k]: true_indices.append(k) # print 'true indices {} for ({},{})'.format(true_indices, i, j) # based on optional setting take the taw value based on the first instance that the error reduction falls below the threshold if hair_trigger: try: target_index = true_indices[0] except IndexError:
# Time:  O(m + n)
# Space: O(min(m, n))
#
# Given two arrays, write a function to compute their intersection.
#
# Example:
# Given nums1 = [1, 2, 2, 1], nums2 = [2, 2], return [2].
#
# Note:
# Each element in the result must be unique.
# The result can be in any order.


# Hash solution.
class Solution(object):
    def intersection(self, nums1, nums2):
        """Return the unique common elements of nums1 and nums2.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        # Build the set from the smaller list, scan the larger one.
        if len(nums1) > len(nums2):
            return self.intersection(nums2, nums1)

        lookup = set(nums1)

        res = []
        for i in nums2:
            if i in lookup:
                res.append(i)
                # Discard so each common value is emitted only once.
                lookup.discard(i)
        return res

    def intersection2(self, nums1, nums2):
        """One-liner variant using set intersection.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        return list(set(nums1) & set(nums2))


# Time:  O(max(m, n) * log(max(m, n)))
# Space: O(1)
# Binary search solution.
class Solution2(object):
    def intersection(self, nums1, nums2):
        """Sort both lists, then binary-search each element of the smaller
        list inside the larger one.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        if len(nums1) > len(nums2):
            return self.intersection(nums2, nums1)

        def binary_search(compare, nums, left, right, target):
            # Find the leftmost index in [left, right) whose element
            # satisfies compare(element, target).
            while left < right:
                # BUG FIX: use floor division (//): plain / returns a float
                # under Python 3, which is not a valid list index.
                mid = left + (right - left) // 2
                if compare(nums[mid], target):
                    right = mid
                else:
                    left = mid + 1
            return left

        nums1.sort(), nums2.sort()

        res = []
        left = 0
        for i in nums1:
            # First position in nums2 that is >= i.
            left = binary_search(lambda x, y: x >= y, nums2, left, len(nums2), i)
            if left != len(nums2) and nums2[left] == i:
                res.append(i)
            # Skip past every duplicate of i in nums2.
            left = binary_search(lambda x, y: x > y, nums2, left, len(nums2), i)
        return res


# Time:  O(max(m, n) * log(max(m, n)))
# Space: O(1)
# Two pointers solution.
class Solution3(object):
    def intersection(self, nums1, nums2):
        """Sort both lists and sweep two indices in lockstep.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: List[int]
        """
        nums1.sort(), nums2.sort()

        res = []
        it1, it2 = 0, 0
        while it1 < len(nums1) and it2 < len(nums2):
            if nums1[it1] < nums2[it2]:
                it1 += 1
            elif nums1[it1] > nums2[it2]:
                it2 += 1
            else:
                # Equal: record once (dedupe against the last emitted value).
                if not res or res[-1] != nums1[it1]:
                    res.append(nums1[it1])
                it1 += 1
                it2 += 1
        return res
import sys
from operator import add
from pyspark import SparkContext

# Classic Spark word-count driver script (Python 2 print syntax).
# Run as: PythonWordCount <master> <file>
if __name__ == "__main__":
    # Require both the Spark master URL and an input file path.
    if len(sys.argv) < 3:
        print >> sys.stderr, \
            "Usage: PythonWordCount <master> <file>"
        exit(-1)
    sc = SparkContext(sys.argv[1], "PythonWordCount")
    # Load the input file as an RDD with a single partition.
    lines = sc.textFile(sys.argv[2], 1)
    # Split each line on spaces, map every word to (word, 1),
    # then sum the counts per word.
    counts = lines.flatMap(lambda x: x.split(' ')) \
                  .map(lambda x: (x, 1)) \
                  .reduceByKey(add)
    # Pull the results back to the driver and print them.
    output = counts.collect()
    for (word, count) in output:
        print "%s : %i" % (word, count)
""" Django settings for jstest project. Generated by 'django-admin startproject' using Django 1.10.4. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '(n=5&yvpo-9!=db58cbix!za-$30^osiq1i42o42xh8)9j81i1' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'samplepage', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'jstest.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'jstest.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 
'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib
.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE
= 'Asia/Seoul' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'samplepage/statics'), )
# -*- coding: utf8 -*-
__author__ = 'shin'

import jieba

# Build a list of Chinese answer sentences that contain a concrete name
# ("Jay Chou"), then word-segment each one with jieba and substitute the
# name with the placeholder token [slot_name].  (Python 2: note the
# str.decode('utf8') call near the bottom.)
namelist_answer=[]

# Template versions of the same sentences with [slot_name] already in place,
# kept here (inert triple-quoted string) for reference.
'''
namelist_answer.append('[slot_name]。')
namelist_answer.append('叫[slot_name]。')
namelist_answer.append('姓名是[slot_name]。')
namelist_answer.append('我是[slot_name]。')
namelist_answer.append('您好,我叫[slot_name]。')
namelist_answer.append('[slot_name]')
namelist_answer.append('我的名字是[slot_name]。')
namelist_answer.append('我大名唤作[slot_name]。')
namelist_answer.append('哦,我的名字就是[slot_name]啊。')
namelist_answer.append('名叫[slot_name]。')
namelist_answer.append('叫[slot_name]。')
namelist_answer.append('没问题,我叫[slot_name]。')
namelist_answer.append('好的,名字是[slot_name]。')
namelist_answer.append('我的全名就是[slot_name]。')
namelist_answer.append('姓名是[slot_name]。')
namelist_answer.append('[slot_name]是我的名字。')
namelist_answer.append('我名叫[slot_name]。')
namelist_answer.append('我是[slot_name]啊。')
'''

# Concrete sentences used as segmentation input.
namelist_answer.append('周杰伦。')
namelist_answer.append('叫周杰伦。')
namelist_answer.append('姓名是周杰伦。')
namelist_answer.append('我是周杰伦。')
namelist_answer.append('您好,我叫周杰伦。')
namelist_answer.append('周杰伦')
namelist_answer.append('我的名字是周杰伦。')
namelist_answer.append('我大名唤作周杰伦。')
namelist_answer.append('哦,我的名字就是周杰伦啊。')
namelist_answer.append('名叫周杰伦。')
namelist_answer.append('叫周杰伦。')
namelist_answer.append('没问题,我叫周杰伦。')
namelist_answer.append('好的,名字是周杰伦。')
namelist_answer.append('我的全名就是周杰伦。')
namelist_answer.append('姓名是周杰伦。')
namelist_answer.append('周杰伦是我的名字。')
namelist_answer.append('我名叫周杰伦。')
namelist_answer.append('我是周杰伦啊。')
namelist_answer.append('我叫周杰伦')
namelist_answer.append('周杰伦')
namelist_answer.append('我的名字是周杰伦')
namelist_answer.append('我的姓名是周杰伦')
namelist_answer.append('姓名周杰伦')
namelist_answer.append('名字叫周杰伦。')
namelist_answer.append('您好,我叫周杰伦。')
namelist_answer.append('好的。您记一下。周杰伦。')
namelist_answer.append('名是周杰伦。')
namelist_answer.append('名叫周杰伦。')
namelist_answer.append('我叫周杰伦。')
namelist_answer.append('我是周杰伦。')
namelist_answer.append('名字是周杰伦。')
namelist_answer.append('我的名字是周杰伦。')

# Segment each sentence, joining tokens with single spaces, then replace
# the concrete name with the [slot_name] placeholder.
namelist_answer_cut=[]
for ans in namelist_answer:
    w_sent=''
    # NOTE(review): jieba._lcut is a private alias of jieba.lcut — confirm
    # it exists in the pinned jieba version.
    sent=jieba._lcut(ans)
    for word in (sent):
        w_sent +=' '
        w_sent +=word
    w_sent += '\n'
    w_sent=w_sent.replace('周杰伦'.decode('utf8'),'[slot_name]')
    namelist_answer_cut.append(w_sent)
pass
import dircache
import os.path
from sqlalchemy import create_engine,Table,Column,Integer,String,ForeignKey,MetaData
from sqlalchemy.orm import mapper
from sqlalchemy.orm import sessionmaker
from Files import *


def SearchDirectory(session,directory,whitelist):
    """Recursively walk `directory`, detect PE/DOS executables by their
    'MZ' magic bytes, and stage a Files(filename, version, full_path) row
    for each one on the given SQLAlchemy session.

    `whitelist` is a list of directory-entry names to skip (e.g. ['.svn']).
    The path is assumed to look like ...\\<version>\\<filename> (Windows
    separators) when extracting the version component.
    """
    for file in dircache.listdir(directory):
        if file in whitelist:
            continue
        full_path=os.path.join(directory,file)
        if os.path.isdir(full_path):
            #print 'Directory',full_path
            SearchDirectory(session,full_path,whitelist)
        else:
            # NOTE(review): if open()/read() raises, fd is never closed
            # (close is inside the try, not a finally), and the bare
            # except below silently swallows every error.
            try:
                fd=open(full_path)
                # First two bytes 'MZ' identify a DOS/PE executable.
                if fd.read(2)=='MZ':
                    # Split on Windows separators: last element is the file
                    # name, second-to-last the version directory.
                    path_elements=full_path.split('\\')
                    filename=path_elements[-1]
                    version=path_elements[-2]
                    print filename.lower(),version,full_path
                    session.add(Files(filename,version,full_path))
                fd.close()
            except:
                pass

# Create (or open) the local SQLite database with SQL echo enabled.
engine=create_engine('sqlite:///Files.db',echo=True)

# Older classical-mapping setup kept for reference (inert string).
"""
metadata=MetaData()
FilesTable=Table('Files',metadata,
    Column('id',Integer,primary_key=True),
    Column('Filename',String),
    Column('Version',String),
    Column('FullPath',String))
mapper(Files,FilesTable)
"""

# Declarative metadata comes from the Files module's Base.
metadata=Base.metadata
metadata.create_all(engine)
Session=sessionmaker(bind=engine)
session=Session()
# Scan the binaries share, skipping .svn folders, then persist the rows.
SearchDirectory(session,r'T:\mat\Projects\Binaries',['.svn'])
session.commit()
import _plotly_utils.basevalidators


class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``visible`` attribute of ``heatmapgl``
    traces (accepted values: True, False, "legendonly")."""

    def __init__(self, plotly_name="visible", parent_name="heatmapgl", **kwargs):
        # Pull the configurable options out of kwargs, falling back to the
        # defaults for this attribute, then delegate to the base validator.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", [True, False, "legendonly"])
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
"""Test Ansible Syntax. This module contains tests that validate that linter does not produce errors when encountering what counts as valid Ansible syntax. """ PB_WITH_NULL_TASKS = ''' - hosts: all tasks: '''
def test_null_tasks(default_text_runner):
    """Assure we do not fail when encountering null tasks."""
    # Linting the null-tasks playbook must produce zero findings.
    assert not default_text_runner.run_playbook(PB_WITH_NULL_TASKS)
def majority(array0):
    """Return the element occurring in more than half of `array0`.

    Prints 'No majority found' and implicitly returns None when no
    element has a strict majority (including for an empty input).
    """
    # Tally occurrences of each element.
    counts = {}
    for item in array0:
        counts[item] = counts.get(item, 0) + 1

    # A majority element must appear strictly more than len//2 times.
    threshold = len(array0) // 2
    for item, count in counts.items():
        if count > threshold:
            return item
    print('No majority found')
# Copyright: (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
from ansible.module_utils.common._collections_compat import MutableMapping, MutableSet, MutableSequence
from ansible.parsing.plugin_docs import read_docstring, read_docstub
from ansible.parsing.yaml.loader import AnsibleLoader

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


# modules that are ok that they do not have documentation strings
BLACKLIST = {
    'MODULE': frozenset(('async_wrapper',)),
    'CACHE': frozenset(('base',)),
}


def merge_fragment(target, source):
    """Merge each key of `source` into `target`, combining container
    values type-appropriately (dict update, set add, sorted de-duplicated
    list concat) and raising on any other overlapping type."""

    for key, value in source.items():
        if key in target:
            # assumes both structures have same type
            if isinstance(target[key], MutableMapping):
                # existing mapping wins on conflicting keys
                value.update(target[key])
            elif isinstance(target[key], MutableSet):
                value.add(target[key])
            elif isinstance(target[key], MutableSequence):
                # de-duplicate and keep a stable sorted order
                value = sorted(frozenset(value + target[key]))
            else:
                raise Exception("Attempt to extend a documentation fragement, invalid type for %s" % key)
        target[key] = value


def add_fragments(doc, filename, fragment_loader):
    """Resolve the `extends_documentation_fragment` entries of `doc`
    (a parsed DOCUMENTATION dict) and merge each referenced fragment's
    YAML into `doc` in place.

    Raises AnsibleAssertionError when a fragment cannot be loaded and
    AnsibleError / Exception when a fragment is malformed or cannot be
    merged.
    """

    fragments = doc.pop('extends_documentation_fragment', [])

    # a single fragment may be given as a bare string
    if isinstance(fragments, string_types):
        fragments = [fragments]

    # Allow the module to specify a var other than DOCUMENTATION
    # to pull the fragment from, using dot notation as a separator
    for fragment_slug in fragments:
        fragment_slug = fragment_slug.lower()
        if '.' in fragment_slug:
            fragment_name, fragment_var = fragment_slug.split('.', 1)
            fragment_var = fragment_var.upper()
        else:
            fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION'

        fragment_class = fragment_loader.get(fragment_name)
        if fragment_class is None:
            raise AnsibleAssertionError('fragment_class is None')

        fragment_yaml = getattr(fragment_class, fragment_var, '{}')
        fragment = AnsibleLoader(fragment_yaml, file_name=filename).get_single_data()

        # notes are accumulated, not merged
        if 'notes' in fragment:
            notes = fragment.pop('notes')
            if notes:
                if 'notes' not in doc:
                    doc['notes'] = []
                doc['notes'].extend(notes)

        if 'options' not in fragment:
            raise Exception("missing options in fragment (%s), possibly misformatted?: %s" % (fragment_name, filename))

        # ensure options themselves are directly merged
        if 'options' in doc:
            try:
                merge_fragment(doc['options'], fragment.pop('options'))
            except Exception as e:
                raise AnsibleError("%s options (%s) of unknown type: %s" % (to_native(e), fragment_name, filename))
        else:
            doc['options'] = fragment.pop('options')

        # merge rest of the sections
        try:
            merge_fragment(doc, fragment)
        except Exception as e:
            raise AnsibleError("%s (%s) of unknown type: %s" % (to_native(e), fragment_name, filename))


def get_docstring(filename, fragment_loader, verbose=False, ignore_errors=False):
    """
    DOCUMENTATION can be extended using documentation fragments loaded by the PluginLoader from the module_docs_fragments directory.
    """

    # parse DOCUMENTATION / EXAMPLES / RETURN / ANSIBLE_METADATA out of the file
    data = read_docstring(filename, verbose=verbose, ignore_errors=ignore_errors)

    # add fragments to documentation
    if data.get('doc', False):
        add_fragments(data['doc'], filename, fragment_loader=fragment_loader)

    return data['doc'], data['plainexamples'], data['returndocs'], data['metadata']
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """Tests for the parsers CLI arguments helper.""" import argparse import unittest from plaso.cli import tools from plaso.cli.helpers import parsers from plaso.lib import errors from tests.cli import test_lib as cli_test_lib class ParsersArgumentsHelperTest(cli_test_lib.CLIToolTestCase): """Tests for the parsers CLI arguments helper.""" # pylint: disable=no-member,protected-access _EXPECTED_OUTPUT = """\ usage: cli_helper.py [--parsers PARSER_FILTER_EXPRESSION] Test argument parser. {0:s}: --parsers PARSER_FILTER_EXPRESSION Define which presets, parsers and/or plugins to use, or show possible values. The expression is a comma separated string where each element is a preset, parser or plugin name. Each element can be prepended with an exclamation ma
rk to exclude the item. Matching is case insensitive. Examples: "linux,!bash_history" enables the linux preset, without the bash_history parser. "sqlite,!sqlite/chrome_history" enables all sqlite plugins except for chrome_history". "win7,syslog" enables the win7 preset, as well as the syslog
parser. Use "--parsers list" or "--info" to list available presets, parsers and plugins. """.format(cli_test_lib.ARGPARSE_OPTIONS) def testAddArguments(self): """Tests the AddArguments function.""" argument_parser = argparse.ArgumentParser( prog='cli_helper.py', description='Test argument parser.', add_help=False, formatter_class=cli_test_lib.SortedArgumentsHelpFormatter) parsers.ParsersArgumentsHelper.AddArguments(argument_parser) output = self._RunArgparseFormatHelp(argument_parser) self.assertEqual(output, self._EXPECTED_OUTPUT) def testParseOptions(self): """Tests the ParseOptions function.""" options = cli_test_lib.TestOptions() options.parsers = 'winevt' test_tool = tools.CLITool() parsers.ParsersArgumentsHelper.ParseOptions(options, test_tool) self.assertEqual(test_tool._parser_filter_expression, options.parsers) with self.assertRaises(errors.BadConfigObject): parsers.ParsersArgumentsHelper.ParseOptions(options, None) if __name__ == '__main__': unittest.main()
# -*- coding: utf-8 -*-

## This is a minimal config file for testing.

TESTING=True # this file only works in test mode

# Use an in-memory SQLite database so tests never touch disk.
sql_driver="sqlite"
sql_database=":memory:"

## overridden when running tests ##
SECRET_KEY="fbfzkar2ihf3ulqhelg8srlzg7resibg748wifgbz478"

#TRACE=True
#MEDIA_PATH="/var/tmp/pybble" ## set by the test run script

ADMIN_EMAIL="smurf@smurf.noris.de"
URLFOR_ERROR_FATAL=False

REDIS_HOST='localhost'
REDIS_DB=3 ## a db number not used in production
import abc
from default_metrics import DefaultMetrics


class DefaultEnvironment(object):
    """
    Abstract class for environments. All environments must implement these
    methods to be able to work with SBB.
    """
    # Python 2 style abstract-base-class declaration.
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        # Metrics collector bound to this environment instance.
        self.metrics_ = DefaultMetrics(self)

    @abc.abstractmethod
    def reset(self):
        """
        Method that is called at the beginning of each run by SBB, to reset
        the variables that will be used by the generations.
        """

    @abc.abstractmethod
    def setup(self, teams_population):
        """
        Method that is called at the beginning of each generation by SBB, to
        set the variables that will be used by the generation and remove the
        ones that are no longer being used.
        """

    @abc.abstractmethod
    def evaluate_point_population(self, teams_population):
        """
        Evaluate the fitness of the point population, to define which points
        will be removed or added in the next generation, when
        setup_point_population() is executed.
        """

    @abc.abstractmethod
    def evaluate_teams_population_for_training(self, teams_population):
        """
        Evaluate all the teams using the evaluate_team() method, and sets
        metrics. Used only for training.
        """

    @abc.abstractmethod
    def evaluate_team(self, team, mode):
        """
        Evaluate the team using the environment inputs. May be executed in
        the training or the test mode.
        This method must set the attribute results_per_points of the team,
        if you intend to use pareto.
        """

    @abc.abstractmethod
    def validate(self, current_generation, teams_population):
        """
        For classification:
        - Return the best team for the teams_population using the champion
          set.
        For reinforcement:
        - All teams go against the validation set, and then the best one go
          against the champion set
        """

    def hall_of_fame(self):
        # Default implementation: no hall-of-fame entries.
        return []
#!env python import os import sys sys.path.append( os.path.join( os.environ.get( "SPLUNK_HOME", "/opt/splunk/6.1.3" ), "etc/apps/framework/contrib/splunk-sdk-python/1.3.0", ) ) from collections import Counter, OrderedDict from math import log from nltk import tokenize import execnet import json from splunklib.searchcommands import Configuration, Option f
rom splunklib.searchcommands import dispatch, validators from remote_commands import OptionRemoteStreamingCommand, ValidateLocalFile @Configuration(clear_required_fields=False) class MCPredict(OptionRemoteStreamingCommand): model = Option(require=True, validate=ValidateLocalFile(mode='r',extension="pkl",subdir='classifiers',nohandle=True)) code = """ import os, sys, itertools, collections, numbers try: import cStri
ngIO as StringIO except: import StringIO import numpy as np import scipy.sparse as sp from multiclassify import process_records from gensim.models import LsiModel, TfidfModel, LdaModel from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import LabelEncoder from sklearn.externals import joblib if __name__ == "__channelexec__": args = channel.receive() records = [] for record in channel: if not record: break records.append(record) if records: records = np.array(records) # Try loading existing model try: model = joblib.load(args['model']) encoder = model['encoder'] est = model['est'] target = model['target'] fields = model['fields'] if model.get('text'): if model['text'] == 'lsi': textmodel = LsiModel.load(args['model'].replace(".pkl",".%s" % model['text'])) elif model['text'] == 'tfidf': textmodel = TfidfModel.load(args['model'].replace(".pkl",".%s" % model['text'])) else: textmodel = model['text'] except Exception as e: print >> sys.stderr, "ERROR", e channel.send({ 'error': "Couldn't find model %s" % args['model']}) else: X, y_labels, textmodel = process_records(records, fields, target, textmodel=textmodel) print >> sys.stderr, X.shape y = est.predict(X) y_labels = encoder.inverse_transform(y) for i, record in enumerate(records): record['%s_predicted' % target] = y_labels.item(i) channel.send(record) """ def __dir__(self): return ['model'] dispatch(MCPredict, sys.argv, sys.stdin, sys.stdout, __name__)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).

"""This provides a basic ButtonBox class, and imports the `ioLab python library
<http://github.com/ioLab/python-ioLabs>`_.
"""
from __future__ import absolute_import, division, print_function

try:
    from labjack import u3
except ImportError:
    import u3
    # Could not load the Exodriver driver
    # "dlopen(liblabjackusb.dylib, 6): image not found"


class U3(u3.U3):
    # Thin wrapper over the LabJack U3 that adds a one-call byte write.

    def setData(self, byte, endian='big', address=6701):
        """Write 1 byte of data to the U3 port

        parameters:

        - byte: the value to write (must be an integer 0:255)
        - endian: ['big' or 'small'] ignored from 1.84 onwards; automatic?
        - address: the memory address to send the byte to
          - 6700 = FIO
          - 6701 (default) = EIO (the DB15 connector)
          - 6702 = CIO
        """
        # Upper byte is the writemask, lower byte is the 8 lines/bits to set.
        # Bit 0 = line 0, bit 1 = line 1, bit 2 = line 2, etc.
        self.writeRegister(address, 0xFF00 + (byte & 0xFF))
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector


def addTemplate(core):
    """Register the 'dreaded_vir_vir' creature spawn template with the
    core spawn service (Jython server-side spawn script)."""
    mobileTemplate = MobileTemplate()

    # Basic creature stats and spawn behaviour.
    mobileTemplate.setCreatureName('dreaded_vir_vir')
    mobileTemplate.setLevel(40)
    mobileTemplate.setDifficulty(Difficulty.NORMAL)
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(True)
    mobileTemplate.setScale(1)

    # Harvestable resources.
    mobileTemplate.setMeatType("Avian Meat")
    mobileTemplate.setMeatAmount(25)
    mobileTemplate.setBoneType("Avian Bones")
    mobileTemplate.setBoneAmount(16)

    # NOTE(review): social group spelling 'vir vur' differs from the
    # creature name 'vir_vir' while the shared .iff uses 'vir_vur' —
    # confirm which spelling is canonical for this species.
    mobileTemplate.setSocialGroup("vir vur")
    mobileTemplate.setAssistRange(2)
    mobileTemplate.setStalker(True)
    mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)

    # Client object template(s).
    templates = Vector()
    templates.add('object/mobile/shared_vir_vur.iff')
    mobileTemplate.setTemplates(templates)

    # Default unarmed melee weapon.
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)

    # Attack abilities.
    attacks = Vector()
    attacks.add('bm_bite_3')
    mobileTemplate.setDefaultAttack('creatureMeleeAttack')
    mobileTemplate.setAttacks(attacks)

    core.spawnService.addMobileTemplate('dreaded_vir_vir', mobileTemplate)
    return
tion for a binary tree node. # class TreeNode(object): # def __init__(self,
x): # self.val = x #
self.left = None # self.right = None class Solution(object): def maxDepth(self, root): """ :type root: TreeNode :rtype: int """ if root: self.cand = [] self.find(0, root) return max(self.cand) else: return 0 def find(self, length, node): if node.left: self.cand.append(self.find(length + 1, node.left)) if node.right: self.cand.append(self.find(length + 1, node.right)) self.cand.append(length + 1)
from django.db import models


class Writer(models.Model):
    # Optional self-reference: another Writer this nick is an alias of.
    # (Django < 2.0 ForeignKey style: no on_delete argument.)
    alias = models.ForeignKey('Writer', blank=True, null=True)
    nick = models.CharField(unique=True, max_length=16)


class War(models.Model):
    # A timed writing war with an explicit start/end window.
    id = models.AutoField(primary_key=True)
    starttime = models.DateTimeField()
    endtime = models.DateTimeField()
    finished = models.BooleanField(default=False)

    def __unicode__(self):
        # Python 2 repr, Dutch labels, e.g. "War 3: 20:00 tot 21:00 (60 minuten)".
        return "War %s: %s tot %s (%s minuten)" % (self.id, self.starttime.strftime("%H:%M"), self.endtime.strftime("%H:%M"), (self.endtime - self.starttime).seconds / 60)


class ParticipantScore(models.Model):
    # Score a writer achieved in one specific war.
    writer = models.ForeignKey(Writer)
    war = models.ForeignKey(War)
    score = models.IntegerField(default=0, blank=True)


class WriterStats(models.Model):
    # Aggregate statistics; wpm = words per minute.
    warcount = models.IntegerField()
    wordcount = models.IntegerField()
    wpm = models.DecimalField(max_digits=5, decimal_places=2)


class WarParticipants(models.Model):
    # Join table linking writers to the wars they joined.
    war = models.ForeignKey(War)
    participant = models.ForeignKey(Writer)
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from .fields import (
    ArrayField, BigIntegerRangeField, CICharField, CIEmailField, CITextField,
    DateRangeField, DateTimeRangeField, FloatRangeField, HStoreField,
    IntegerRangeField, JSONField, SearchVectorField,
)


class Tag:
    # Minimal value object wrapped by TagField below.
    def __init__(self, tag_id):
        self.tag_id = tag_id

    def __eq__(self, other):
        return isinstance(other, Tag) and self.tag_id == other.tag_id


class TagField(models.SmallIntegerField):
    # Custom field storing a Tag as a small integer; used to exercise
    # ArrayField with a custom base field (see OtherTypesArrayModel.tags).

    # NOTE(review): the `context` parameter is the pre-Django-3.0
    # from_db_value signature — confirm against the Django version in use.
    def from_db_value(self, value, expression, connection, context):
        if value is None:
            return value
        return Tag(int(value))

    def to_python(self, value):
        if isinstance(value, Tag):
            return value
        if value is None:
            return value
        return Tag(int(value))

    def get_prep_value(self, value):
        return value.tag_id


class PostgreSQLModel(models.Model):
    # Abstract base restricting all these models to the postgres backend.
    class Meta:
        abstract = True
        required_db_vendor = 'postgresql'


class IntegerArrayModel(PostgreSQLModel):
    # NOTE(review): default=[] is a shared mutable default (Django warns
    # fields.W010); presumably intentional here as test fixture data.
    field = ArrayField(models.IntegerField(), default=[], blank=True)


class NullableIntegerArrayModel(PostgreSQLModel):
    field = ArrayField(models.IntegerField(), blank=True, null=True)


class CharArrayModel(PostgreSQLModel):
    field = ArrayField(models.CharField(max_length=10))


class DateTimeArrayModel(PostgreSQLModel):
    datetimes = ArrayField(models.DateTimeField())
    dates = ArrayField(models.DateField())
    times = ArrayField(models.TimeField())


class NestedIntegerArrayModel(PostgreSQLModel):
    field = ArrayField(ArrayField(models.IntegerField()))


class OtherTypesArrayModel(PostgreSQLModel):
    ips = ArrayField(models.GenericIPAddressField())
    uuids = ArrayField(models.UUIDField())
    decimals = ArrayField(models.DecimalField(max_digits=5, decimal_places=2))
    tags = ArrayField(TagField(), blank=True, null=True)


class HStoreModel(PostgreSQLModel):
    field = HStoreField(blank=True, null=True)


class CharFieldModel(models.Model):
    field = models.CharField(max_length=16)


class TextFieldModel(models.Model):
    field = models.TextField()

    def __str__(self):
        return self.field


# Scene/Character/Line models are used to test full text search. They're
# populated with content from Monty Python and the Holy Grail.
class Scene(models.Model):
    scene = models.CharField(max_length=255)
    setting = models.CharField(max_length=255)

    def __str__(self):
        return self.scene


class Character(models.Model):
    name = models.CharField(max_length=255)

    def __str__(self):
        return self.name


class CITestModel(PostgreSQLModel):
    # Case-insensitive (citext-backed) field variants.
    name = CICharField(primary_key=True, max_length=255)
    email = CIEmailField()
    description = CITextField()

    def __str__(self):
        return self.name


class Line(PostgreSQLModel):
    scene = models.ForeignKey('Scene', models.CASCADE)
    character = models.ForeignKey('Character', models.CASCADE)
    dialogue = models.TextField(blank=True, null=True)
    dialogue_search_vector = SearchVectorField(blank=True, null=True)
    dialogue_config = models.CharField(max_length=100, blank=True, null=True)

    def __str__(self):
        return self.dialogue or ''


class RangesModel(PostgreSQLModel):
    ints = IntegerRangeField(blank=True, null=True)
    bigints = BigIntegerRangeField(blank=True, null=True)
    floats = FloatRangeField(blank=True, null=True)
    timestamps = DateTimeRangeField(blank=True, null=True)
    dates = DateRangeField(blank=True, null=True)


class RangeLookupsModel(PostgreSQLModel):
    parent = models.ForeignKey(RangesModel, models.SET_NULL, blank=True, null=True)
    integer = models.IntegerField(blank=True, null=True)
    big_integer = models.BigIntegerField(blank=True, null=True)
    float = models.FloatField(blank=True, null=True)
    timestamp = models.DateTimeField(blank=True, null=True)
    date = models.DateField(blank=True, null=True)


class JSONModel(models.Model):
    field = JSONField(blank=True, null=True)
    field_custom = JSONField(blank=True, null=True, encoder=DjangoJSONEncoder)

    class Meta:
        required_db_features = ['has_jsonb_datatype']


class ArrayFieldSubclass(ArrayField):
    # Subclass that fixes the base field, for deconstruction/migration tests.
    def __init__(self, *args, **kwargs):
        super().__init__(models.IntegerField())


class AggregateTestModel(models.Model):
    """
    To test postgres-specific general aggregation functions
    """
    char_field = models.CharField(max_length=30, blank=True)
    integer_field = models.IntegerField(null=True)
    boolean_field = models.NullBooleanField()


class StatTestModel(models.Model):
    """
    To test postgres-specific aggregation functions for statistics
    """
    int1 = models.IntegerField()
    int2 = models.IntegerField()
    related_field = models.ForeignKey(AggregateTestModel, models.SET_NULL, null=True)


class NowTestModel(models.Model):
    when = models.DateTimeField(null=True, default=None)


class UUIDTestModel(models.Model):
    uuid = models.UUIDField(default=None, null=True)
'''
Created on 04.10.2012

@author: michi
'''
from PyQt4.QtCore import pyqtSignal
from ems.qt4.applicationservice import ApplicationService #@UnresolvedImport


class ModelUpdateService(ApplicationService):
    """Application-wide hub that broadcasts model-change notifications."""

    # Emitted when specific objects of a model (identified by key) changed.
    objectIdsUpdated = pyqtSignal(str, list)
    # Emitted when an unspecified set of objects of a model changed.
    objectsUpdated = pyqtSignal(str)
    # Always emitted after any update notification.
    modelUpdated = pyqtSignal()

    def triggerUpdate(self, modelObjectName, keys=None):
        """Broadcast that *modelObjectName* changed, per-key when *keys* given."""
        if keys is None:
            self.objectsUpdated.emit(modelObjectName)
        else:
            self.objectIdsUpdated.emit(modelObjectName, keys)
        self.modelUpdated.emit()
# -*- coding: utf-8 -*-

# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2018 Nicolas Hainaux <nh.techn@gmail.com>

# This file is part of Mathmaker.

# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.

# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

from mathmaker.lib import shared
from mathmaker.lib.document.frames import Sheet


def test_multi_divi_10_100_1000():
    """Check this sheet is generated without any error."""
    sheet = Sheet('mental_calculation', '04_yellow1',
                  'multi_divi_10_100_1000')
    shared.machine.write_out(str(sheet), pdf_output=True)


def test_multi_divi_10_100_1000_embedding_js():
    """Check this sheet is generated without any error."""
    sheet = Sheet('mental_calculation', '04_yellow1',
                  'multi_divi_10_100_1000', enable_js_form=True)
    shared.machine.write_out(str(sheet), pdf_output=True)
(six.string_types, Promise)): for data_attr in self.data_attrs: data_value = html.conditional_escape( force_text(getattr(option_label, data_attr, ""))) other_html.append('data-%s="%s"' % (data_attr, data_value)) return ' '.join(other_html) def transform_option_label(self, option_label): if (not isinstance(option_label, (six.string_types, Promise)) and callable(self.transform)): option_label = self.transform(option_label) return html.conditional_escape(force_text(option_label)) def transform_option_html_attrs(self, option_label): if not callable(self.transform_html_attrs): return '' return flatatt(self.transform_html_attrs(option_label)) class ThemableSelectWidget(SelectWidget): """Bootstrap base select field widget.""" def render(self, name, value, attrs=None, choices=()): # NOTE(woodnt): Currently the "attrs" contents are being added to the # select that's hidden. It's unclear whether this is the # desired behavior. In some cases, the attribute should # remain solely on the now-hidden select. But in others # if it should live on the bootstrap button (visible) # or both. 
new_choices = [] initial_value = value for opt_value, opt_label in itertools.chain(self.choices, choices): other_html = self.transform_option_html_attrs(opt_label) data_attr_html = self.get_data_attrs(opt_label) if data_attr_html: other_html += ' ' + data_attr_html opt_label = self.transform_option_label(opt_label) # If value exists, save off its label for use if opt_value == value: initial_value = opt_label if other_html: new_choices.append((opt_value, opt_label, other_html)) else: new_choices.append((opt_value, opt_label)) if value is None and new_choices: initial_value = new_choices[0][1] attrs = self.build_attrs(attrs) id = attrs.pop('id', 'id_%s' % name) template = get_template('horizon/common/fields/_themable_select.html') context = Context({ 'name': name, 'options': new_choices, 'id': id, 'value': value, 'initial_value': initial_value, 'select_attrs': attrs, }) return template.render(context) class DynamicSelectWidget(SelectWidget): """A subclass of the ``Select`` widget which renders extra attributes for use in callbacks to handle dynamic changes to the available choices. """ _data_add_url_attr = "data-add-item-url" def render(self, *args, **kwargs): add_item_url = self.get_add_item_url() if add_item_url is not None: self.attrs[self._data_add_url_attr] = add_item_url return super(DynamicSelectWidget, self).render(*args, **kwargs) def get_add_item_url(self): if callable(self.add_item_link): return self.add_item_link() try: if self.add_item_link_args: return urlresolvers.reverse(self.add_item_link, args=self.add_item_link_args) else: return urlresolvers
.reverse(self.add_item_link) except urlresolvers.NoReverseMatch: return self.add_item_link class ThemableDynamicSelectWidget(ThemableSelectWidget, DynamicSelectWidget): pass class ThemableChoiceField(fields.ChoiceField): """Bootstrap based select field.""" widget = ThemableSelectWidget class DynamicChoiceField(fields.ChoiceField): """A subclass of ``
ChoiceField`` with additional properties that make dynamically updating its elements easier. Notably, the field declaration takes an extra argument, ``add_item_link`` which may be a string or callable defining the URL that should be used for the "add" link associated with the field. """ widget = DynamicSelectWidget def __init__(self, add_item_link=None, add_item_link_args=None, *args, **kwargs): super(DynamicChoiceField, self).__init__(*args, **kwargs) self.widget.add_item_link = add_item_link self.widget.add_item_link_args = add_item_link_args class ThemableDynamicChoiceField(DynamicChoiceField): widget = ThemableDynamicSelectWidget class DynamicTypedChoiceField(DynamicChoiceField, fields.TypedChoiceField): """Simple mix of ``DynamicChoiceField`` and ``TypedChoiceField``.""" pass class ThemableDynamicTypedChoiceField(ThemableDynamicChoiceField, fields.TypedChoiceField): """Simple mix of ``ThemableDynamicChoiceField`` & ``TypedChoiceField``.""" pass class ThemableCheckboxInput(widgets.CheckboxInput): """A subclass of the ``Checkbox`` widget which renders extra markup to allow a custom checkbox experience. 
""" def render(self, name, value, attrs=None): label_for = attrs.get('id', '') if not label_for: attrs['id'] = uuid.uuid4() label_for = attrs['id'] return html.format_html( u'<div class="themable-checkbox">{}<label for="{}"></label></div>', super(ThemableCheckboxInput, self).render(name, value, attrs), label_for ) class ThemableCheckboxChoiceInput(widgets.CheckboxChoiceInput): def render(self, name=None, value=None, attrs=None, choices=()): if self.id_for_label: label_for = html.format_html(' for="{}"', self.id_for_label) else: label_for = '' attrs = dict(self.attrs, **attrs) if attrs else self.attrs return html.format_html( u'<div class="themable-checkbox">{}<label{}>' + u'<span>{}</span></label></div>', self.tag(attrs), label_for, self.choice_label ) class ThemableCheckboxFieldRenderer(widgets.CheckboxFieldRenderer): choice_input_class = ThemableCheckboxChoiceInput class ThemableCheckboxSelectMultiple(widgets.CheckboxSelectMultiple): renderer = ThemableCheckboxFieldRenderer _empty_value = [] class ExternalFileField(fields.FileField): """A special flavor of FileField which is meant to be used in cases when instead of uploading file to Django it should be uploaded to some external location, while the form validation is done as usual. Should be paired with ExternalUploadMeta metaclass embedded into the Form class. """ def __init__(self, *args, **kwargs): super(ExternalFileField, self).__init__(*args, **kwargs) self.widget.attrs.update({'data-external-upload': 'true'}) class ExternalUploadMeta(forms.DeclarativeFieldsMetaclass): """Set this class as the metaclass of a form that contains ExternalFileField in order to process ExternalFileField fields in a specific way. A hidden CharField twin of FieldField is created which contains just the filename (if any file was selected on browser side) and a special `clean` method for FileField is defined which extracts just file name. 
This allows to avoid actual file upload to Django server, yet process form clean() phase as usual. Actual file upload happens entirely on client-side. """ def __new__(mcs, name, bases, attrs): def get_double_name(name): suffix = '__hidden' slen = len(suffix) return name[:-slen] if name.endswith(suffix) else name + suffix def make_clean_method(field_name): def _clean_method(self): value = self.cleaned_data[field_name] if value: self.cleaned_data[get_double_name(field_name)] = value return value return _clean_method new_attrs = {} for attr_name, attr in attrs.i
from __future__ import unicode_literals

from .. import Provider as PhoneNumberProvider


class Provider(PhoneNumberProvider):
    """Phone-number provider for this locale."""

    # Number patterns; the base provider replaces each '#' with a random
    # digit.  The +90 prefix is Turkey's country code, so these presumably
    # follow Turkish numbering conventions ('x###' suffixes look like
    # extensions) -- TODO confirm against the locale this module lives in.
    formats = (
        '+90(###)#######',
        '+90 (###) #######',
        '0### ### ## ##',
        '0##########',
        '0###-### ####',
        '(###)### ####',
        '### # ###',
        '+90(###)###-####x###',
        '+90(###)###-####x####',
    )
response): return () route = ("GET", "/test/test_tuple_1_rv", handler) self.server.router.register(*route) with pytest.raises(HTTPError) as cm: self.request(route[1]) assert cm.value.code == 500 def test_tuple_2_rv(self): @wptserve.handlers.handler def handler(request, response): return [("Content-Length", 4), ("test-header", "test-value")], "test data" route = ("GET", "/test/test_tuple_2_rv", handler) self.server.router.register(*route) resp = self.request(route[1]) self.assertEqual(200, resp.getcode()) self.assertEqual("4", resp.info()["Content-Length"]) self.assertEqual("test-value", resp.info()["test-header"]) self.assertEqual(b"test", resp.read()) def test_tuple_3_rv(self): @wptserve.handlers.handler def handler(request, response): return 202, [("test-header", "test-value")], "test data" route = ("GET", "/test/test_tuple_3_rv", handler) self.server.router.register(*route) resp = self.request(route[1]) self.assertEqual(202, resp.getcode()) self.assertEqual("test-value", resp.info()["test-header"]) self.assertEqual(b"test data", resp.read()) def test_tuple_3_rv_1(self): @wptserve.handlers.handler def handler(request, response): return (202, "Some Status"), [("test-header", "test-value")], "test data" route = ("GET", "/test/test_tuple_3_rv_1", handler) self.server.router.register(*route) resp = self.request(route[1]) self.assertEqual(202, resp.getcode()) self.assertEqual("Some Status", resp.msg) self.assertEqual("test-value", resp.info()["test-header"]) self.assertEqual(b"test data", resp.read()) def test_tuple_4_rv(self): @wptserve.handlers.handler def handler(request, response): return 202, [("test-header", "test-value")], "test data", "garbage" route = ("GET", "/test/test_tuple_1_rv", handler) self.server.router.register(*route) with pytest.raises(HTTPError) as cm: self.request(route[1]) assert cm.value.code == 500 def test_none_rv(self): @wptserve.handlers.handler def handler(request, response): return None route = ("GET", "/test/test_none_rv", handler) 
self.server.router.register(*route) resp = self.request(route[1]) assert resp.getcode() == 200 assert "Content-Length" not in resp.info() assert resp.read() == b"" class TestJSONHandler(TestUsingServer): def test_json_0(self): @wptserve.handlers.json_handler def handler(request, response): return {"data": "test data"} route = ("GET", "/test/test_json_0", handler) self.server.router.register(*route) resp = self.request(route[1]) self.assertEqual(200, resp.getcode()) self.assertEqual({"data": "test data"}, json.load(resp)) def test_json_tuple_2(self): @wptserve.handlers.json_handler def handler(request, response): return [("Test-Header", "test-value")], {"data": "test data"}
route = ("GET", "/test/test_json_tuple_2", handler) self.server.router.register(*route) resp = self.request(route[1]) self.assertEqual(200, resp.getcode()) self.assertEqual("test-value", resp.info()["test-he
ader"]) self.assertEqual({"data": "test data"}, json.load(resp)) def test_json_tuple_3(self): @wptserve.handlers.json_handler def handler(request, response): return (202, "Giraffe"), [("Test-Header", "test-value")], {"data": "test data"} route = ("GET", "/test/test_json_tuple_2", handler) self.server.router.register(*route) resp = self.request(route[1]) self.assertEqual(202, resp.getcode()) self.assertEqual("Giraffe", resp.msg) self.assertEqual("test-value", resp.info()["test-header"]) self.assertEqual({"data": "test data"}, json.load(resp)) class TestPythonHandler(TestUsingServer): def test_string(self): resp = self.request("/test_string.py") self.assertEqual(200, resp.getcode()) self.assertEqual("text/plain", resp.info()["Content-Type"]) self.assertEqual(b"PASS", resp.read()) def test_tuple_2(self): resp = self.request("/test_tuple_2.py") self.assertEqual(200, resp.getcode()) self.assertEqual("text/html", resp.info()["Content-Type"]) self.assertEqual("PASS", resp.info()["X-Test"]) self.assertEqual(b"PASS", resp.read()) def test_tuple_3(self): resp = self.request("/test_tuple_3.py") self.assertEqual(202, resp.getcode()) self.assertEqual("Giraffe", resp.msg) self.assertEqual("text/html", resp.info()["Content-Type"]) self.assertEqual("PASS", resp.info()["X-Test"]) self.assertEqual(b"PASS", resp.read()) def test_import(self): dir_name = os.path.join(doc_root, "subdir") assert dir_name not in sys.path assert "test_module" not in sys.modules resp = self.request("/subdir/import_handler.py") assert dir_name not in sys.path assert "test_module" not in sys.modules self.assertEqual(200, resp.getcode()) self.assertEqual("text/plain", resp.info()["Content-Type"]) self.assertEqual(b"PASS", resp.read()) def test_no_main(self): with pytest.raises(HTTPError) as cm: self.request("/no_main.py") assert cm.value.code == 500 def test_invalid(self): with pytest.raises(HTTPError) as cm: self.request("/invalid.py") assert cm.value.code == 500 def test_missing(self): with 
pytest.raises(HTTPError) as cm: self.request("/missing.py") assert cm.value.code == 404 class TestDirectoryHandler(TestUsingServer): def test_directory(self): resp = self.request("/") self.assertEqual(200, resp.getcode()) self.assertEqual("text/html", resp.info()["Content-Type"]) #Add a check that the response is actually sane def test_subdirectory_trailing_slash(self): resp = self.request("/subdir/") assert resp.getcode() == 200 assert resp.info()["Content-Type"] == "text/html" def test_subdirectory_no_trailing_slash(self): # This seems to resolve the 301 transparently, so test for 200 resp = self.request("/subdir") assert resp.getcode() == 200 assert resp.info()["Content-Type"] == "text/html" class TestAsIsHandler(TestUsingServer): def test_as_is(self): resp = self.request("/test.asis") self.assertEqual(202, resp.getcode()) self.assertEqual("Giraffe", resp.msg) self.assertEqual("PASS", resp.info()["X-Test"]) self.assertEqual(b"Content", resp.read()) #Add a check that the response is actually sane class TestH2Handler(TestUsingH2Server): def test_handle_headers(self): self.conn.request("GET", '/test_h2_headers.py') resp = self.conn.get_response() assert resp.status == 203 assert resp.headers['test'][0] == b'passed' assert resp.read() == b'' def test_only_main(self): self.conn.request("GET", '/test_tuple_3.py') resp = self.conn.get_response() assert resp.status == 202 assert resp.headers['Content-Type'][0] == b'text/html' assert resp.headers['X-Test'][0] == b'PASS' assert resp.read() == b'PASS' def test_handle_data(self): self.conn.request("POST", '/test_h2_data.py', body="hello world!") resp = self.conn.get_response() assert resp.status == 200 assert resp.read() == b'!dlrow olleh' def test_handle_headers_data(self): self.conn.request("POST", '/test_h2_headers_data.py', body="hello world!") resp = self.conn.get_response() assert resp.status == 203 assert resp.headers['test'][0] == b'passed' ass
ace = self.create_cairo_surface(fd, paper_width, paper_height) surface.set_fallback_resolution(300, 300) cr = cairo.Context(surface) fontmap = PangoCairo.font_map_new() fontmap.set_resolution(DPI) pango_context = fontmap.create_context() options = cairo.FontOptions() options.set_hint_metrics(cairo.HINT_METRICS_OFF) if is_quartz(): PangoCairo.context_set_resolution(pango_context, 72) PangoCairo.context_set_font_options(pango_context, options) layout = Pango.Layout(pango_context) PangoCairo.update_context(cr, pango_context) # paginate the document self.paginate_document(layout, page_width, page_height, DPI, DPI) body_pages = self._pages # build the table of contents and alphabetical index toc_page = None index_page = None toc = [] index = {} for page_nr, pa
ge in enumerate(body_pages):
if page.has_toc(): toc_page = page_nr if page.has_index(): index_page = page_nr for mark in page.get_marks(): if mark.type == INDEX_TYPE_ALP: if mark.key in index: if page_nr + 1 not in index[mark.key]: index[mark.key].append(page_nr + 1) else: index[mark.key] = [page_nr + 1] elif mark.type == INDEX_TYPE_TOC: toc.append([mark, page_nr + 1]) # paginate the table of contents rebuild_required = False if toc_page is not None: toc_pages = self.__generate_toc(layout, page_width, page_height, toc) offset = len(toc_pages) - 1 if offset > 0: self.__increment_pages(toc, index, toc_page, offset) rebuild_required = True if index_page and toc_page < index_page: index_page += offset else: toc_pages = [] # paginate the index if index_page is not None: index_pages = self.__generate_index(layout, page_width, page_height, index) offset = len(index_pages) - 1 if offset > 0: self.__increment_pages(toc, index, index_page, offset) rebuild_required = True if toc_page and toc_page > index_page: toc_page += offset else: index_pages = [] # rebuild the table of contents and index if required if rebuild_required: if toc_page is not None: toc_pages = self.__generate_toc(layout, page_width, page_height, toc) if index_page is not None: index_pages = self.__generate_index(layout, page_width, page_height, index) # render the pages if toc_page is not None: body_pages = body_pages[:toc_page] + toc_pages + \ body_pages[toc_page+1:] if index_page is not None: body_pages = body_pages[:index_page] + index_pages + \ body_pages[index_page+1:] self._pages = body_pages for page_nr in range(len(self._pages)): cr.save() cr.translate(left_margin, top_margin) self.draw_page(page_nr, cr, layout, page_width, page_height, DPI, DPI) cr.show_page() cr.restore() # close the surface (file) surface.finish() except IOError as msg: errmsg = "%s\n%s" % (_("Could not create %s") % filename, msg) raise ReportError(errmsg) except Exception as err: errmsg = "%s\n%s" % (_("Could not create %s") % filename, err) raise 
ReportError(errmsg) def __increment_pages(self, toc, index, start_page, offset): """ Increment the page numbers in the table of contents and index. """ start_page += 1 # toc/index numbers start at 1 for n, value in enumerate(toc): page_nr = toc[n][1] toc[n][1] = page_nr + (offset if page_nr > start_page else 0) for key, value in index.items(): index[key] = [page_nr + (offset if page_nr > start_page else 0) for page_nr in value] def __generate_toc(self, layout, page_width, page_height, toc): """ Generate the table of contents. """ self._doc = libcairodoc.GtkDocDocument() self._active_element = self._doc self._pages = [] write_toc(toc, self) self.paginate_document(layout, page_width, page_height, DPI, DPI) return self._pages def __generate_index(self, layout, page_width, page_height, index): """ Generate the index. """ self._doc = libcairodoc.GtkDocDocument() self._active_element = self._doc self._pages = [] write_index(index, self) self.paginate_document(layout, page_width, page_height, DPI, DPI) return self._pages def write_toc(toc, doc): """ Write the table of contents. """ if not toc: return doc.start_paragraph('TOC-Title') doc.write_text(doc.toc_title) doc.end_paragraph() doc.start_table('toc', 'TOC-Table') for mark, page_nr in toc: doc.start_row() doc.start_cell('TOC-Cell') if mark.level == 1: style_name = "TOC-Heading1" elif mark.level == 2: style_name = "TOC-Heading2" else: style_name = "TOC-Heading3" doc.start_paragraph(style_name) doc.write_text(mark.key) doc.end_paragraph() doc.end_cell() doc.start_cell('TOC-Cell') doc.start_paragraph(style_name) doc.write_text(str(page_nr)) doc.end_paragraph() doc.end_cell() doc.end_row() doc.end_table() def write_index(index, doc): """ Write the alphabetical index. 
""" if not index: return doc.start_paragraph('IDX-Title') doc.write_text(doc.index_title) doc.end_paragraph() doc.start_table('index', 'IDX-Table') for key in sorted(index.keys()): doc.start_row() doc.start_cell('IDX-Cell') doc.start_paragraph('IDX-Entry') doc.write_text(key) doc.end_paragraph() doc.end_cell() doc.start_cell('IDX-Cell') doc.start_paragraph('IDX-Entry') pages = [str(page_nr) for page_nr in index[key]] # TODO for Arabic, should the next line's comma be translated? doc.write_text(', '.join(pages)) doc.end_paragraph() doc.end_cell() doc.end_row() doc.end_table() #------------------------------------------------------------------------ # # PdfDoc class # #------------------------------------------------------------------------ class PdfDoc(CairoDocgen): """Render the document into PDF file using Cairo. """ def create_cairo_surface(self, fobj, width_in_points, height_in_points): return cairo.PDFSurface(fobj, width_in_
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# This file is part of level.
# https://github.com/heynemann/level

# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2016, Bernardo Heynemann <heynemann@gmail.com>

from importer import Importer
from preggy import expect
from tornado.testing import gen_test

from level.app import LevelApp
from level.config import Config
from level.json import dumps, loads
from level.context import Context, ServerParameters
from level.services import BaseService
from tests.unit.base import TestCase, WebTestCase


class AppTestCase(TestCase):
    """Tests for LevelApp creation, service initialization and handlers."""

    def setUp(self):
        """Build a Context from fixture config with all services loaded."""
        super(AppTestCase, self).setUp()
        self.server_parameters = ServerParameters(
            io_loop=self.io_loop,
            host='localhost',
            port=8888,
            config_path='./tests/fixtures/test-valid.conf',
            log_level='INFO',
            debug=True,
        )
        self.config = Config()
        self.importer = Importer()
        self.importer.load(
            dict(key='service_classes', module_names=self.config.SERVICES,
                 class_name='Service'),
        )  # load all modules here
        # Instantiate one service per discovered class, named after its module.
        services = []
        for service_class in self.importer.service_classes:
            srv = service_class()
            srv.name = service_class.__module__
            services.append(srv)
        self.importer.services = services
        self.context = Context(self.server_parameters, self.config,
                               self.importer)

    @gen_test
    async def test_can_create_app(self):
        """LevelApp.create returns an app bound to the given context."""
        app = await LevelApp.create(self.context)
        expect(app).not_to_be_null()
        expect(app.context).to_equal(self.context)

    @gen_test
    async def test_can_initialize_services(self):
        """app.initialize() must call initialize_service on every service."""
        class TestService(BaseService):
            def __init__(self, *args, **kw):
                super(TestService, self).__init__(*args, **kw)
                self.initialized = False
                self.name = 'TestService'
                self.app = None

            async def initialize_service(self, app):
                await super(TestService, self).initialize_service(app)
                self.initialized = True

        s = TestService()
        self.context.importer.services = [s]
        app = LevelApp(self.context, [])
        expect(app).not_to_be_null()
        await app.initialize()
        expect(s.initialized).to_be_true()
        expect(s.app).to_equal(app)

    @gen_test
    async def test_can_get_handlers_from_services(self):
        """Handlers contributed by services are appended after the app's own."""
        class TestService(BaseService):
            def __init__(self):
                self.initialized = False
                self.name = 'TestService'
                self.app = None

            async def initialize_service(self, app):
                await super(TestService, self).initialize_service(app)
                self.initialized = True

            async def get_handlers(self):
                return (
                    ('/test', None),
                )

        s = TestService()
        self.context.importer.services = [s]
        app = LevelApp(self.context, [])
        expect(app).not_to_be_null()
        handlers = await app.get_handlers()
        # One built-in handler plus the one the service contributed.
        expect(handlers).to_length(2)
        expect(handlers[1]).to_be_like(
            ('/test', None),
        )


class WebSocketTestCase(WebTestCase):
    """WebSocket round-trip tests against a stub service."""

    def setUp(self):
        """Install a stub service that records messages and answers pings."""
        super(WebSocketTestCase, self).setUp()

        class TestService(BaseService):
            def __init__(self):
                self.message = None
                self.name = 'TestService'

            async def on_message(self, message):
                # 'ping' is answered with 'pong'; everything else is recorded
                # so the test can inspect it.
                if message['type'] == 'ping':
                    await self.publish_message(message['socket_id'], 'pong',
                                               message['payload'])
                else:
                    self.message = message
                    self.socket_id = message['socket_id']

        self.service = TestService()
        self.service.app = self.app
        self.context.importer.services = [self.service]

    @gen_test
    async def test_can_receive_open_message(self):
        """Connecting delivers a core.connection.open message to services."""
        await self.websocket_connect('/ws')
        expect(self.ws).not_to_be_null()
        await self.wait_for(lambda: self.service.message is not None)
        expect(self.service.socket_id).not_to_be_null()
        expect(self.service.message).to_be_like({
            'type': 'core.connection.open',
            'socket_id': self.service.socket_id,
            'payload': {},
        })

    @gen_test
    async def test_can_receive_close_message(self):
        """Closing the socket delivers a core.connection.close message."""
        await self.websocket_connect('/ws')
        expect(self.ws).not_to_be_null()
        # wait for open
        await self.wait_for(lambda: self.service.message is not None)
        self.service.message = None
        self.websocket_close()
        await self.wait_for(lambda: self.service.message is not None)
        expect(self.service.socket_id).not_to_be_null()
        expect(self.service.message).to_be_like({
            'type': 'core.connection.close',
            'socket_id': self.service.socket_id,
            'payload': {},
        })

    @gen_test
    async def test_can_receive_message(self):
        """A custom message arrives with extra keys folded into 'payload'."""
        await self.websocket_connect('/ws')
        expect(self.ws).not_to_be_null()
        await self.ws.write_message(dumps({
            'type': 'custom.message',
            'qwe': 123,
        }))
        await self.wait_for(
            lambda: self.service.message is not None and
            self.service.message['type'] == 'custom.message')
        expect(self.service.socket_id).not_to_be_null()
        expect(self.service.message).to_be_like({
            'type': 'custom.message',
            'socket_id': self.service.socket_id,
            'payload': {
                'qwe': 123,
            },
        })

    @gen_test
    async def test_can_publish_message(self):
        """A 'ping' sent over the socket is answered with a 'pong'."""
        await self.websocket_connect('/ws')
        expect(self.ws).not_to_be_null()
        await self.write_ws_message(dumps({
            'type': 'ping',
            'msg': 'woot?!',
        }))
        response = await self.read_ws_message()
        expect(response).not_to_be_null()
        obj = loads(response)
        expect(obj).to_equal({
            'type': 'pong',
            'socket_id': self.service.socket_id,
            'payload': {
                'msg': 'woot?!',
            }
        })
# -*- coding: utf-8 -*-

"""Shared model fields and defaults."""

import binascii
import os
import secrets


def default_token():
    """Generate default value for token field.

    Returns a 40-character lowercase hex string (20 random bytes).  Uses
    ``secrets.token_hex``, the stdlib API documented for security-sensitive
    tokens; the output format is identical to the previous
    ``binascii.hexlify(os.urandom(20)).decode()``.
    """
    return secrets.token_hex(20)
# moosetree.py --- # # Filename: moosetree.py # Description: # Author: subhasis ray # Maintainer: # Created: Tue Jun 23 18:54:14 2009 (+0530) # Version: # Last-Updated: Sun Jul 5 01:35:11 2009 (+0530) # By: subhasis ray # Update #: 137 # URL: # Keywords: # Compatibility: # # # Commentary: # # # # # Change log: # # # # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, Fifth # Floor, Boston, MA 02110-1301, USA. 
# # # Code: import moose import sys from PyQt4 import QtCore, QtGui class MooseTreeItem(QtGui.QTreeWidgetItem): def __init__(self, *args): QtGui.QTreeWidgetItem.__init__(self, *args) self.mooseObj_ = None def setMooseObject(self, mooseObject): if isinstance(mooseObject, moose.Id): self.mooseObj_ = moose.Neutral(mooseObject) elif isinstance(mooseObject, moose.PyMooseBase): self.mooseObj_ = mooseObject else: raise Error self.setText(0, QtCore.QString(self.mooseObj_.name)) self.setToolTip(0, QtCore.QString('class:' + self.mooseObj_.className)) def getMooseObject(self): return self.mooseObj_ def updateSlot(self, text): self.setText(0, QtCore.QString(self.mooseObj_.name)) class MooseTreeWidget(QtGui.QTreeWidget): def __init__(self, *args): QtGui.QTreeWidget.__init__(self, *args) self.rootObject = moose.Neutral('/') self.itemList = [] self.setupTree(self.rootObject, self, self.itemList) self.setCurrentItem(self.itemList[0]) # Make root the default item def setupTree(self, mooseObject, parent, itemlist): item = MooseTreeItem(parent) item.setMooseObject(mooseObject) itemlist.append(item) for child in mooseObject.children(): childObj = moose.Neutral(child) self.setupTree(childObj, item, itemlist) return item def recreateTree(self): self.clear() self.itemList = [] self.setupTree(moose.Neutral('/'), self, self.itemList) def insertMo
oseObjectSlot(self, class_name): try: class_name = str(class_name) class_obj = eval('moose.' + class_name) current = self.currentItem() new_item = MooseTreeItem(current) parent = current.getMooseObject() # print 'creating new', class_name, 'under', parent.path new_obj = class_obj(class_name, p
arent) new_item.setMooseObject(new_obj) current.addChild(new_item) self.itemList.append(new_item) except AttributeError: print class_name, ': no such class in module moose' if __name__ == '__main__': c = moose.Compartment("c") d = moose.HHChannel("chan", c) app = QtGui.QApplication(sys.argv) widget = MooseTreeWidget() # widget = QtGui.QTreeWidget() # items = [] # root = moose.Neutral('/') # parent = widget # item = setupTree(root, widget, items) # while stack: # mooseObject = stack.pop() # item = QtGui.QTreeWidgetItem(parent) # item.setText(0, widget.tr(mooseObject.name)) # parent = item # for child in mooseObject.children(): # stack.append(moose.Neutral(child)) widget.show() sys.exit(app.exec_()) # # moosetree.py ends here
# -*- encoding: utf-8 -*-
# Odoo/OpenERP addon manifest: Account Bank Statement Import.
{
    'name': 'Account Bank Statement Import',
    'category': 'Banking addons',
    'version': '8.0.1.0.1',
    # Implicit string concatenation: both parties are credited as authors.
    'author': 'OpenERP SA,'
              'Odoo Community Association (OCA)',
    'website': 'https://github.com/OCA/bank-statement-import',
    'depends': ['account'],
    # XML data files loaded on module installation/upgrade.
    'data': [
        "views/account_config_settings.xml",
        'views/account_bank_statement_import_view.xml',
    ],
    # Demo records, loaded only into databases created with demo data.
    'demo': [
        'demo/fiscalyear_period.xml',
        'demo/partner_bank.xml',
    ],
    'auto_install': False,
    # NOTE(review): marked not installable -- presumably retired or migrated
    # to a newer series; confirm before re-enabling.
    'installable': False,
}
import os
import ycm_core

# Fallback compilation flags, used only when no compilation database is
# available (see FlagsForFile below).
flags = [
  '-Wall',
  '-Wextra',
  '-Werror',
  '-x',
  'c',
  '-Iinclude',
]

# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = 'obj'

# NOTE(review): 'obj' is a relative path, resolved against the process working
# directory -- confirm this resolves correctly for your editor setup.
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None

SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]


def DirectoryOfThisScript():
  # Absolute path of the directory containing this config file.
  return os.path.dirname( os.path.abspath( __file__ ) )


def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  # Rewrite relative include/search paths in `flags` to be absolute with
  # respect to `working_directory`; all other flags pass through unchanged.
  if not working_directory:
    return list( flags )
  new_flags = []
  make_next_absolute = False
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  for flag in flags:
    new_flag = flag

    # Previous flag was a bare path option ('-I' then a separate path arg).
    if make_next_absolute:
      make_next_absolute = False
      if not flag.startswith( '/' ):
        new_flag = os.path.join( working_directory, flag )

    for path_flag in path_flags:
      # Separate form: '-I' followed by the path as the next flag.
      if flag == path_flag:
        make_next_absolute = True
        break

      # Attached form: '-Ipath'.
      if flag.startswith( path_flag ):
        path = flag[ len( path_flag ): ]
        new_flag = path_flag + os.path.join( working_directory, path )
        break

    if new_flag:
      new_flags.append( new_flag )
  return new_flags


def IsHeaderFile( filename ):
  # Treat the usual C/C++ header extensions as headers.
  extension = os.path.splitext( filename )[ 1 ]
  return extension in [ '.h', '.hxx', '.hpp', '.hh' ]


def GetCompilationInfoForFile( filename ):
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  # Only called when `database` is set (see FlagsForFile).
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile( replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    # No sibling source file found: no flags for this header.
    return None
  return database.GetCompilationInfoForFile( filename )


def FlagsForFile( filename, **kwargs ):
  # Entry point invoked by ycmd for every file it wants flags for.
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None

    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )

    # NOTE: This is just for YouCompleteMe; it's highly likely that your project
    # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
    # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
    try:
      final_flags.remove( '-stdlib=libc++' )
    except ValueError:
      pass
  else:
    # No compilation database: fall back to the static flag list above.
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )

  return { 'flags': final_flags, 'do_cache': True }
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # This module copyright (C) 2015 Therp BV <http://therp.nl>. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICU
LAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import
ir_ui_view from . import event_registration
# -*- coding: utf-8 -*-
from openerp.tests import common


class TestProjectScrum(common.TransactionCase):
    """Smoke test for the project_scrum module."""

    def test_project_scrum(self):
        # was: `def test_project_scrum(self)` -- the missing colon was a
        # SyntaxError that made the whole test module unloadable
        env = self.env
        # NOTE(review): 'project_scrum.0' is an unusual model name; confirm it
        # matches the model actually declared by this module.
        record = env['project_scrum.0'].create({})
        self.assertTrue(record)
from flask_wtf import Form
from wtforms import TextField, DecimalField, TextAreaField, DateField, validators, PasswordField, BooleanField


class CommentForm(Form):
    """A geo-tagged comment: title, body, coordinates and date."""
    text = TextField('Title', [validators.Required()])
    text2 = TextAreaField('Body')
    longitude = DecimalField('Longitude')
    # was labelled 'Longitude' (copy-paste slip) -- corrected to 'Latitude'
    latitude = DecimalField('Latitude')
    date = DateField('Date')


class SignupForm(Form):
    """New-account registration with password confirmation."""
    username = TextField('Username', [validators.Required()])
    password = PasswordField('Password', [validators.Required(),
                                          validators.EqualTo('confirm', message='Passwords must match')])
    confirm = PasswordField('Confirm Password', [validators.Required()])
    email = TextField('eMail', [validators.Required(), validators.Email()])
    # accept_tos = BooleanField('I accept the TOS', [validators.Required()])


class LoginForm(Form):
    """Sign-in credentials."""
    username = TextField('Username', [validators.Required()])
    password = PasswordField('Password', [validators.Required()])


class PasswordResetForm(Form):
    """Request a reset by username or e-mail; neither field is mandatory."""
    username = TextField('Username')
    email = TextField('eMail')


class PasswordChangeForm(Form):
    """Set a new password for an already-authenticated user."""
    password = PasswordField('Password', [validators.Required()])
# test MicroPython-specific features of struct
try:
    import ustruct as struct
except:
    try:
        import struct
    except ImportError:
        # No struct module at all: tell the test runner to skip, not fail.
        import sys
        print("SKIP")
        sys.exit()

class A():
    # Minimal object to round-trip through pack/unpack.
    pass

# pack and unpack objects
# "<O" is a MicroPython extension that packs a raw object reference;
# CPython's struct module rejects this format character.
o = A()
s = struct.pack("<O", o)
o2 = struct.unpack("<O", s)
# Identity (not just equality) must survive the round trip.
print(o is o2[0])
"""Example code using Python threads. Copyright
2010 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from threading import Thread from time import sleep def counter(xs, delay=1): for x in xs: print x sleep(delay) # one thread counts backwards, fast t = Thread(target=counter, ar
gs=[range(100, 1, -1), 0.25]) t.start() # the other thread count forwards, slow counter(range(1, 100), 1)
import os
import time
import json
import pprint

from util import hook


def readConfig():
    ### Read config json and parse it
    # Config is resolved against the *current working directory*, not the
    # module file. NOTE(review): confirm the bot is always launched from its
    # install directory.
    confJson = None
    with open(os.getcwd() + '/antiFloodBotConfig.json', 'r') as confFile:
        confJson = confFile.read()
    return json.loads(confJson)


inputs = {}  # per-nick list of message times (unix timestamps, seconds)
kicked = []  # nicknames already kicked once; a repeat offence earns a ban
conf = readConfig()
timeIntervalScope = conf['timeIntervalScope']  # sliding window length [sec]
entryThreshold = conf['entryThreshold']  # how many entries are allowed inside the window
logFile = conf['logFile']


@hook.event('PRIVMSG')
def antiFlood(inp, nick=None, msg=None, conn=None, chan=None):
    # Rate-limit per nick: kick on the first offence, ban+kick on a repeat.
    if (nick not in inputs):
        inputs[nick] = []
    currentTime = time.time()
    timeThreshold = currentTime - timeIntervalScope
    inputs[nick].append(currentTime)
    # Drop entries older than the configured window. Python 2 filter()
    # returns a list, so inputs[nick] stays a list here.
    inputs[nick] = filter(lambda x: x > timeThreshold, inputs[nick])
    if len(inputs[nick]) >= entryThreshold:  # if user has good day, kick one
        explanationMessage = conf['kickMessage']
        file = open(logFile, 'a')
        file.write('Trying to kick %s on channel %s \n' % (nick, chan))
        if nick in kicked:
            # Repeat offender: set a channel ban before kicking.
            explanationMessage = conf['banMessage']
            out = "MODE %s +b %s" % (chan, nick)
            conn.send(out)
            file.write('%s is kicked with ban \n' % (nick))
        out = "KICK %s %s : %s" % (chan, nick, explanationMessage)
        conn.send(out)
        kicked.append(nick)
        file.close()
    # todo
    # if the same user joins again within 24 hour and keeps spamming temp ban in XX time.
    # step 3) if the same user joins after the removal of the ban and spams, permanent ban.


@hook.event('PRIVMSG')
def paramDump(inp, nick=None, msg=None, conn=None, chan=None):
    # Debug helper: dump every PRIVMSG hook parameter to the log file.
    def saveToFile(file, label, obj):
        file.write("===== " + label + " ======== \n")
        file.write("type " + str(type (obj)) + " ========\n")
        file.write("methods " + str(dir(obj)) + " ========\n")
        file.write("properties ========\n")
        pprint.pprint(obj, file)
        file.write("\n\n\n")
    file = open(logFile, 'a')
    saveToFile(file, "inp", inp)
    saveToFile(file, "nick", nick)
    saveToFile(file, "msg", msg)
    saveToFile(file, "chan", chan)
    saveToFile(file, "conn", conn)
    file.close()


@hook.event("004")
def onConnect(param, conn=None, raw=None):
    # 004 = RPL_MYINFO, sent once registration with the IRC server completes.
    conn.send("Antiflod bot is ready")
import random
import unittest

from minecraftd.common import tmux_id

"""
def tmux_id(id_list):
    random.seed()
    new_id = random.randint(1,100)
    while new_id in id_list:
        new_id = random.randint(1,100)
    return new_id
"""


class CommonTest(unittest.TestCase):
    """Exercises minecraftd.common.tmux_id against a random id pool."""

    def setUp(self):
        # Ten distinct ids drawn from 0..999.
        self.id_list = random.sample(range(1000), 10)

    def test(self):
        appended = self.id_list[:]
        print(appended)
        baseline = self.id_list[:]
        print(baseline)
        fresh_id = tmux_id(self.id_list)
        print(fresh_id)
        appended.append(fresh_id)
        print(self.id_list)
        print(appended)
        print(baseline)
        # A genuinely new id grows the de-duplicated set by exactly one.
        self.assertEqual(len(set(appended)), len(set(baseline)) + 1)


if __name__ == '__main__':
    unittest.main()
# Reads a count and a whitespace-separated list of integers from stdin,
# tallies how often each pairwise absolute difference occurs, then prints
# the largest difference together with its frequency.
l=int(input())           # number of values
d=list(input().split())  # values kept as strings; converted per comparison
# NOTE(review): seeds difference 0 with count 0 -- if the maximum difference
# turns out to be 0 (all values equal), the printed count is 0, not the
# number of pairs; confirm this matches the intended problem statement.
s={0:0}
for i in range(l):
    for j in range(i+1,l):
        diff=abs(int(d[i])-int(d[j]))
        if diff not in s:
            # abs() here is redundant: diff is already non-negative.
            s[abs(diff)]=1
        else:
            s[diff]=s[diff]+1
# Print "<difference> <count>" for the largest difference observed.
f =lambda x:print(str(x)+" "+str(s[x]))
f(max(s.keys()))
#!/usr/bin/env python
# -*- coding: utf-8 -*-


def problem05(nrlst, oplst, answer):
    """Return every expression formed by joining the numbers in `nrlst`
    (in order) with operators from `oplst` that evaluates to `answer`.

    An empty operator string concatenates adjacent digits. Results are
    listed in depth-first order following the order of `oplst`.
    """
    if not nrlst:
        return []

    matches = []

    def _search(partial, idx):
        # Depth-first: extend `partial` with each operator and the next
        # number; evaluate once every number has been consumed.
        if idx < len(nrlst):
            for joiner in oplst:
                _search(partial + joiner + str(nrlst[idx]), idx + 1)
        elif eval(partial) == answer:
            matches.append(partial)

    _search(str(nrlst[0]), 1)
    return matches


if __name__ == '__main__':
    ANSWER = 100
    for expr in problem05(range(1, 10), [' + ', ' - ', ''], ANSWER):
        print('%s = %s' % (expr, ANSWER))
# -*- coding: utf-8 -*-
'''
Manage running applications.
Similar to `ps`, you can treat running applications as unix processes.

On OS X, there is a higher level Cocoa functionality (see NSApplication) which
responds to events sent through the notification center. This module operates
at that level.

:maintainer:    Mosen <mosen@github.com>
:maturity:      beta
:depends:       objc
:platform:      darwin
'''
import logging
import salt.utils  # NOTE(review): imported but not referenced in this module

log = logging.getLogger(__name__)

__virtualname__ = 'app'

# Expose this module only when the PyObjC Cocoa bridge is importable.
HAS_LIBS = False
try:
    from Cocoa import NSWorkspace
    HAS_LIBS = True
except ImportError:
    log.debug('Execution module not suitable because one or more imports failed.')


def __virtual__():
    '''
    Only load module if we are running on OS X.
    '''
    return __virtualname__ if HAS_LIBS else False


def quit(appname, blocking=False):
    '''
    Ask an application to quit. Does not guarantee that the application
    will quit without user interaction. Does not block until the
    application quits.

    Returns the acknowledgement from the application, or None when no
    running application's localized name matches ``appname``.
    ``blocking`` is accepted but currently ignored.

    CLI Example::

        salt '*' app.quit 'Safari'
    '''
    workSpace = NSWorkspace.sharedWorkspace()
    applications = workSpace.runningApplications()

    for app in applications:
        if app.localizedName() == appname:
            acknowledged = app.terminate()
            return acknowledged

    return None


def force_quit(appname, blocking=False):
    '''
    Force an application to quit aka `Force Quit`. Does not block until the
    application quits.

    Returns the acknowledgement from the application, or None when no
    running application's localized name matches ``appname``.
    ``blocking`` is accepted but currently ignored.

    CLI Example::

        salt '*' app.force_quit 'Safari'
    '''
    workSpace = NSWorkspace.sharedWorkspace()
    applications = workSpace.runningApplications()

    for app in applications:
        if app.localizedName() == appname:
            acknowledged = app.forceTerminate()
            return acknowledged

    return None


def launch(application):
    '''
    Open an Application by name.
    This does not need to be the full path to the application, and does not
    need to have an .app extension.

    Returns the boolean status reported by NSWorkspace.

    CLI Example::

        salt '*' app.launch 'TextEdit'
    '''
    workSpace = NSWorkspace.sharedWorkspace()
    status = workSpace.launchApplication_(application)
    return status


def processes():
    '''
    Get a list of running processes in the user session
    (sorted localized application names).

    TODO: optional get by bundle ID
    TODO: optional get hidden
    '''
    workSpace = NSWorkspace.sharedWorkspace()
    appList = workSpace.runningApplications()
    names = [app.localizedName() for app in appList]
    names.sort()
    return names


def frontmost():
    '''
    Get the name of the frontmost application
    '''
    workSpace = NSWorkspace.sharedWorkspace()
    app = workSpace.frontmostApplication()
    return app.localizedName()
# Copyright (C) 2007, Red Hat, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., 59 Temple Place - Suite 330, # Boston, MA 02111-1307, USA. """ The animator module provides a simple framwork to create animations. Example: Animate the size of a window:: from gi.repository import Gtk from sugar3.graphics.animator import Animator, Animation # Construct a 5 second animator animator = Animator(5) # Construct a window to animate w = Gtk.Window() w.connect('destroy', Gtk.main_quit) # Start the animation when the window is shown w.connect('realize', lambda self: animator.start()) w.show() # Create an animation subclass to animate the widget class SizeAnimation(Animation): def __init__(self): # Tell the animation to give us values between 20 and # 420 during the animation
Animation.__init__(self, 20, 420) def next_frame(self, frame): size = int(frame) w.resize(size, size) # Add the animation the the animator animation = SizeAnimation() animator.add(animation) # The animation needs to run inside a GObject main loop Gtk.main() STABLE. """ import time from gi.repository import GObject
from gi.repository import GLib

# Easing modes for Animator.
EASE_OUT_EXPO = 0
EASE_IN_EXPO = 1


class Animator(GObject.GObject):
    '''
    The animator class manages the timing for calling the
    animations. The animations can be added using the `add` function
    and then started with the `start` function. If multiple animations
    are added, then they will be played back at the same time and rate
    as each other.

    The `completed` signal is emitted upon the completion of the
    animation and also when the `stop` function is called.

    Args:
        duration (float): the duration of the animation in seconds

        fps (int, optional): the number of animation callbacks to make
            per second (frames per second)

        easing (int): the desired easing mode, either `EASE_OUT_EXPO`
            or `EASE_IN_EXPO`

    .. note::

        When creating an animation, take into account the limited cpu power
        on some devices, such as the XO. Setting the fps too high can
        use significant cpu usage on the XO.
    '''

    __gsignals__ = {
        'completed': (GObject.SignalFlags.RUN_FIRST, None, ([])),
    }

    def __init__(self, duration, fps=20, easing=EASE_OUT_EXPO):
        GObject.GObject.__init__(self)
        self._animations = []
        self._duration = duration
        self._interval = 1.0 / fps  # seconds between frame callbacks
        self._easing = easing
        self._timeout_sid = 0       # GLib source id; 0 means "not running"
        self._start_time = None

    def add(self, animation):
        '''
        Add an animation to this animator

        Args:
            animation (:class:`sugar3.graphics.animator.Animation`):
                the animation instance to add
        '''
        self._animations.append(animation)

    def remove_all(self):
        '''
        Remove all animations and stop this animator
        '''
        self.stop()
        self._animations = []

    def start(self):
        '''
        Start the animation running.  This will stop and restart the
        animation if the animation is currently running
        '''
        if self._timeout_sid:
            self.stop()
        self._start_time = time.time()
        self._timeout_sid = GLib.timeout_add(
            int(self._interval * 1000), self._next_frame_cb)

    def stop(self):
        '''
        Stop the animation and emit the `completed` signal
        '''
        if self._timeout_sid:
            GObject.source_remove(self._timeout_sid)
            self._timeout_sid = 0
            self.emit('completed')

    def _next_frame_cb(self):
        # GLib timeout callback: advance all animations by one frame.
        # Clamp elapsed time into [0, duration] to guard against clock jitter.
        current_time = min(self._duration, time.time() - self._start_time)
        current_time = max(current_time, 0.0)

        for animation in self._animations:
            animation.do_frame(current_time, self._duration, self._easing)

        if current_time == self._duration:
            self.stop()
            return False  # remove the GLib timeout source
        else:
            return True   # keep ticking


class Animation(object):
    '''
    The animation class is a base class for creating an animation.
    It should be subclassed.  Subclasses should specify a `next_frame`
    function to set the required properties based on the animation
    progress.  The range of the `frame` value passed to the
    `next_frame` function is defined by the `start` and `end` values.

    Args:
        start (float): the first `frame` value for the `next_frame` method

        end (float): the last `frame` value for the `next_frame` method

    .. code-block:: python

        # Create an animation subclass
        class MyAnimation(Animation):
            def __init__(self, thing):
                # Tell the animation to give us values between 0.0 and
                # 1.0 during the animation
                Animation.__init__(self, 0.0, 1.0)
                self._thing = thing

            def next_frame(self, frame):
                # Use the `frame` value to set properties
                self._thing.set_green_value(frame)
    '''

    def __init__(self, start, end):
        self.start = start
        self.end = end

    def do_frame(self, t, duration, easing):
        '''
        This method is called by the animator class every frame.  This
        method calculates the `frame` value to then call `next_frame`.

        Args:
            t (float): the current time elapsed of the animation in seconds

            duration (float): the length of the animation in seconds

            easing (int): the easing mode passed to the animator
        '''
        start = self.start
        change = self.end - self.start

        if t == duration:
            # last frame
            frame = self.end
        else:
            # Exponential easing curves (classic Robert Penner formulas).
            if easing == EASE_OUT_EXPO:
                frame = change * (-pow(2, -10 * t / duration) + 1) + start
            elif easing == EASE_IN_EXPO:
                frame = change * pow(2, 10 * (t / duration - 1)) + start

        self.next_frame(frame)

    def next_frame(self, frame):
        '''
        This method is called every frame and should be overridden by
        subclasses.

        Args:
            frame (float): a value between `start` and `end` representing
                the current progress in the animation
        '''
        pass
mode. n_features : int, default=None The number of features to use. If None, it will be inferred from the maximum column index occurring in any of the files. This can be set to a higher value than the actual number of features in any of the input files, but setting it to a lower value will cause an exception to be raised. dtype : numpy data type, default=np.float64 Data type of dataset to be loaded. This will be the data type of the output numpy arrays ``X`` and ``y``. multilabel : bool, default=False Samples may have several labels each (see
ERROR: type should be string, got " https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)\n\n zero_based : bool or \"auto\", default=\"auto\"\n Whether column indices in f are zero-based (True) or one-based\n (False). If column indices are one-based, they are transformed to\n zero-based to match Python/NumPy conventions.\n "
If set to "auto", a heuristic check is applied to determine this from the file contents. Both kinds of files occur "in the wild", but they are unfortunately not self-identifying. Using "auto" or True should always be safe when no offset or length is passed. If offset or length are passed, the "auto" mode falls back to zero_based=True to avoid having the heuristic check yield inconsistent results on different segments of the file. query_id : bool, default=False If True, will return the query_id array for each file. offset : int, default=0 Ignore the offset first bytes by seeking forward, then discarding the following bytes up until the next new line character. length : int, default=-1 If strictly positive, stop reading any new line of data once the position in the file has reached the (offset + length) bytes threshold. Returns ------- [X1, y1, ..., Xn, yn] where each (Xi, yi) pair is the result from load_svmlight_file(files[i]). If query_id is set to True, this will return instead [X1, y1, q1, ..., Xn, yn, qn] where (Xi, yi, qi) is the result from load_svmlight_file(files[i]) Notes ----- When fitting a model to a matrix X_train and evaluating it against a matrix X_test, it is essential that X_train and X_test have the same number of features (X_train.shape[1] == X_test.shape[1]). This may not be the case if you load the files individually with load_svmlight_file. 
See Also -------- load_svmlight_file """ if (offset != 0 or length > 0) and zero_based == "auto": # disable heuristic search to avoid getting inconsistent results on # different segments of the file zero_based = True if (offset != 0 or length > 0) and n_features is None: raise ValueError("n_features is required when offset or length is specified.") r = [ _open_and_load( f, dtype, multilabel, bool(zero_based), bool(query_id), offset=offset, length=length, ) for f in files ] if ( zero_based is False or zero_based == "auto" and all(len(tmp[1]) and np.min(tmp[1]) > 0 for tmp in r) ): for _, indices, _, _, _ in r: indices -= 1 n_f = max(ind[1].max() if len(ind[1]) else 0 for ind in r) + 1 if n_features is None: n_features = n_f elif n_features < n_f: raise ValueError( "n_features was set to {}, but input file contains {} features".format( n_features, n_f ) ) result = [] for data, indices, indptr, y, query_values in r: shape = (indptr.shape[0] - 1, n_features) X = sp.csr_matrix((data, indices, indptr), shape) X.sort_indices() result += X, y if query_id: result.append(query_values) return result def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id): X_is_sp = int(hasattr(X, "tocsr")) y_is_sp = int(hasattr(y, "tocsr")) if X.dtype.kind == "i": value_pattern = "%d:%d" else: value_pattern = "%d:%.16g" if y.dtype.kind == "i": label_pattern = "%d" else: label_pattern = "%.16g" line_pattern = "%s" if query_id is not None: line_pattern += " qid:%d" line_pattern += " %s\n" if comment: f.write( ( "# Generated by dump_svmlight_file from scikit-learn %s\n" % __version__ ).encode() ) f.write( ("# Column indices are %s-based\n" % ["zero", "one"][one_based]).encode() ) f.write(b"#\n") f.writelines(b"# %s\n" % line for line in comment.splitlines()) for i in range(X.shape[0]): if X_is_sp: span = slice(X.indptr[i], X.indptr[i + 1]) row = zip(X.indices[span], X.data[span]) else: nz = X[i] != 0 row = zip(np.where(nz)[0], X[i, nz]) s = " ".join(value_pattern % (j + one_based, 
x) for j, x in row) if multilabel: if y_is_sp: nz_labels = y[i].nonzero()[1] else: nz_labels = np.where(y[i] != 0)[0] labels_str = ",".join(label_pattern % j for j in nz_labels) else: if y_is_sp: labels_str = label_pattern % y.data[i] else: labels_str = label_pattern % y[i] if query_id is not None: feat = (labels_str, query_id[i], s) else: feat = (labels_str, s) f.write((line_pattern % feat).encode("ascii")) def dump_svmlight_file( X, y, f, *, zero_based=True, comment=None, query_id=None, multilabel=False ): """Dump the dataset in svmlight / libsvm file format. This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)] Target values. Class labels must be an integer or float, or array-like objects of integer or float for multilabel classifications. f : string or file-like in binary mode If string, specifies the path that will contain the data. If file-like, data will be written to f. f should be opened in binary mode. zero_based : boolean, default=True Whether column indices should be written zero-based (True) or one-based (False). comment : string, default=None Comment to insert at the top of the file. This should be either a Unicode string, which will be encoded as UTF-8, or an ASCII byte string. If a comment is given, then it will be preceded by one that identifies the file as having been dumped by scikit-learn. Note that not all tools grok comments in SVMlight files. query_id : array-like of shape (n_samples,), default=None Array containing pairwise preference constraints (qid in svmlight format). 
multilabel : boolean, default=False Samples may have several labels each (see https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html) .. versionadded:: 0.17 parameter *multilabel* to support multilabel datasets. """ if comment is not None: # Convert comment string to list of lines in UTF-8. # If a byte string is passed, then check whether it's ASCII; # if a user wants to get fancy, they'll have to decode themselves. # Avoid mention of str and unicode types for Python 3.x compat. if isinstance(comment, bytes): comment.decode("ascii") # just for the exce
"legal", "community", ) ) RESERVED_PROJECT_SLUGS = frozenset( ( "api-keys", "audit-log", "auth", "members", "projects", "rate-limits", "repos", "settings", "teams", "billing", "payments", "legal", "subscription", "support", "integrations", "developer-settings", "usage", ) ) LOG_LEVELS = { logging.NOTSET: "sample", logging.DEBUG: "debug", logging.INFO: "info", logging.WARNING: "warning", logging.ERROR: "error", logging.FATAL: "fatal", } DEFAULT_LOG_LEVEL = "error" DEFAULT_LOGGER_NAME = "" LOG_LEVELS_MAP = {v: k for k, v in six.iteritems(LOG_LEVELS)} # Default alerting threshold values DEFAULT_ALERT_PROJECT_THRESHOLD = (500, 25) # 500%, 25 events DEFAULT_ALERT_GROUP_THRESHOLD = (1000, 25) # 1000%, 25 events # Default sort option for the group stream DEFAULT_SORT_OPTION = "date" # Setup languages for only available locales _language_map = dict(settings.LANGUAGES) LANGUAGES = [(k, _language_map[k]) for k in get_all_languages() if k in _language_map] del _language_map # TODO(dcramer): We eventually want to make this user-editable TAG_LABELS = { "exc_type": "Exception Type", "sentry:user": "User", "sentry:release": "Release", "sentry:dist": "Distribution", "os": "OS", "url": "URL", "server_name": "Server", } PROTECTED_TAG_KEYS = frozenset(["environment", "release", "sentry:release"]) # Don't use this variable directly. 
If you want a list of rules that are registered in # the system, access them via the `rules` registry in sentry/rules/__init__.py _SENTRY_RULES = ( "sentry.mail.actions.NotifyEmailAction", "sentry.rules.actions.notify_event.NotifyEventAction", "sentry.rules.actions.notify_event_service.NotifyEventServiceAction", "sentry.rules.conditions.every_event.EveryEventCondition", "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition", "sentry.rules.conditions.regression_event.RegressionEventCondition", "sentry.rules.conditions.reappeared_event.ReappearedEventCondition", "sentry.rules.conditions.tagged_event.TaggedEventCondition", "sentry.rules.conditions.event_frequency.EventFrequencyCondition", "sentry.rules.conditions.event_frequency.EventUniqueUserFrequencyCondition", "sentry.rules.conditions.event_attribute.EventAttributeCondition", "sentry.rules.conditions.level.LevelCondition", "sentry.rules.filters.age_comparison.AgeComparisonFilter", "sentry.rules.filters.issue_occurrences.IssueOccurrencesFilter", "sentry.rules.filters.assigned_to.AssignedToFilter", "sentry.rules.filters.latest_release.LatestReleaseFilter", # The following filters are duplicates of their respective conditions and are conditionally shown if the user has issue alert-filters "sentry.rules.filters.event_attribute.EventAttributeFilter", "sentry.rules.filters.tagged_event.TaggedEventFilter", "sentry.rules.filters.level.LevelFilter", ) MIGRATED_CONDITIONS = frozenset( [ "sentry.rules.conditions.tagged_event.TaggedEventCondition", "sentry.rules.conditions.event_attribute.EventAttributeCondition", "sentry.rules.conditions.level.LevelCondition", ] ) TICKET_ACTIONS
= frozenset( [ "sentry.integrations.jira.notify_action.JiraCreateTicketAction", "sentry.integrations.vsts.notify_action.AzureDevopsCreateTicketAction", ] ) # methods as defined by http://www.w3.org
/Protocols/rfc2616/rfc2616-sec9.html + PATCH HTTP_METHODS = ("GET", "POST", "PUT", "OPTIONS", "HEAD", "DELETE", "TRACE", "CONNECT", "PATCH") # See https://github.com/getsentry/relay/blob/master/relay-general/src/protocol/constants.rs VALID_PLATFORMS = sentry_relay.VALID_PLATFORMS OK_PLUGIN_ENABLED = _("The {name} integration has been enabled.") OK_PLUGIN_DISABLED = _("The {name} integration has been disabled.") OK_PLUGIN_SAVED = _("Configuration for the {name} integration has been saved.") WARN_SESSION_EXPIRED = "Your session has expired." # TODO: translate this # Maximum length of a symbol MAX_SYM = 256 # Known debug information file mimetypes KNOWN_DIF_FORMATS = { "text/x-breakpad": "breakpad", "application/x-mach-binary": "macho", "application/x-elf-binary": "elf", "application/x-dosexec": "pe", "application/x-ms-pdb": "pdb", "text/x-proguard+plain": "proguard", "application/x-sentry-bundle+zip": "sourcebundle", } NATIVE_UNKNOWN_STRING = "<unknown>" # Maximum number of release files that can be "skipped" (i.e., maximum paginator offset) # inside release files API endpoints. # If this number is too large, it may cause problems because of inefficient # LIMIT-OFFSET database queries. # These problems should be solved after we implement artifact bundles workflow. MAX_RELEASE_FILES_OFFSET = 20000 # to go from an integration id (in _platforms.json) to the platform # data, such as documentation url or humanized name. 
# example: java-logback -> {"type": "framework", # "link": "https://docs.sentry.io/clients/java/integrations/#logback", # "id": "java-logback", # "name": "Logback"} INTEGRATION_ID_TO_PLATFORM_DATA = {} def _load_platform_data(): INTEGRATION_ID_TO_PLATFORM_DATA.clear() data = load_doc("_platforms") if not data: return for platform in data["platforms"]: integrations = platform.pop("integrations") if integrations: for integration in integrations: integration_id = integration.pop("id") if integration["type"] != "language": integration["language"] = platform["id"] INTEGRATION_ID_TO_PLATFORM_DATA[integration_id] = integration _load_platform_data() # special cases where the marketing slug differs from the integration id # (in _platforms.json). missing values (for example: "java") should assume # the marketing slug is the same as the integration id: # javascript, node, python, php, ruby, go, swift, objc, java, perl, elixir MARKETING_SLUG_TO_INTEGRATION_ID = { "kotlin": "java", "scala": "java", "spring": "java", "android": "java-android", "react": "javascript-react", "angular": "javascript-angular", "angular2": "javascript-angular2", "ember": "javascript-ember", "backbone": "javascript-backbone", "vue": "javascript-vue", "express": "node-express", "koa": "node-koa", "django": "python-django", "flask": "python-flask", "sanic": "python-sanic", "tornado": "python-tornado", "celery": "python-celery", "rq": "python-rq", "bottle": "python-bottle", "pythonawslambda": "python-awslambda", "pyramid": "python-pyramid", "pylons": "python-pylons", "laravel": "php-laravel", "symfony": "php-symfony2", "rails": "ruby-rails", "sinatra": "ruby-sinatra", "dotnet": "csharp", } # to go from a marketing page slug like /for/android/ to the integration id # (in _platforms.json), for looking up documentation urls, etc. 
def get_integration_id_for_marketing_slug(slug): if slug in MARKETING_SLUG_TO_INTEGRATION_ID: return MARKETING_SLUG_TO_INTEGRATION_ID[slug] if slug in INTEGRATION_ID_TO_PLATFORM_DATA: return slug # special cases where the integration sent with the SDK differ from # the integration id (in _platforms.json) # {PLATFORM: {INTEGRATION_SENT: integration_id, ...}, ...} PLATFORM_INTEGRATION_TO_INTEGRATION_ID = { "java": {"java.util.logging": "java-logging"}, # TODO: add more special cases... } # to go from event data to the integration id (in _platforms.json), # for example an event like: # {"platform": "java", # "sdk": {"name": "sentry-java", # "integrations": ["java.util.logging"]}} -> java-logging def get_integration_id_for_event(platform, sdk_name, integrations): if integrations: for integration in integrations: # check special cases if ( platform in PLATFORM_INTEGRATION_TO_INTEGRATION_ID and integ
#!/usr/bin/env python
# coding=utf-8
"""Expand hostname patterns like ``xx-xx-[1-3]`` into individual hostnames."""

import sys
import argparse

parser = argparse.ArgumentParser(
    description='convert a non-standard hostname like xx-xx-[1-3] to a '
                'expansion state',
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog="""
Sample:
    $ ./converter.py xxx-xxx-\[1-3\]
    xxx-xxx-1
    xxx-xxx-2
    xxx-xxx-3

Tips:
    You can pass many args behind the command,and you need to not forget
    to escape the character of [ and ]
""")

parser.add_argument(
    'hostname_pattern',
    help='hostname pattern(s) of the form prefix-[start-end]',
    type=str,
    nargs='+')


def expand_hostname_pattern(pattern):
    """Return the list of hostnames described by *pattern*.

    A pattern is ``<prefix>-[<start>-<end>]``; the bracketed, dash-separated
    numeric range is expanded inclusively, e.g. ``a-b-[1-3]`` gives
    ``['a-b-1', 'a-b-2', 'a-b-3']``.

    Raises ValueError if the range parts are not integers.
    """
    parts = pattern.split('-')
    # Everything before the last two dash-separated fields is the prefix.
    prefix = '-'.join(parts[:-2])
    start_part, end_part = parts[-2:]
    start_num = int(start_part[1:])   # strip leading '['
    end_num = int(end_part[:-1])      # strip trailing ']'
    return ['%s-%d' % (prefix, i) for i in range(start_num, end_num + 1)]


if __name__ == '__main__':
    # Parse inside the guard so importing this module (e.g. for testing)
    # does not consume sys.argv or exit.
    args = parser.parse_args()
    for arg in args.hostname_pattern:
        for hostname in expand_hostname_pattern(arg):
            # print() with a single argument behaves the same on
            # Python 2 and Python 3.
            print(hostname)
class Solution:
    def findDuplicate(self, nums: List[int]) -> int:
        """Return the single repeated value in *nums* without modifying it.

        Treats the array as a linked list (i -> nums[i]) and applies
        Floyd's tortoise-and-hare cycle detection; the cycle entrance
        is the duplicated number.
        """
        # Phase 1: advance two pointers at different speeds until they meet
        # somewhere inside the cycle.
        tortoise = nums[0]
        hare = nums[nums[0]]
        while tortoise != hare:
            tortoise = nums[tortoise]
            hare = nums[nums[hare]]

        # Phase 2: restart one pointer from the head; moving both one step
        # at a time, they meet exactly at the cycle entrance.
        finder = 0
        while tortoise != finder:
            tortoise = nums[tortoise]
            finder = nums[finder]
        return finder
# Copyright 2014,  Doug Wiegley,  A10 Networks.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import acos_client.errors as acos_errors

import base


class Partition(base.BaseV21):
    """aXAPI v2.1 wrapper for ACOS partition management."""

    def exists(self, name):
        """Return True if the named partition exists on the device."""
        # The shared partition is always present.
        if name == 'shared':
            return True
        try:
            self._post("system.partition.search", {'name': name})
        except acos_errors.NotFound:
            return False
        return True

    def active(self, name='shared'):
        """Make *name* the current partition, if it is not already."""
        if self.client.current_partition == name:
            return
        self._post("system.partition.active", {'name': name})
        self.client.current_partition = name

    def create(self, name):
        """Create a partition; 'shared' is built in and never created."""
        if name == 'shared':
            return
        params = {
            'partition': {
                'max_aflex_file': 32,
                'network_partition': 0,
                'name': name
            }
        }
        self._post("system.partition.create", params)

    def delete(self, name):
        """Delete a partition; the built-in 'shared' one is never deleted."""
        if name == 'shared':
            return
        # Drop the current session before deleting the partition.
        self.client.session.close()
        self._post("system.partition.delete", {"name": name})
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse

from .models import Document
from .forms import DocumentForm


def list(request):
    """Show every uploaded Document and handle new uploads via POST."""
    if request.method != 'POST':
        # A empty, unbound form
        form = DocumentForm()
    else:
        # Handle file upload
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            newdoc = Document(docfile=request.FILES['docfile'])
            newdoc.save()
            # Redirect to the document list after POST
            return HttpResponseRedirect(
                reverse('ljosmyndasida.photos.views.list'))
        # Invalid POST falls through and re-renders the bound form
        # so its errors are displayed.

    # Load documents for the list page and render it with the form.
    context = {'documents': Document.objects.all(), 'form': form}
    return render_to_response(
        'list.html',
        context,
        context_instance=RequestContext(request)
    )
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-20 00:54
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated schema migration: removes the self-referential
    # 'parent' field from ResponseContext and redefines the
    # 'delayed_feedback' boolean (help/verbose text and default).

    dependencies = [
        ('cs_core', '0006_auto_20160619_2151'),
    ]

    operations = [
        # Drop the obsolete parent link.
        migrations.RemoveField(
            model_name='responsecontext',
            name='parent',
        ),
        # Redefine delayed_feedback; default stays False, so no data
        # backfill is required.
        migrations.AlterField(
            model_name='responsecontext',
            name='delayed_feedback',
            field=models.BooleanField(default=False, help_text='If set, students will be only be able to see the feedback after the activity expires its deadline.', verbose_name='delayed feedback'),
        ),
    ]
import datetime
import unittest

from iso8601 import ParseError

from apel.parsers import BlahParser
from apel.db.records.record import InvalidRecordException


class ParserBlahTest(unittest.TestCase):
    '''
    Test case for LSF parser
    '''

    def setUp(self):
        self.parser = BlahParser('testSite', 'testHost')

    def test_parse(self):
        '''A well-formed BLAH line is parsed into the expected fields.'''
        line1 = ('"timestamp=2012-05-20 23:59:47" '
                 + '"userDN=/O=GermanGrid/OU=UniWuppertal/CN=Torsten Harenberg" '
                 + '"userFQAN=/atlas/Role=production/Capability=NULL" '
                 + '"ceID=cream-2-fzk.gridka.de:8443/cream-pbs-atlasXL" '
                 + '"jobID=CREAM410741480" "lrmsID=9575064.lrms1" "localUser=11999"')
        line1_values = {"TimeStamp": datetime.datetime(2012, 5, 20, 23, 59, 47),
                        "GlobalUserName": "/O=GermanGrid/OU=UniWuppertal/CN=Torsten Harenberg",
                        "FQAN": "/atlas/Role=production/Capability=NULL",
                        "CE": "cream-2-fzk.gridka.de:8443/cream-pbs-atlasXL",
                        "GlobalJobId": "CREAM410741480",
                        "LrmsId": "9575064.lrms1",
                        }

        cases = {}
        cases[line1] = line1_values

        for line in cases:
            record = self.parser.parse(line)
            cont = record._record_content

            # Keys presence in record.
            # (dict.has_key() was removed in Python 3; use the 'in' operator.)
            self.assertTrue("TimeStamp" in cont)
            self.assertTrue("GlobalUserName" in cont)
            self.assertTrue("FQAN" in cont)
            self.assertTrue("CE" in cont)
            self.assertTrue("GlobalJobId" in cont)
            self.assertTrue("LrmsId" in cont)

            for key in cases[line]:
                self.assertEqual(cont[key], cases[line][key],
                                 "%s != %s for key %s" % (cont[key], cases[line][key], key))

    def test_invalid_timestamp(self):
        '''
        Test if parser raises exception for invalid timestamp
        '''
        line_invalidtimestamp = ('"timestamp=2012-05-20A23:59:47" '
                                 + '"userDN=/O=GermanGrid/OU=UniWuppertal/CN=Torsten Harenberg" '
                                 + '"userFQAN=/atlas/Role=production/Capability=NULL" '
                                 + '"ceID=cream-2-fzk.gridka.de:8443/cream-pbs-atlasXL" '
                                 + '"jobID=CREAM410741480" "lrmsID=9575064.lrms1" "localUser=11999"')

        # Should raise an exception - we have 'A' between date and time
        try:
            # iso8601 >= 0.1.9 version of test (now stricter in what it accepts)
            self.assertRaises(ParseError, self.parser.parse, line_invalidtimestamp)
        except Exception:
            # Narrowed from a bare 'except:'. This must still catch both the
            # AssertionError from assertRaises and an InvalidRecordException
            # propagating from parse() under old iso8601 versions.
            # iso8601 <= 0.1.8 version of test (should be deprecated)
            self.assertRaises(InvalidRecordException, self.parser.parse, line_invalidtimestamp)

    def test_invalid_record_line(self):
        '''A malformed key=value line should raise ValueError.'''
        line_invalid = ('"timestamp=2012-05-20 23:59:47" '
                        + '"userDN=/O=GermanGrid/OU=UniWuppertal/CN=Torsten Harenberg" '
                        + '"userFQAN=/atlas&Role=production/Capability=NULL" '
                        + '"ceID=cream-2-fzk.gridka.de:8443/cream-pbs-atlasXL" '
                        + '"jobID=CREAM410741480"&sd"lrmsID=9575064.lrms1" "localUser=11999"')

        self.assertRaises(ValueError, self.parser.parse, line_invalid)

    def test_multiple_fqans(self):
        """The parser should take the first FQAN to be the primary FQAN."""
        lines = (
            '"timestamp=2014-05-18 00:00:58" "userDN=/C=CA/O=Grid/OU=triumf.ca/'
            'CN=Asoka De Silva GC1" "userFQAN=/atlas/Role=pilot/Capability=NULL'
            '" "userFQAN=/atlas/Role=NULL/Capability=NULL" "userFQAN=/atlas/ca/'
            'Role=NULL/Capability=NULL" "userFQAN=/atlas/lcg1/Role=NULL/Capabil'
            'ity=NULL" "ceID=ce1.triumf.ca:8443/cream-pbs-atlas" "jobID=CREAM66'
            '3276716" "lrmsID=15876368.ce1.triumf.ca" "localUser=41200" "client'
            'ID=cream_663276716"',
            '"timestamp=2014-05-18 00:03:00" "userDN=/DC=ch/DC=cern/OU=Organic '
            'Units/OU=Users/CN=atlpilo2/CN=531497/CN=Robot: ATLAS Pilot2" "user'
            'FQAN=/atlas/Role=pilot/Capability=NULL" "userFQAN=/atlas/Role=NULL'
            '/Capability=NULL" "userFQAN=/atlas/lcg1/Role=NULL/Capability=NULL"'
            ' "userFQAN=/atlas/usatlas/Role=NULL/Capability=NULL" "ceID=ce1.tri'
            'umf.ca:8443/cream-pbs-atlas" "jobID=CREAM503347888" "lrmsID=158764'
            '80.ce1.triumf.ca" "localUser=41200" "clientID=cream_503347888"',
        )

        # NOTE: the original used leading-zero literals such as 00 and 03 in
        # datetime(...); those are octal-style literals and a SyntaxError on
        # Python 3, so they are written as plain 0 and 3 here.
        values = (
            (datetime.datetime(2014, 5, 18, 0, 0, 58),
             '/C=CA/O=Grid/OU=triumf.ca/CN=Asoka De Silva GC1',
             '/atlas/Role=pilot/Capability=NULL',  # primary FQAN is first one
             'atlas',
             '/atlas',
             'Role=pilot',
             'ce1.triumf.ca:8443/cream-pbs-atlas',
             'CREAM663276716',
             '15876368.ce1.triumf.ca',
             datetime.datetime(2014, 5, 17, 0, 0, 58),
             datetime.datetime(2014, 6, 15, 0, 0, 58),
             0
             ),
            (datetime.datetime(2014, 5, 18, 0, 3, 0),
             '/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=atlpilo2/CN=531497/CN'
             '=Robot: ATLAS Pilot2',
             '/atlas/Role=pilot/Capability=NULL',  # primary FQAN is first one
             'atlas',
             '/atlas',
             'Role=pilot',
             'ce1.triumf.ca:8443/cream-pbs-atlas',
             'CREAM503347888',
             '15876480.ce1.triumf.ca',
             datetime.datetime(2014, 5, 17, 0, 3, 0),
             datetime.datetime(2014, 6, 15, 0, 3, 0),
             0
             ),
        )

        fields = ('TimeStamp', 'GlobalUserName', 'FQAN', 'VO', 'VOGroup',
                  'VORole', 'CE', 'GlobalJobId', 'LrmsId', 'ValidFrom',
                  'ValidUntil', 'Processed')

        cases = {}
        for line, value in zip(lines, values):
            cases[line] = dict(zip(fields, value))

        for line in cases:
            record = self.parser.parse(line)
            cont = record._record_content

            # Check that 'Site' has been set
            self.assertEqual(cont['Site'], 'testSite')

            for key in cases[line]:
                # Check all fields are present
                self.assertTrue(key in cont, "Key '%s' not in record." % key)
                # Check values are correct
                self.assertEqual(cont[key], cases[line][key],
                                 "'%s' != '%s' for key '%s'" %
                                 (cont[key], cases[line][key], key))


if __name__ == '__main__':
    unittest.main()
#!/usr/bin/python # Open Global Server Load Balancer (ogslb) # Copyright (C) 2010 Mitchell Broome # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the Li
cense, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Ge
neral Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import sys, os from subprocess import * scriptPath = os.path.realpath(os.path.dirname(sys.argv[0])) # account for where we live sys.path.append(scriptPath + '/..') sys.path.append(scriptPath + '/../lib') import pprint pp = pprint.PrettyPrinter(indent=4) # this is a very basic program to test backend.py. We effectivly emulate the # PowerDNS protocol and talk to it on stdout and read back from stdin. # Basically, just call this with a hostname as an argument if __name__ == '__main__': host = '' try: host = sys.argv[1] except: print "need a hostname to lookup"; sys.exit() try: scriptName = scriptPath + '/backend.py' p = Popen(scriptName, shell=True, bufsize=256, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) # p = Popen(scriptName, shell=True, bufsize=256, stdin=PIPE, stdout=PIPE, close_fds=True) (child_stdin, child_stdout) = (p.stdin, p.stdout) child_stdin.write('HELO\t1\n'); child_stdin.flush() l = child_stdout.readline() print l child_stdin.write('Q\t%s\tIN\tANY\t-1\t127.0.0.1\n' % host); child_stdin.flush() l = child_stdout.readline() print l p.close() except: ''' '''
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2012-2-5

@author: zepheir
'''
import sys
sys.path.append('/app/srv/src')

from binascii import b2a_hex

# Prefer the epoll reactor when available; silently fall back to the
# default reactor otherwise.
try:
    from twisted.internet import epollreactor
    epollreactor.install()
except:
    pass

from twisted.internet import reactor
from twisted.python import log
from twisted.application import service

from zhyDB import ZhyDB
import Zoro
from ussop import sipai as Sipai
import time

import config
from config import *  # provides DEBUG, ECHO, SERVERRECONNECT, SdsConnectTimer,
                      # SipaiSampleTimer, ... -- TODO confirm against config.py


def ReceiveData(*data):
    # Module-level debug hook; the per-connection handler is the
    # SampleServer.ReceiveData method below.
    if DEBUG:
        print 'print data----------------', data

# Constants
# ZDB = SipaiDB()
zhy = ZhyDB()
# Mapping of (host, port) SDS tuples to their module lists.
# NOTE(review): exact structure comes from ZhyDB.listSipaiMods -- confirm.
SipaiModsDict = zhy.listSipaiMods(allSDS=None)
# factoryDict = {}
# modules = {}


class SampleServer(object):
    """Polls the Sipai modules behind one SDS (host, port) over Modbus.

    Each instance owns a Zoro Modbus factory and walks its module list,
    issuing one read command per module and pushing results into ZhyDB.
    """
    def __init__(self, *sds):
        super(SampleServer, self).__init__()
        self.sds = sds
        # sds is expected to be a (host, port) pair; port arrives as a string.
        self.host, self.port = self.sds[0], int(self.sds[1])
        self.modules = []
        self.mod = object        # placeholder until update() assigns a module
        self.nowtype = ''        # type of the module currently being read
        self.factory = Zoro.SetupModbusConnect(self.host, self.port, self.ReceiveData, reConnectMode=False)
        self.factory.spendtime = 0.3   # delay between module reads (seconds)
        self.setup()

    def setup(self):
        # (Re)load this SDS's module list and reset the sampling countdown.
        self.modules += SipaiModsDict[self.sds]
        self.sampletimer = SipaiSampleTimer
        if ECHO:
            print "*********** Time pass from start: %s" % (time.ctime()), self.factory.connection.getDestination(), self.factory.getState()

    def ReceiveData(self, *data):
        # Callback from the Modbus factory.
        # NOTE(review): data appears to be (addr, (ip, port), payload) --
        # confirm against Zoro.SetupModbusConnect.
        if DEBUG:
            print ' ===> Received Data:', data, b2a_hex(data[2])
        # global zhy
        _result = self.mod.dealdata(data[2])
        print '----------result---------', _result
        print data[0], data[1], zhy.updateSipaiResults(
            ip=data[1][0],
            port=data[1][1],
            addr=data[0],
            type=self.nowtype,
            # value=b2a_hex(data[2])
            value=_result
            )

    def update(self):
        """Read the next queued module, or reschedule a new sampling round."""
        if DEBUG:
            print "[", self.sds, "] starting in the SampleServer Class!"
        if len(self.modules) > 0:
            # Pop the next module and issue its read command.
            modinfo = self.modules.pop(0)
            self.nowtype = modinfo['type']
            self.mod = Sipai.createspm(type=modinfo['type'], address=modinfo['addr'])
            _cmd = self.mod.cmd(self.mod.CMD_READDATA)
            zhy.setSipaiModState(
                ip=self.host,
                port=str(self.port),
                addr=modinfo['addr'],
                type=self.nowtype,
                state='reading'
                )
            if DEBUG:
                print "===> Output command:", b2a_hex(_cmd)
            # Send shortly, then come back for the next module after
            # 'spendtime'; track the time spent out of this round's budget.
            reactor.callLater(0.1, self.factory.protocol.SendCmd, _cmd)
            reactor.callLater(self.factory.spendtime, self.update)
            self.sampletimer -= self.factory.spendtime
        else:
            # Round finished: optionally bounce the connection, then requeue
            # setup() and the next update() for the remainder of the budget.
            # NOTE(review): original indentation was ambiguous here; the
            # disconnect/connect pair is assumed to be guarded by
            # SERVERRECONNECT while the rescheduling always runs -- confirm.
            if SERVERRECONNECT:
                reactor.callLater(self.factory.spendtime, self.factory.connection.disconnect)
                reactor.callLater(SdsConnectTimer, self.factory.connection.connect)
            reactor.callLater(SdsConnectTimer, self.setup)
            reactor.callLater(self.sampletimer - SdsConnectTimer, self.update)
            # reactor.callLater(SdsConnectTimer+self.factory.spendtime, self.update)


servs = {}

def main():
    # One SampleServer per SDS endpoint; kick each one's polling loop off.
    for sds in SipaiModsDict:
        servs[sds] = SampleServer(sds[0], sds[1])
        servs[sds].update()
        # time.sleep(0.2)
    # if DEBUG:
    #     # servs1=SampleServer('130.139.200.50','6020')
    #     servs2=SampleServer('130.139.200.51','10001')
    #     # servs3=SampleServer('130.139.200.56','10001')
    #     # servs1.update()
    #     servs2.update()
    #     # servs3.update()
    # else:
    #     for sds in SipaiModsDict:
    #         servs[sds]=SampleServer(sds[0],sds[1])
    #         servs[sds].update()
    #         time.sleep(0.2)

if __name__ == '__main__':
    # Run standalone with our own reactor loop.
    import sys
    main()
    reactor.run()
    print 'reactor stopped!'
    sys.exit(1)
elif __name__ == "__builtin__":
    # Run under twistd, which needs a module-level 'application'.
    import sys
    main()
    application = service.Application("SIPAI")
"""Test that types defined in shared libraries work correctly."""


import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil


class TestRealDefinition(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    @skipUnlessDarwin
    def test_frame_var_after_stop_at_implementation(self):
        """Test that we can find the implementation for an objective C type"""
        if self.getArchitecture() == 'i386':
            self.skipTest("requires modern objc runtime")
        self.build()
        # common_setup() reads self.shlib_names, so it must be set first.
        self.shlib_names = ["libTestExt.dylib", "libTest.dylib"]
        self.common_setup()

        line = line_number('TestExt/TestExt.m', '// break here')
        lldbutil.run_break_set_by_file_and_line(
            self, 'TestExt.m', line, num_expected_locations=1, loc_exact=True)

        self.runCmd("run", RUN_SUCCEEDED)

        # The stop reason of the thread should be breakpoint.
        self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
                    substrs=['stopped',
                             'stop reason = breakpoint'])

        # The breakpoint should have been resolved and hit exactly once.
        self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
                    substrs=[' resolved, hit count = 1'])

        # This should display correctly.
        self.expect(
            "expr 42",
            "A simple expression should execute correctly",
            substrs=[
                "42"])

    def common_setup(self):
        # Create the target and register the shared libraries so the
        # test harness can locate them at runtime.
        exe = self.getBuildArtifact("a.out")
        target = self.dbg.CreateTarget(exe)
        self.registerSharedLibrariesWithTarget(target, self.shlib_names)
        self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
INPUT_DIRECTORY = 'input/'
OUTPUT_DIRECTORY = 'output/'
IMAGE_FILE_EXTENSION = '.JPG'
MAX_INTENSITY = 255  # 8-bit images

# NOTE(review): cv2.imread returns arrays of shape (rows, cols), so the
# 'width, height = image.shape' unpacking below actually names rows as
# 'width' and cols as 'height'.  The names are swapped but used
# consistently, so the filters still traverse the whole image.
# "Referencing below zero wraps around" relies on Python's negative
# indexing reading from the opposite edge of the array.


def averagingFilter(image):
    """Each pixel becomes the average of its immediately surrounding pixels.

    We are doing a simple 3x3 box blur.

    Referencing below zero wraps around, so top and left sides will be
    blurred. We are not bothering with the right and bottom edges, because
    referencing above the image size results in a boundary error.
    """
    width, height = image.shape
    filteredImage = copy.deepcopy(image)
    # Avoid right, bottom edges.
    for i in range(width - 1):
        for j in range(height - 1):
            # Sum the 3x3 neighbourhood centred on (i, j).
            total = 0.0
            for i1 in range(i - 1, i + 2):
                for j1 in range(j - 1, j + 2):
                    total = total + float(image[i1][j1])
            filteredImage[i][j] = float(total) / float(9)
    return filteredImage


def gaussianFilter(image):
    """Each pixel becomes the Gaussian-weighted average of nearby pixels.

    Referencing below zero wraps around, so top and left sides will be
    blurred. We are not bothering with the right and bottom edges, because
    referencing above the image size results in a boundary error.
    """
    width, height = image.shape
    filteredImage = copy.deepcopy(image)
    # Avoid right, bottom edges.
    for i in range(width - 2):
        for j in range(height - 2):
            # Mask from homepages.inf.ed.ac.uk/rbf/HIPR2/gsmooth.htm
            # (5x5 Gaussian kernel, weights sum to 273).
            total = 0.0
            total += 1 * float(image[i-2][j+2])
            total += 4 * float(image[i-1][j+2])
            total += 7 * float(image[i-0][j+2])
            total += 4 * float(image[i+1][j+2])
            total += 1 * float(image[i+2][j+2])
            total += 4 * float(image[i-2][j+1])
            total += 16 * float(image[i-1][j+1])
            total += 26 * float(image[i-0][j+1])
            total += 16 * float(image[i+1][j+1])
            total += 4 * float(image[i+2][j+1])
            total += 7 * float(image[i-2][j+0])
            total += 26 * float(image[i-1][j+0])
            total += 41 * float(image[i-0][j+0])
            total += 26 * float(image[i+1][j+0])
            total += 7 * float(image[i+2][j+0])
            total += 4 * float(image[i-2][j-1])
            total += 16 * float(image[i-1][j-1])
            total += 26 * float(image[i-0][j-1])
            total += 16 * float(image[i+1][j-1])
            total += 4 * float(image[i+2][j-1])
            total += 1 * float(image[i-2][j-2])
            total += 4 * float(image[i-1][j-2])
            total += 7 * float(image[i-0][j-2])
            total += 4 * float(image[i+1][j-2])
            total += 1 * float(image[i+2][j-2])
            filteredImage[i][j] = total / float(273)
    return filteredImage


def medianFilter(image):
    """Each pixel becomes the median of its immediately surrounding pixels.

    We are doing a simple 5x5 median blur.

    Referencing below zero wraps around, so top and left sides will be
    blurred. We are not bothering with the right and bottom edges, because
    referencing above the image size results in a boundary error.
    """
    width, height = image.shape
    filteredImage = copy.deepcopy(image)
    # Avoid right, bottom edges.
    for i in range(width - 2):
        for j in range(height - 2):
            # Collect the 5x5 neighbourhood and take its median.
            neighborhood = list()
            for i1 in range(i - 2, i + 3):
                for j1 in range(j - 2, j + 3):
                    neighborhood.append(image[i1][j1])
            filteredImage[i][j] = numpy.median(neighborhood)
    return filteredImage


def laplacianFilter(image):
    """Approximates the second derivative, bringing out edges.

    Referencing below zero wraps around, so top and left sides will be
    sharpened. We are not bothering with the right and bottom edges, because
    referencing above the image size results in a boundary error.
    """
    width, height = image.shape
    filteredImage = copy.deepcopy(image)
    originalImage = copy.deepcopy(image)
    # Avoid right, bottom edges.
    for i in range(width - 1):
        for j in range(height - 1):
            # Mask from homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm
            # (4-neighbour Laplacian kernel).
            total = 0.0
            total += -1 * float(image[i][j + 1])
            total += -1 * float(image[i - 1][j])
            total += 4 * float(image[i][j])
            total += -1 * float(image[i + 1][j])
            total += -1 * float(image[i][j - 1])
            # Sharpen: add a scaled Laplacian response back onto the original.
            filteredImage[i][j] = originalImage[i][j] + (1.5 * (total / 4.0))
    return filteredImage


def sobelXFilter(image):
    """Performs a horizontal Sobel operation.

    Referencing below zero wraps around, so top and left sides will be
    sharpened. We are not bothering with the right and bottom edges, because
    referencing above the image size results in a boundary error.
    """
    width, height = image.shape
    filteredImage = copy.deepcopy(image)
    #originalImage = copy.deepcopy(image)
    # Remove some noise before we begin.
    image = medianFilter(image)
    # Avoid right, bottom edges.
    for i in range(width - 1):
        for j in range(height - 1):
            # Mask from en.wikipedia.org/wiki/Sobel_operator
            total = 0.0
            total += -1 * float(image[i - 1][j - 1])
            total += 1 * float(image[i + 1][j - 1])
            total += -2 * float(image[i - 1][j])
            total += 2 * float(image[i + 1][j])
            total += -1 * float(image[i - 1][j + 1])
            total += 1 * float(image[i + 1][j + 1])
            #filteredImage[i][j] = originalImage[i][j] + (total / 6.0)
            filteredImage[i][j] = total / 6.0
    return filteredImage


def sobelYFilter(image):
    """Performs a vertical Sobel operation.

    Referencing below zero wraps around, so top and left sides will be
    sharpened. We are not bothering with the right and bottom edges, because
    referencing above the image size results in a boundary error.
    """
    width, height = image.shape
    filteredImage = copy.deepcopy(image)
    #originalImage = copy.deepcopy(image)
    # Remove some noise before we begin.
    image = medianFilter(image)
    # Avoid right, bottom edges.
    for i in range(width - 1):
        for j in range(height - 1):
            # Mask from en.wikipedia.org/wiki/Sobel_operator
            total = 0.0
            total += -1 * float(image[i - 1][j - 1])
            total += -2 * float(image[i + 0][j - 1])
            total += -1 * float(image[i + 1][j - 1])
            total += 1 * float(image[i - 1][j + 1])
            total += 2 * float(image[i - 0][j + 1])
            total += 1 * float(image[i + 1][j + 1])
            #filteredImage[i][j] = originalImage[i][j] + (total / 6.0)
            filteredImage[i][j] = total / 6.0
    return filteredImage


def sobelXYFilter(image):
    """ Combines the Sobel X and Y filters to find all edges.
    """
    width, height = image.shape
    xFiltered = sobelXFilter(copy.deepcopy(image))
    yFiltered = sobelYFilter(copy.deepcopy(image))
    for i in range(width):
        for j in range(height):
            # Gradient magnitude from the two directional responses.
            x = xFiltered[i][j]
            y = yFiltered[i][j]
            image[i][j] = math.sqrt((x ** 2) + (y ** 2))
    return image


def saveImage(image, filename):
    """Saves the image in the output directory with the filename given.
    """
    cv2.imwrite(OUTPUT_DIRECTORY + filename + IMAGE_FILE_EXTENSION, image)


def openImage(fileName):
    """Opens the image in the input directory with the filename given.
    """
    # The trailing 0 loads the image as grayscale.
    return cv2.imread(INPUT_DIRECTORY + fileName + IMAGE_FILE_EXTENSION, 0)


# Input images
inputForBlurring = 'fabio'
inputForSharpening = 'bball'

# Import image.
imageForBlurring = openImage(inputForBlurring)
imageForSharpening = openImage(inputForSharpening)

## Run filters on image, save.
#print("Averaging Filter...")
#saveImage(averagingFilter(imageForBlurring), inputForBlurring + 'Averaging')
#
#print("Gaussian Filter...")
#saveImage(gaussianFilter(imageForBlurring), inputForBlurring + 'Gauss')
#
#print("Median Filter...")
#saveImage(medianFilter(imageForBlurring), inputForBlurring + 'Median')

print("Laplacian Filter...")
saveImage(laplacianFilter(imageForSharpening), inputForSharpening + 'Laplace')

print("Sobel X Filter...")
saveImage(sobelXFilter(imageForSharpening), inputForSharpening + 'XSobel')

print("Sobel Y Filter...")
saveImage(sobelYFilter(imageForSharpening), inputForSharpening + 'YSobel')

print("Sobel XY Filter...")
saveImage(sobelXYFilter(imageForSharpening), inputForSharpening + 'XYSobel')

print(
t( issue=issue.to_json(), project=issue.project.to_json(), agent=user_obj.username, ) ) return 'Comment added' def add_issue_tag(session, issue, tags, user, ticketfolder): ''' Add a tag to an issue. ''' user_obj = __get_user(session, user) if isinstance(tags, basestring): tags = [tags] msgs = [] added_tags = [] for issue_tag in tags: known = False for tag_issue in issue.tags: if tag_issue.tag == issue_tag: known = True if known: continue tagobj = get_tag(session, issue_tag) if not tagobj: tagobj = model.Tag(tag=issue_tag) session.add(tagobj) session.flush() issue_tag = model.TagIssue( issue_uid=issue.uid, tag=tagobj.tag, ) session.add(issue_tag) # Make sure we won't have SQLAlchemy error before we create the repo session.flush() added_tags.append(tagobj.tag) pagure.lib.git.update_git( issue, repo=issue.project, repofolder=ticketfolder) if not issue.private: pagure.lib.notify.fedmsg_publish( 'issue.tag.added', dict( issue=issue.to_json(), project=issue.project.to_json(), tags=added_tags, agent=user_obj.username, ) ) if added_tags: return 'Tag added: %s' % ', '.join(added_tags) else: return 'Nothing to add' def add_issue_assignee(session, issue, assignee, user, ticketfolder): ''' Add an assignee to an issue, in other words, assigned an issue. ''' user_obj = __get_user(session, user) if assignee is None and issue.assignee != None: issue.assignee_id = None session.add(issue) session.commit() pagure.lib.git.update_git( issue, repo=issue.project, repofolder=ticketfolder) pagure.lib.notify.notify_assigned_issue(issue, None, user_obj)
if not issue.private: pagure.lib.notify.fedmsg_publish( 'issue.assigned.reset', dict( issue=issue.to_json(), project=issue.project.to_json(), agent=user_obj.username, ) ) return 'Assignee reset' elif assignee is None and issue.as
signee == None: return # Validate the assignee assignee_obj = __get_user(session, assignee) if issue.assignee_id != assignee_obj.id: issue.assignee_id = assignee_obj.id session.add(issue) session.flush() pagure.lib.git.update_git( issue, repo=issue.project, repofolder=ticketfolder) pagure.lib.notify.notify_assigned_issue( issue, assignee_obj, user_obj) if not issue.private: pagure.lib.notify.fedmsg_publish( 'issue.assigned.added', dict( issue=issue.to_json(), project=issue.project.to_json(), agent=user_obj.username, ) ) return 'Issue assigned' def add_issue_dependency(session, issue, issue_blocked, user, ticketfolder): ''' Add a dependency between two issues. ''' user_obj = __get_user(session, user) if issue.uid == issue_blocked.uid: raise pagure.exceptions.PagureException( 'An issue cannot depend on itself' ) if issue_blocked not in issue.children: i2i = model.IssueToIssue( parent_issue_id=issue_blocked.uid, child_issue_id=issue.uid ) session.add(i2i) # Make sure we won't have SQLAlchemy error before we create the repo session.flush() pagure.lib.git.update_git( issue, repo=issue.project, repofolder=ticketfolder) pagure.lib.git.update_git( issue_blocked, repo=issue_blocked.project, repofolder=ticketfolder) #pagure.lib.notify.notify_assigned_issue(issue, user_obj) #pagure.lib.notify.notify_assigned_issue(issue_blocked, user_obj) if not issue.private: pagure.lib.notify.fedmsg_publish( 'issue.dependency.added', dict( issue=issue.to_json(), project=issue.project.to_json(), added_dependency=issue_blocked.id, agent=user_obj.username, ) ) return 'Dependency added' def remove_issue_dependency(session, issue, issue_blocked, user, ticketfolder): ''' Remove a dependency between two issues. 
''' user_obj = __get_user(session, user) if issue.uid == issue_blocked.uid: raise pagure.exceptions.PagureException( 'An issue cannot depend on itself' ) if issue_blocked in issue.children: for child in issue.children: if child.uid == issue_blocked.uid: issue.children.remove(child) # Make sure we won't have SQLAlchemy error before we create the repo session.flush() pagure.lib.git.update_git( issue, repo=issue.project, repofolder=ticketfolder) pagure.lib.git.update_git( issue_blocked, repo=issue_blocked.project, repofolder=ticketfolder) #pagure.lib.notify.notify_assigned_issue(issue, user_obj) #pagure.lib.notify.notify_assigned_issue(issue_blocked, user_obj) if not issue.private: pagure.lib.notify.fedmsg_publish( 'issue.dependency.removed', dict( issue=issue.to_json(), project=issue.project.to_json(), removed_dependency=child.id, agent=user_obj.username, ) ) return 'Dependency removed' def remove_tags(session, project, tags, ticketfolder, user): ''' Removes the specified tag of a project. ''' user_obj = __get_user(session, user) if not isinstance(tags, list): tags = [tags] issues = search_issues(session, project, closed=False, tags=tags) issues.extend(search_issues(session, project, closed=True, tags=tags)) msgs = [] removed_tags = [] if not issues: raise pagure.exceptions.PagureException( 'No issue found with the tags: %s' % ', '.join(tags)) else: for issue in issues: for issue_tag in issue.tags: if issue_tag.tag in tags: tag = issue_tag.tag removed_tags.append(tag) session.delete(issue_tag) msgs.append('Removed tag: %s' % tag) pagure.lib.git.update_git( issue, repo=issue.project, repofolder=ticketfolder) pagure.lib.notify.fedmsg_publish( 'project.tag.removed', dict( project=project.to_json(), tags=removed_tags, agent=user_obj.username, ) ) return msgs def remove_tags_issue(session, issue, tags, ticketfolder, user): ''' Removes the specified tag(s) of a issue. 
''' user_obj = __get_user(session, user) if isinstance(tags, basestring): tags = [tags] removed_tags = [] for issue_tag in issue.tags: if issue_tag.tag in tags: tag = issue_tag.tag removed_tags.append(tag) session.delete(issue_tag) pagure.lib.git.update_git( issue, repo=issue.project, repofolder=ticketfolder) pagure.lib.notify.fedmsg_publish( 'issue.tag.removed', dict( issue=issue.to_json(), project=issue.project.to_json(), tags=removed_tags, agent=user_obj.username, ) ) return 'Removed tag: %s' % ', '.join(removed_tags) def edit_issue_tags(session, project, old_tag, new_tag, ticketfolder, user): ''' Removes the specified tag of a project. '
CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The backups api.""" import webob from webob import exc from cinder.api import common from cinder.api import extensions from cinder.api.openstack import wsgi from cinder.api.views import backups as backup_views from cinder.api import xmlutil from cinder import backup as backupAPI from cinder import exception from cinder.openstack.common import log as logging from cinder import utils L
OG = logging.getLogger(__name__) def make_backup(elem): elem.set('id') elem.set('status') elem.set('size') elem.set('container') elem.set('volume_id') elem.set('object_count')
elem.set('availability_zone') elem.set('created_at') elem.set('name') elem.set('description') elem.set('fail_reason') def make_backup_restore(elem): elem.set('backup_id') elem.set('volume_id') def make_backup_export_import_record(elem): elem.set('backup_service') elem.set('backup_url') class BackupTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('backup', selector='backup') make_backup(root) alias = Backups.alias namespace = Backups.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class BackupsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('backups') elem = xmlutil.SubTemplateElement(root, 'backup', selector='backups') make_backup(elem) alias = Backups.alias namespace = Backups.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class BackupRestoreTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('restore', selector='restore') make_backup_restore(root) alias = Backups.alias namespace = Backups.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class BackupExportImportTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('backup-record', selector='backup-record') make_backup_export_import_record(root) alias = Backups.alias namespace = Backups.namespace return xmlutil.MasterTemplate(root, 1, nsmap={alias: namespace}) class CreateDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): dom = utils.safe_minidom_parse_string(string) backup = self._extract_backup(dom) return {'body': {'backup': backup}} def _extract_backup(self, node): backup = {} backup_node = self.find_first_child_named(node, 'backup') attributes = ['container', 'display_name', 'display_description', 'volume_id'] for attr in attributes: if backup_node.getAttribute(attr): backup[attr] = backup_node.getAttribute(attr) return backup class 
RestoreDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): dom = utils.safe_minidom_parse_string(string) restore = self._extract_restore(dom) return {'body': {'restore': restore}} def _extract_restore(self, node): restore = {} restore_node = self.find_first_child_named(node, 'restore') if restore_node.getAttribute('volume_id'): restore['volume_id'] = restore_node.getAttribute('volume_id') return restore class BackupImportDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): dom = utils.safe_minidom_parse_string(string) backup = self._extract_backup(dom) retval = {'body': {'backup-record': backup}} return retval def _extract_backup(self, node): backup = {} backup_node = self.find_first_child_named(node, 'backup-record') attributes = ['backup_service', 'backup_url'] for attr in attributes: if backup_node.getAttribute(attr): backup[attr] = backup_node.getAttribute(attr) return backup class BackupsController(wsgi.Controller): """The Backups API controller for the OpenStack API.""" _view_builder_class = backup_views.ViewBuilder def __init__(self): self.backup_api = backupAPI.API() super(BackupsController, self).__init__() @wsgi.serializers(xml=BackupTemplate) def show(self, req, id): """Return data about the given backup.""" LOG.debug(_('show called for member %s'), id) context = req.environ['cinder.context'] try: backup = self.backup_api.get(context, backup_id=id) except exception.BackupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) return self._view_builder.detail(req, backup) def delete(self, req, id): """Delete a backup.""" LOG.debug(_('delete called for member %s'), id) context = req.environ['cinder.context'] LOG.audit(_('Delete backup with id: %s'), id, context=context) try: self.backup_api.delete(context, id) except exception.BackupNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.InvalidBackup as error: raise exc.HTTPBadRequest(explanation=error.msg) return 
webob.Response(status_int=202) @wsgi.serializers(xml=BackupsTemplate) def index(self, req): """Returns a summary list of backups.""" return self._get_backups(req, is_detail=False) @wsgi.serializers(xml=BackupsTemplate) def detail(self, req): """Returns a detailed list of backups.""" return self._get_backups(req, is_detail=True) def _get_backups(self, req, is_detail): """Returns a list of backups, transformed through view builder.""" context = req.environ['cinder.context'] backups = self.backup_api.get_all(context) limited_list = common.limited(backups, req) if is_detail: backups = self._view_builder.detail_list(req, limited_list) else: backups = self._view_builder.summary_list(req, limited_list) return backups # TODO(frankm): Add some checks here including # - whether requested volume_id exists so we can return some errors # immediately # - maybe also do validation of swift container name @wsgi.response(202) @wsgi.serializers(xml=BackupTemplate) @wsgi.deserializers(xml=CreateDeserializer) def create(self, req, body): """Create a new backup.""" LOG.debug(_('Creating new backup %s'), body) if not self.is_valid_body(body, 'backup'): raise exc.HTTPBadRequest() context = req.environ['cinder.context'] try: backup = body['backup'] volume_id = backup['volume_id'] except KeyError: msg = _("Incorrect request body format") raise exc.HTTPBadRequest(explanation=msg) container = backup.get('container', None) name = backup.get('name', None) description = backup.get('description', None) LOG.audit(_("Creating backup of volume %(volume_id)s in container" " %(container)s"), {'volume_id': volume_id, 'container': container}, context=context) try: new_backup = self.backup_api.create(context, name, description, volume_id, container) except exception.InvalidVolume as error: raise exc.HTTPBadRequest(explanation=error.msg) except exception.VolumeNotFound as error: raise exc.HTTPNotFound(explanation=error.msg) except exception.ServiceNotFound as error: raise 
exc.HTTPInternalServerError(explanation=error.msg) retval = self._view_builder.summary(req, dict(new_backup.iteritems())) return retval @wsgi.response(202) @wsgi.serializers(xml=
#!/usr/bin/env python

import telnetlib
import subprocess
import signal
import time

###############################################################
# This script will automatically flash and start a GDB debug
# session to the STM32 discovery board using OpenOCD. It is
# meant to be called from the rake task "debug" (execute
# rake debug) and the working directory is assumed to be the
# project root
###############################################################

###############################################################
# We need to be able to send a SIGINT (ctrl-c) to GDB
# without killing openocd or this script. Set up a custom
# signal handler here that essentially ignores SIGINT
###############################################################


def signal_handler(sig, frame):
    """Ignore the signal so ctrl-c reaches GDB but not this script.

    Note: parameter renamed from `signal`, which shadowed the
    `signal` module.
    """
    pass  # do nothing


###############################################################
# Start up the openocd thread
###############################################################

# We need gdb to respond to a SIGINT (ctrl-c), but by default,
# that will cause every other child process to die, including
# openocd. Disable sigint, then re-enable it after the child
# spawns. The child inherits the current state of signal
# handlers.
signal.signal(signal.SIGINT, signal.SIG_IGN)
openocd = subprocess.Popen(["openocd"])
time.sleep(2)  # Wait for this to start up

# Set up a custom signal handler so that SIGINT doesn't kill
# this script
signal.signal(signal.SIGINT, signal_handler)

###############################################################
# Flash the new image to the development board
###############################################################

# Create the flashable image
subprocess.call(["arm-none-eabi-objcopy", "-Obinary",
                 "build/flash.elf", "build/flash.bin"])

# Flash the image via OpenOCD's telnet command interface
tn = telnetlib.Telnet("127.0.0.1", "4444")
tn.read_until("> ")
tn.write("poll\n")
tn.read_until("> ")
tn.write("reset halt\n")
tn.read_until("> ")
tn.write("flash probe 0\n")
tn.read_until("> ")
tn.write("flash write_image erase build/flash.bin 0x08000000\n")
tn.read_until("> ")
tn.write("reset\n")
tn.read_until("> ")
tn.write("exit\n")
tn.close()

###############################################################
# Start the gdb session
###############################################################
time.sleep(2)
gdb_proc = subprocess.Popen(["arm-none-eabi-gdb",
                             "-ex", "target remote localhost:3333",
                             "build/flash.elf",
                             "-ex", "set remote hardware-breakpoint-limit 6",
                             "-ex", "set remote hardware-watchpoint-limit 4"])

# Spin until GDB is exited; poll() stays None while it is alive.
# (Fixed: identity comparison `is None`, not `== None`.)
while gdb_proc.poll() is None:
    time.sleep(1)

# Gracefully exit openocd
openocd.terminate()
) else: return json_response({'state': 'NONE'}) def srs_step_view(req, upload_session): import_session = upload_session.import_session form = None if req.method == 'POST': form = forms.SRSForm(req.POST) if form.is_valid(): srs = form.cleaned_data['srs'] upload.srs_step(upload_session, srs) return _next_step_response(req, upload_session) task = import_session.tasks[0] # CRS missing/unknown if task.state == 'NO_CRS': native_crs = task.layer.srs form = form or forms.SRSForm() if form: name = task.layer.name return render_to_response('upload/layer_upload_crs.html', RequestContext(req, { 'native_crs': native_crs, 'form': form, 'layer_name': name })) # mark this completed since there is no post-back when skipping upload_session.completed_step = 'srs' return _next_step_response(req, upload_session) latitude_names = set(['latitude', 'lat']) longitude_names = set(['longitude', 'lon', 'lng', 'long']) def is_latitude(colname): return colname.lower() in latitude_names def is_longitude(colname): return colname.lower() in longitude_names def csv_step_view(request, upload_session): import_session = upload_session.import_session attributes = import_session.tasks[0].layer.attributes # need to check if geometry is found # if so, can proceed directly to next step for attr in attributes: if attr.binding == u'com.vividsolutions.jts.geom.Point': upload_session.completed_step = 'csv' return _next_step_response(request, upload_session) # no geometry found, let's find all the numerical columns number_names = ['java.lang.Integer', 'java.lang.Double'] point_candidates = sorted([attr.name for attr in attributes if attr.binding in number_names]) # form errors to display to user error = None lat_field = request.POST.get('lat', '') lng_field = request.POST.get('lng', '') if request.method == 'POST': if not lat_field or not lng_field: error = 'Please choose which columns contain the latitude and longitude data.' 
elif (lat_field not in point_candidates or lng_field not in point_candidates): error = 'Invalid latitude/longitude columns' elif lat_field == lng_field: error = 'You cannot select the same column for latitude and longitude data.' if not error: upload.csv_step(upload_session, lat_field, lng_field) return _next_step_response(request, upload_session) # try to guess the lat/lng fields from the candidates lat_candidate = None lng_candidate = None non_str_in_headers = [] for candidate in attributes: if not isinstance(candidate.name, basestring): non_str_in_headers.append(str(candidate.name)) if candidate.name in point_candidates: if is_latitude(candidate.name): lat_candidate = candidate.name elif is_longitude(candidate.name): lng_candidate = candidate.name if request.method == 'POST': guessed_lat_or_lng = False selected_lat = lat_field selected_lng = lng_field else: guessed_lat_or_lng = bool(lat_candidate or lng_candidate) selected_lat = lat_candidate selected_lng = lng_candidate present_choices = len(point_candidates) >= 2 possible_data_problems = None if non_str_in_headers: possible_data_problems = "There are some suspicious column names in \ your data. Did you provide column names in the header? 
\ The following names look wrong: " possible_data_problems += ','.join(non_str_in_headers) context = dict(present_choices=present_choices, point_candidates=point_candidates, async_upload=_is_async_step(upload_session), selected_lat=selected_lat, selected_lng=selected_lng, guessed_lat_or_lng=guessed_lat_or_lng, layer_name=import_session.tasks[0].layer.name, error=error, possible_data_problems=possible_data_problems ) return render_to_response('upload/layer_upload_csv.html', RequestContext(request, context)) def time_step_view(request, upload_session): import_session = upload_session.import_session if request.method == 'GET': # check for invalid attribute names store_type = import_session.tasks[0].target.store_type if store_type == 'dataStore': layer = import_session.tasks[0].layer invalid = filter( lambda a: str( a.name).find(' ') >= 0, layer.attributes) if invalid: att_list = "<pre>%s</pre>" % '. '.join( [a.name for a in invalid]) msg = "Attributes with spaces are not supported : %s" % att_list return render_to_response( 'upload/layer_upload_error.html', RequestContext(request, {'error_msg': msg})) context = { 'time_form': _create_time_form(import_session, None), 'layer_name': import_session.tasks[0].layer.name, 'async_upload': _is_async_step(upload_session) } return render_to_response('upload/layer_upload_time.html', RequestContext(request, context)) elif request.method != 'POST': raise Exception() form = _create_time_form(import_session, request.POST) if not form.is_valid(): logger.wa
rning('Invalid upload form: %s', form.errors) return _error_response(request, errors=["Invalid Submission"])
cleaned = form.cleaned_data start_attribute_and_type = cleaned.get('start_attribute', None) if start_attribute_and_type: def tx(type_name): return None if type_name is None or type_name == 'Date' \ else 'DateFormatTransform' end_attribute, end_type = cleaned.get('end_attribute', (None, None)) upload.time_step( upload_session, time_attribute=start_attribute_and_type[0], time_transform_type=tx(start_attribute_and_type[1]), time_format=cleaned.get('attribute_format', None), end_time_attribute=end_attribute, end_time_transform_type=tx(end_type), end_time_format=cleaned.get('end_attribute_format', None), presentation_strategy=cleaned['presentation_strategy'], precision_value=cleaned['precision_value'], precision_step=cleaned['precision_step'], ) return _next_step_response(request, upload_session) def run_import(upload_session, async=_ASYNC_UPLOAD): # run_import can raise an exception which callers should handle upload.run_import(upload_session, async) def run_response(req, upload_session): run_import(upload_session) if _ASYNC_UPLOAD: next = get_next_step(upload_session) return _progress_redirect(next) return _next_step_response(req, upload_session) def final_step_view(req, upload_session): try: saved_layer = upload.final_step(upload_session, req.user) except upload.LayerNotReady: return json_response({'status': 'pending', 'success': True, 'redirect_to': '/upload/final'}) # this response is different then all of the other views in the # upload as it does not return a response as a json object return json_response( {'url': saved_layer.get_absolute_url(), 'success': True } ) _steps = { 'save': save_step_view, 'time': time_step_view, 'srs': srs_step_view,
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 13:08:19 2015

@author: jgimenez
"""
from PyQt4 import QtGui, QtCore
from initialConditions_ui import Ui_initialConditionsUI
import os
from utils import *
from PyFoam.RunDictionary.BoundaryDict import BoundaryDict
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
from utils import types

try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)

# Field names this widget knows how to initialize.
unknowns = ['U', 'p', 'p_rgh', 'alpha', 'k', 'epsilon', 'omega', 'nut', 'nuTilda']


class initialConditionsUI(QtGui.QScrollArea, Ui_initialConditionsUI):
    """Scroll area hosting the Designer-generated initial-conditions form."""

    def __init__(self, parent=None, f=QtCore.Qt.WindowFlags()):
        QtGui.QScrollArea.__init__(self, parent)
        self.setupUi(self)


class initialConditionsWidget(initialConditionsUI):
    """Editor for the internal (initial) field values of an OpenFOAM case.

    Builds one tab per known field; each tab holds a uniform/nonuniform
    selector plus one (scalar) or three (vector) numeric line edits.
    """

    def __init__(self, folder):
        self.currentFolder = folder
        initialConditionsUI.__init__(self)
        # currentFields() comes from utils: latest time directory, the field
        # names found there, and the current time value.
        [self.timedir, self.fields, currtime] = currentFields(self.currentFolder)
        self.pushButton.setEnabled(False)
        self.addTabs()

    def addTabs(self, ipatch=None):
        """(Re)build one tab per known field with its value editors."""
        # Tear down editors of previously built tabs before clearing.
        for itab in range(self.tabWidget.count()):
            layout = self.tabWidget.widget(itab).findChildren(QtGui.QVBoxLayout)[0]
            self.clearLayout(layout, 0)
        self.tabWidget.clear()
        for ifield in self.fields:
            if ifield not in unknowns:
                continue
            widget = QtGui.QWidget()
            layout = QtGui.QVBoxLayout(widget)
            layout2 = QtGui.QHBoxLayout()
            cb = QtGui.QComboBox()
            cb.addItems(['uniform', 'nonuniform'])
            layout2.addWidget(cb)
            if types[ifield] == 'scalar':
                # Scalar field: a single numeric line edit.
                ledit = QtGui.QLineEdit()
                ledit.setValidator(QtGui.QDoubleValidator())
                QtCore.QObject.connect(ledit, QtCore.SIGNAL(_fromUtf8("textEdited(QString)")), self.checkData)
                layout2.addWidget(ledit)
            else:
                # Vector field: three numeric line edits (x, y, z).
                for j in range(3):
                    ledit = QtGui.QLineEdit()
                    ledit.setValidator(QtGui.QDoubleValidator())
                    layout2.addWidget(ledit)
                    QtCore.QObject.connect(ledit, QtCore.SIGNAL(_fromUtf8("textEdited(QString)")), self.checkData)
            layout.addLayout(layout2)
            if ifield == 'U':
                qbutton = QtGui.QCheckBox()
                qbutton.setText('Initialize from potential flow')
                layout.addWidget(qbutton)
                QtCore.QObject.connect(qbutton, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")), self.onPotentialFlow)
            spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
            layout.addItem(spacerItem)
            self.tabWidget.addTab(widget, ifield)
            # BUG FIX: count() is one past the last valid index, so the
            # original setTabText(count(), ...) was a silent no-op.
            self.tabWidget.setTabText(self.tabWidget.count() - 1, ifield)

    def onPotentialFlow(self):
        """Enable/disable the U editors depending on the potential-flow box."""
        for itab in range(self.tabWidget.count()):
            ifield = self.tabWidget.tabText(itab)
            if ifield == 'U':
                layout = self.tabWidget.widget(itab).findChildren(QtGui.QVBoxLayout)[0]
                cb = self.tabWidget.widget(itab).findChildren(QtGui.QCheckBox)[0]
                layout2 = layout.itemAt(0).layout()
                for i in range(layout2.count()):
                    if isinstance(layout2.itemAt(i), QtGui.QWidgetItem):
                        layout2.itemAt(i).widget().setEnabled(not cb.isChecked())

    def clearLayout(self, layout, dejar):
        """Recursively delete layout items from index `dejar` onwards."""
        for i in reversed(range(layout.count())):
            if i >= dejar:
                item = layout.itemAt(i)
                if isinstance(item, QtGui.QWidgetItem):
                    item.widget().close()
                    item.widget().deleteLater()
                    # or
                    # item.widget().setParent(None)
                elif isinstance(item, QtGui.QSpacerItem):
                    pass  # no need to do extra stuff
                else:
                    self.clearLayout(item.layout(), 0)
                # remove the item from layout
                layout.removeItem(item)

    def setConditions(self):
        """Write the edited internalField values back into the case files."""
        runPotentialFlow = 0
        for itab in range(self.tabWidget.count()):
            ifield = self.tabWidget.tabText(itab)
            layout = self.tabWidget.widget(itab).findChildren(QtGui.QVBoxLayout)[0]
            filename = '%s/%s' % (self.timedir, ifield)
            parsedData = ParsedParameterFile(filename, createZipped=False)
            layout2 = layout.itemAt(0).layout()
            if layout2.count() == 2:
                # Scalar: combo + one edit.
                parsedData['internalField'] = '%s %s' % (layout2.itemAt(0).widget().currentText(), layout2.itemAt(1).widget().text())
            else:
                if ifield == 'U' and self.tabWidget.widget(itab).findChildren(QtGui.QCheckBox)[0].isChecked():
                    # U will be initialized by potentialFoam; write zeros.
                    runPotentialFlow = 1
                    parsedData['internalField'] = '%s (%s %s %s)' % ('uniform', 0, 0, 0)
                else:
                    # Vector: combo + three edits.
                    parsedData['internalField'] = '%s (%s %s %s)' % (layout2.itemAt(0).widget().currentText(), layout2.itemAt(1).widget().text(), layout2.itemAt(2).widget().text(), layout2.itemAt(3).widget().text())
            parsedData.writeFile()
        self.pushButton.setEnabled(False)
        if runPotentialFlow:
            QtGui.QMessageBox.about(self, "ERROR", 'Debe simularse con potentialFoam, hacer!!')
            return

    def checkData(self):
        """Enable the apply button only when every enabled edit has a value."""
        ready = True
        for itab in range(self.tabWidget.count()):
            edits = self.tabWidget.widget(itab).findChildren(QtGui.QLineEdit)
            for E in edits:
                if E.isEnabled():
                    if not E.text():
                        ready = False
        if ready:
            self.pushButton.setEnabled(True)
        else:
            self.pushButton.setEnabled(False)
import os
import sys
import zipfile


def zip_directory(directory, targetfn=None, relative=True,
                  compress_type=zipfile.ZIP_DEFLATED, verbose=1):
    """Zip all files and folders in a directory.

    Args:
        directory: The directory whose contents should be zipped.
        targetfn: Output filename of the zipped archive; defaults to
            `directory` + ".zip".
        relative: If True, make the arcname relative to the input directory.
        compress_type: Which kind of compression to use. See zipfile package.
        verbose: How much information to print to stdout while creating
            the archive.

    Returns:
        The filename of the zipped archive.
    """
    assert os.path.isdir(directory)
    if targetfn is None:
        targetfn = directory + ".zip"
    filecount = 0
    if verbose and verbose > 0:
        print("Creating archive %r from directory %r:" % (targetfn, directory))
    with zipfile.ZipFile(targetfn, mode="w") as zipfd:
        for dirpath, dirnames, filenames in os.walk(directory):
            for fname in filenames:
                fpath = os.path.join(dirpath, fname)
                arcname = os.path.relpath(fpath, start=directory) if relative else fpath
                if verbose and verbose > 0:
                    print(" - adding %r" % (arcname,))
                zipfd.write(fpath, arcname=arcname, compress_type=compress_type)
                filecount += 1
    if verbose and verbose > 0:
        print("\n%s files written to archive %r" % (filecount, targetfn))
    return targetfn


def convert_str_to_int(s, do_float=True, do_eval=True):
    """Best-effort conversion of `s` to an int.

    Tries, in order: int(s); int(float(s)) (truncating) if `do_float`;
    humanfriendly.parse_size(s) when that package is installed; finally
    eval(s) if `do_eval`.  Raises the original ValueError if nothing works.

    WARNING: the eval() fallback executes arbitrary expressions -- never
    call this with untrusted input.
    """
    try:
        return int(s)
    except ValueError as e:
        if do_float:
            try:
                # int(float(...)) truncates, e.g. "3.9" -> 3.
                return convert_str_to_int(float(s), do_float=False, do_eval=False)
            except ValueError as e:
                try:
                    import humanfriendly
                except ImportError:
                    print((
                        "Warning, the `humanfriendly` package is not available."
                        "If you want to use e.g. \"500kb\" as filesize, "
                        "please install the `humanfriendly` package:\n"
                        " pip install humanfriendly\n"))
                    humanfriendly = None
                else:
                    try:
                        return humanfriendly.parse_size(s)
                    except humanfriendly.InvalidSize:
                        pass
                if do_eval:
                    try:
                        return convert_str_to_int(eval(s), do_float=do_float, do_eval=False)
                    except (ValueError, SyntaxError) as e:
                        print("Error, could not parse/convert string %r as integer. " % (s,))
                        raise e
                else:
                    print("Error, could not parse/convert string %r as integer. " % (s,))
                    raise e
        else:
            print("Error, could not parse/convert string %r as integer. " % (s,))
            raise e


def open_pptx(fpath):
    """WIP: Open a pptx presentation in PowerPoint on any platform."""
    import subprocess
    import shlex
    if 'darwin' in sys.platform:
        # Renamed local from `exec`, which shadowed the builtin.
        cmd = 'open -a "Microsoft PowerPoint"'
    else:
        raise NotImplementedError("Opening pptx files not yet supported on Windows.")
        # TODO: The right way to do this is probably to search the registry using _winreg package.
    subprocess.Popen(shlex.split(cmd) + [fpath])
import datetime

try:
    import urllib.parse as urlparse
except ImportError:
    # BUG FIX: Python 2's module is top-level `urlparse`; the previous
    # fallback (`from urllib.urlparse import urlparse`) referenced a module
    # that does not exist, and would have bound the *function* instead of
    # the module this code uses (urlparse.parse_qsl, urlparse.ParseResult).
    import urlparse

from django_jinja import library
from django.utils.http import urlencode


@library.global_function
def thisyear():
    """The current year."""
    return datetime.date.today().year


@library.filter
def urlparams(url_, hash=None, **query):
    """Add a fragment and/or query parameters to a URL.

    New query params will be appended to existing parameters, except
    duplicate names, which will be replaced.
    """
    url = urlparse.urlparse(url_)
    fragment = hash if hash is not None else url.fragment

    # Use dict(parse_qsl) so we don't get lists of values.
    query_dict = dict(urlparse.parse_qsl(url.query))
    query_dict.update(query)
    # Drop params explicitly set to None so callers can remove keys.
    query_string = urlencode(
        [(k, v) for k, v in query_dict.items() if v is not None])
    new = urlparse.ParseResult(url.scheme, url.netloc, url.path, url.params,
                               query_string, fragment)
    return new.geturl()
#!/usr/bin/env python
# Lint as: python3
"""Tests for PrometheusStatsCollector."""

from absl import app

from grr_response_core.stats import stats_test_utils
from grr_response_server import prometheus_stats_collector
from grr.test_lib import test_lib


class PrometheusStatsCollectorTest(stats_test_utils.StatsCollectorTest):
  """Runs the shared StatsCollector test suite against the Prometheus backend."""

  def _CreateStatsCollector(self):
    # Factory hook used by the base test class to obtain the collector
    # implementation under test.
    return prometheus_stats_collector.PrometheusStatsCollector()


def main(argv):
  test_lib.main(argv)


if __name__ == "__main__":
  app.run(main)
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007  Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen.filters.rules/Person/_ChangedSince.py

#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext

#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .._changedsincebase import ChangedSinceBase


#-------------------------------------------------------------------------
#
# ChangedSince
#
#-------------------------------------------------------------------------
class ChangedSince(ChangedSinceBase):
    """Rule that checks for persons changed since a specific time."""

    labels = [_('Changed after:'), _('but before:')]
    name = _('Persons changed after <date time>')
    description = _("Matches person records changed after a specified "
                    "date-time (yyyy-mm-dd hh:mm:ss) or in the range, "
                    "if a second date-time is given.")
# -*- coding: utf-8 -*- # # This file is part of Zenodo. # Copyright (C) 2016 CERN. # # Zenodo is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Zenodo is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNE
SS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have
received a copy of the GNU General Public License # along with Zenodo; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. """Deposit serialization schemas.""" from __future__ import absolute_import, print_function
# Copyright 2016 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html

from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
    QDialog, QLineEdit, QSpinBox, QComboBox, QCheckBox, QPlainTextEdit
)


class Panel(QDialog):
    """Dialog base class that keeps Qt widgets in sync with ``self.model``.

    Subclasses declare FIELDS; this class then wires widget change signals
    to model attribute writes and loads model values back into the widgets.
    """

    # A list of two-sized tuples (QWidget's name, model field name).
    FIELDS = []
    # Name to use for serialization of persistent data about this panel (geometry).
    # XXX At the time of this writing (ticket #364), there's already a separate system in Cocoa
    # to persist dialog frames. A "clean" implementation would do like we do with the main window
    # and implement frame save/restore in core, but I fear that I'll needlessly complicate things
    # doing so, so for now, I limit myself to a qt-only solution. Later, we should re-evaluate
    # whether it could be a good idea to push this implementation to the core.
    PERSISTENT_NAME = None

    def __init__(self, mainwindow):
        # These flags suppress the "What's this" button in the title bar.
        super().__init__(mainwindow, Qt.WindowTitleHint | Qt.WindowSystemMenuHint)
        self._widget2ModelAttr = {}
        self.mainwindow = mainwindow

    def _changeComboBoxItems(self, comboBox, newItems):
        # Replacing a combo box's items fires currentIndexChanged; if that
        # signal updates the model, the model gets corrupted.  So disconnect
        # first, swap the items while preserving the current index, then
        # reconnect.
        # NOTE(review): self.comboBoxCurrentIndexChanged is not defined on
        # Panel itself -- presumably provided by a subclass; confirm before
        # relying on this with a connected combo on a bare Panel.
        is_bound = comboBox in self._widget2ModelAttr
        if is_bound:
            comboBox.currentIndexChanged.disconnect(self.comboBoxCurrentIndexChanged)
        previous_index = comboBox.currentIndex()
        comboBox.clear()
        comboBox.addItems(newItems)
        comboBox.setCurrentIndex(previous_index)
        if is_bound:
            comboBox.currentIndexChanged.connect(self.comboBoxCurrentIndexChanged)

    def _connectSignals(self):
        # Wire each declared widget to the change handler matching its type.
        for widget_name, model_attr in self.FIELDS:
            widget = getattr(self, widget_name)
            self._widget2ModelAttr[widget] = model_attr
            if isinstance(widget, QComboBox):
                widget.currentIndexChanged.connect(self.comboboxChanged)
            elif isinstance(widget, QSpinBox):
                widget.valueChanged.connect(self.spinboxChanged)
            elif isinstance(widget, QLineEdit):
                widget.editingFinished.connect(self.lineeditChanged)
            elif isinstance(widget, QPlainTextEdit):
                widget.textChanged.connect(self.plaintexteditChanged)
            elif isinstance(widget, QCheckBox):
                widget.stateChanged.connect(self.checkboxChanged)

    def _loadFields(self):
        # Push the current model values into their bound widgets.
        for widget_name, model_attr in self.FIELDS:
            widget = getattr(self, widget_name)
            value = getattr(self.model, model_attr)
            if isinstance(widget, QComboBox):
                widget.setCurrentIndex(value)
            elif isinstance(widget, QSpinBox):
                widget.setValue(value)
            elif isinstance(widget, QLineEdit):
                widget.setText(value)
            elif isinstance(widget, QPlainTextEdit):
                widget.setPlainText(value)
            elif isinstance(widget, QCheckBox):
                widget.setChecked(value)

    def _saveFields(self):
        # Hook for subclasses; widget changes are normally pushed eagerly.
        pass

    def _loadGeometry(self):
        if self.PERSISTENT_NAME:
            self.mainwindow.app.prefs.restoreGeometry('%sGeometry' % self.PERSISTENT_NAME, self)

    def _saveGeometry(self):
        if self.PERSISTENT_NAME:
            self.mainwindow.app.prefs.saveGeometry('%sGeometry' % self.PERSISTENT_NAME, self)

    def accept(self):
        # The setFocus() call is to force the last edited field to "commit". When the save button
        # is clicked, accept() is called before the last field to have focus has a chance to emit
        # its edition signal.
        self.setFocus()
        self.model.save()
        self._saveGeometry()
        QDialog.accept(self)

    def reject(self):
        self._saveGeometry()
        super().reject()

    # --- Event Handlers
    def _widgetChanged(self, sender, newvalue):
        # Look up which model attribute the sender widget is bound to and set it.
        setattr(self.model, self._widget2ModelAttr[sender], newvalue)

    def comboboxChanged(self):
        sender = self.sender()
        self._widgetChanged(sender, sender.currentIndex())

    def spinboxChanged(self):
        sender = self.sender()
        self._widgetChanged(sender, sender.value())

    def lineeditChanged(self):
        sender = self.sender()
        self._widgetChanged(sender, sender.text())

    def plaintexteditChanged(self):
        sender = self.sender()
        self._widgetChanged(sender, sender.toPlainText())

    def checkboxChanged(self):
        sender = self.sender()
        self._widgetChanged(sender, sender.isChecked())

    # --- model --> view
    def pre_load(self):
        self._loadGeometry()

    def pre_save(self):
        self._saveFields()

    def post_load(self):
        if not self._widget2ModelAttr:  # signal not connected yet
            self._connectSignals()
        self._loadFields()
        self.show()
        # For initial text edits to have their text selected, we *have to* first select the dialog,
        # then setFocus on it with qt.TabFocusReason. Don't ask, I don't know why either...
        self.setFocus()
        focus = self.nextInFocusChain()
        while focus.focusPolicy() == Qt.NoFocus:
            focus = focus.nextInFocusChain()
        focus.setFocus(Qt.TabFocusReason)
import os
import re
import subprocess


def available_cpu_count():
    """ Number of available virtual or physical CPUs on this system, i.e.
    user/real as output by time(1) when called with an optimally scaling
    userspace-only program

    Tries a cascade of platform-specific detection strategies and returns
    the first positive count found; raises Exception if none works.
    (Fix: file handles are now closed via context managers instead of
    being leaked by bare open(...).read() calls.)
    """

    # cpuset
    # cpuset may restrict the number of *available* processors
    try:
        with open('/proc/self/status') as status_file:
            status = status_file.read()
        m = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', status)
        if m:
            # Count the set bits of the CPU-affinity bitmask.
            res = bin(int(m.group(1).replace(',', ''), 16)).count('1')
            if res > 0:
                return res
    except IOError:
        pass

    # Python 2.6+
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        pass

    # http://code.google.com/p/psutil/
    try:
        import psutil
        return psutil.NUM_CPUS
    except (ImportError, AttributeError):
        pass

    # POSIX
    try:
        res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
        if res > 0:
            return res
    except (AttributeError, ValueError):
        pass

    # Windows
    try:
        res = int(os.environ['NUMBER_OF_PROCESSORS'])
        if res > 0:
            return res
    except (KeyError, ValueError):
        pass

    # jython
    try:
        from java.lang import Runtime
        runtime = Runtime.getRuntime()
        res = runtime.availableProcessors()
        if res > 0:
            return res
    except ImportError:
        pass

    # BSD
    try:
        sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],
                                  stdout=subprocess.PIPE)
        scStdout = sysctl.communicate()[0]
        res = int(scStdout)
        if res > 0:
            return res
    except (OSError, ValueError):
        pass

    # Linux
    try:
        with open('/proc/cpuinfo') as cpuinfo_file:
            res = cpuinfo_file.read().count('processor\t:')
        if res > 0:
            return res
    except IOError:
        pass

    # Solaris
    try:
        pseudoDevices = os.listdir('/devices/pseudo/')
        res = 0
        for pd in pseudoDevices:
            if re.match(r'^cpuid@[0-9]+$', pd):
                res += 1
        if res > 0:
            return res
    except OSError:
        pass

    # Other UNIXes (heuristic)
    try:
        try:
            with open('/var/run/dmesg.boot') as dmesg_file:
                dmesg = dmesg_file.read()
        except IOError:
            dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)
            dmesg = dmesgProcess.communicate()[0]
        res = 0
        while '\ncpu' + str(res) + ':' in dmesg:
            res += 1
        if res > 0:
            return res
    except OSError:
        pass

    raise Exception('Can not determine number of CPUs on this system')
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from SimPEG import Mesh, Utils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# NOTE(review): 'eye' is imported twice below.
from scipy.sparse import spdiags, csr_matrix, eye, kron, hstack, vstack, eye, diags
import copy
from scipy.constants import mu_0
from SimPEG import SolverLU
from scipy.sparse.linalg import spsolve, splu
from SimPEG.EM import TDEM
from SimPEG.EM.Analytics.TDEM import hzAnalyticDipoleT, hzAnalyticCentLoopT
from scipy.interpolate import interp2d, LinearNDInterpolator
from scipy.special import ellipk, ellipe


def rectangular_plane_layout(mesh, corner, closed=False, I=1.):
    """
    Discretize a rectangular wire path onto the edges of a mesh.

    corner: sorted list of four corners (x,y,z)

        2--3
        |  |
        1--4

        y
        |
        |--> x

    closed: if True, also lay out the x-directed segment between corners
            1 and 4 so the loop is closed; otherwise that side is omitted.
    I: current amplitude in A

    Output:
    Js: edge current vector [Jx, Jy, Jz], scaled by edge lengths (A-m)
    """
    Jx = np.zeros(mesh.nEx)
    Jy = np.zeros(mesh.nEy)
    Jz = np.zeros(mesh.nEz)
    # y-directed edges between corners 1 and 2, restricted to the
    # z == corner[0,2] plane.
    indy1 = np.logical_and(
        np.logical_and(
            np.logical_and(mesh.gridEy[:, 0] >= corner[0, 0], mesh.gridEy[:, 0] <= corner[1, 0]),
            np.logical_and(mesh.gridEy[:, 1] >= corner[0, 1], mesh.gridEy[:, 1] <= corner[1, 1])),
        (mesh.gridEy[:, 2] == corner[0, 2]))
    # x-directed edges between corners 2 and 3.
    indx1 = np.logical_and(
        np.logical_and(
            np.logical_and(mesh.gridEx[:, 0] >= corner[1, 0], mesh.gridEx[:, 0] <= corner[2, 0]),
            np.logical_and(mesh.gridEx[:, 1] >= corner[1, 1], mesh.gridEx[:, 1] <= corner[2, 1])),
        (mesh.gridEx[:, 2] == corner[1, 2]))
    # y-directed edges between corners 3 and 4 (note the reversed y bounds).
    indy2 = np.logical_and(
        np.logical_and(
            np.logical_and(mesh.gridEy[:, 0] >= corner[2, 0], mesh.gridEy[:, 0] <= corner[3, 0]),
            np.logical_and(mesh.gridEy[:, 1] <= corner[2, 1], mesh.gridEy[:, 1] >= corner[3, 1])),
        (mesh.gridEy[:, 2] == corner[2, 2]))
    if closed:
        # Closing side: x-directed edges between corners 1 and 4.
        indx2 = np.logical_and(
            np.logical_and(
                np.logical_and(mesh.gridEx[:, 0] >= corner[0, 0], mesh.gridEx[:, 0] <= corner[3, 0]),
                np.logical_and(mesh.gridEx[:, 1] >= corner[0, 1], mesh.gridEx[:, 1] <= corner[3, 1])),
            (mesh.gridEx[:, 2] == corner[0, 2]))
    else:
        indx2 = []
    # Opposite signs on opposite sides make the current circulate around
    # the rectangle.
    Jy[indy1] = -I
    Jx[indx1] = -I
    Jy[indy2] = I
    Jx[indx2] = I
    J = np.hstack((Jx, Jy, Jz))
    # Scale by edge length: finite-volume line current in A-m.
    J = J * mesh.edge
    return J


def BiotSavart(locs, mesh, Js):
    """
    Compute the magnetic field generated by current discretized on a mesh
    using the Biot-Savart law.

    Input:
        locs: observation locations
        mesh: mesh on which the current J is discretized
        Js: discretized source current in A-m (Finite Volume formulation)

    Output:
        B: magnetic field [Bx,By,Bz]
    """
    c = mu_0 / (4 * np.pi)  # Biot-Savart prefactor
    nwire = np.sum(Js != 0.)  # number of edges carrying current
    ind = np.where(Js != 0.)
    ind = ind[0]
    B = np.zeros([locs.shape[0], 3])
    # Edge midpoints stacked in x/y/z order, matching the layout of Js.
    gridE = np.vstack([mesh.gridEx, mesh.gridEy, mesh.gridEz])
    for i in range(nwire):
        # x wire: edge index falls in the x-edge range.
        if ind[i] < mesh.nEx:
            r = locs - gridE[ind[i]]
            I = Js[ind[i]] * np.hstack([np.ones([locs.shape[0], 1]),
                                        np.zeros([locs.shape[0], 1]),
                                        np.zeros([locs.shape[0], 1])])
            cr = np.cross(I, r)
            # NOTE(review): despite the name, 'rsq' holds |r|**3 -- the
            # Biot-Savart denominator -- not r squared.
            rsq = np.linalg.norm(r, axis=1)**3.
            B = B + c * cr / rsq[:, None]
        # y wire
        elif ind[i] < mesh.nEx + mesh.nEy:
            r = locs - gridE[ind[i]]
            I = Js[ind[i]] * np.hstack([np.zeros([locs.shape[0], 1]),
                                        np.ones([locs.shape[0], 1]),
                                        np.zeros([locs.shape[0], 1])])
            cr = np.cross(I, r)
            rsq = np.linalg.norm(r, axis=1)**3.
            B = B + c * cr / rsq[:, None]
        # z wire
        elif ind[i] < mesh.nEx + mesh.nEy + mesh.nEz:
            r = locs - gridE[ind[i]]
            I = Js[ind[i]] * np.hstack([np.zeros([locs.shape[0], 1]),
                                        np.zeros([locs.shape[0], 1]),
                                        np.ones([locs.shape[0], 1])])
            cr = np.cross(I, r)
            rsq = np.linalg.norm(r, axis=1)**3.
            B = B + c * cr / rsq[:, None]
        else:
            print('error: index of J out of bounds (number of edges in the mesh)')
    return B


def analytic_infinite_wire(obsloc, wireloc, orientation, I=1.):
    """
    Compute the response of an infinite wire with orientation 'orientation'
    and current I at the obsvervation locations obsloc.

    Output:
    B: magnetic field [Bx,By,Bz]
    """
    n, d = obsloc.shape
    t, d = wireloc.shape
    # Pairwise Euclidean distances between every observation point and
    # every discretization point of the wire.
    d = np.sqrt(np.dot(obsloc**2., np.ones([d, t])) +
                np.dot(np.ones([n, d]), (wireloc.T)**2.) -
                2. * np.dot(obsloc, wireloc.T))
    distr = np.amin(d, axis=1, keepdims=True)  # distance to the wire
    idxmind = d.argmin(axis=1)  # index of the closest wire point
    r = obsloc - wireloc[idxmind]

    # NOTE(review): 'orient' is computed but never used; the cross product
    # below uses 'orientation' directly.
    orient = np.c_[[orientation for i in range(obsloc.shape[0])]]
    # Infinite straight wire: B = mu0*I/(2*pi*d^2) * (orientation x r).
    B = (mu_0 * I) / (2 * np.pi * (distr**2.)) * np.cross(orientation, r)
    return B


def mag_dipole(m, obsloc):
    """
    Compute the response of an infinitesimal mag dipole at location (0,0,0)
    with orientation X and magnetic moment 'm' at the obsvervation
    locations obsloc.

    NOTE(review): the component formulas below match a z-oriented dipole
    (Bz ~ 3 z^2/d^2 - 1) rather than an x-oriented one -- confirm against
    the intended convention.

    Output:
    B: magnetic field [Bx,By,Bz]
    """
    loc = np.r_[[[0., 0., 0.]]]
    n, d = obsloc.shape
    t, d = loc.shape
    # Distance from each observation point to the dipole at the origin.
    d = np.sqrt(np.dot(obsloc**2., np.ones([d, t])) +
                np.dot(np.ones([n, d]), (loc.T)**2.) -
                2. * np.dot(obsloc, loc.T))
    d = d.flatten()
    # Guard against a singularity at the dipole location: a huge distance
    # makes the field effectively zero there.
    ind = np.where(d == 0.)
    d[ind] = 1e6
    x = obsloc[:, 0]
    y = obsloc[:, 1]
    z = obsloc[:, 2]
    #orient = np.c_[[orientation for i in range(obsloc.shape[0])]]
    Bz = (mu_0 * m) / (4 * np.pi * (d**3.)) * (3. * ((z**2.) / (d**2.)) - 1.)
    By = (mu_0 * m) / (4 * np.pi * (d**3.)) * (3. * (z * y) / (d**2.))
    Bx = (mu_0 * m) / (4 * np.pi * (d**3.)) * (3. * (x * z) / (d**2.))
    B = np.vstack([Bx, By, Bz]).T
    return B


def circularloop(a, obsloc, I=1.):
    """
    From Simpson, Lane, Immer, Youngquist 2001
    Compute the magnetic field B response of a current loop of radius 'a'
    with intensity 'I'.

    input:
    a: radius in m
    obsloc: obsvervation locations

    Output:
    B: magnetic field [Bx,By,Bz]
    """
    x = np.atleast_2d(obsloc[:, 0]).T
    y = np.atleast_2d(obsloc[:, 1]).T
    z = np.atleast_2d(obsloc[:, 2]).T
    # NOTE(review): 'r', 'loc', 'n', 'd' and 'lbda' below are unused.
    r = np.linalg.norm(obsloc, axis=1)
    loc = np.r_[[[0., 0., 0.]]]
    n, d = obsloc.shape
    r2 = x**2. + y**2. + z**2.
    rho2 = x**2. + y**2.  # squared cylindrical radius
    # alpha/beta/k are the auxiliary quantities of the elliptic-integral
    # solution in Simpson et al. (2001).
    alpha2 = a**2. + r2 - 2 * a * np.sqrt(rho2)
    beta2 = a**2. + r2 + 2 * a * np.sqrt(rho2)
    k2 = 1 - (alpha2 / beta2)
    lbda = x**2. - y**2.
    C = mu_0 * I / np.pi

    Bx = ((C * x * z) / (2 * alpha2 * np.sqrt(beta2) * rho2)) *\
        ((a**2. + r2) * ellipe(k2) - alpha2 * ellipk(k2))
    # On the loop axis rho2 == 0 divides by zero; the transverse field
    # vanishes there, so NaNs are replaced with 0.
    Bx[np.isnan(Bx)] = 0.

    By = ((C * y * z) / (2 * alpha2 * np.sqrt(beta2) * rho2)) *\
        ((a**2. + r2) * ellipe(k2) - alpha2 * ellipk(k2))
    By[np.isnan(By)] = 0.

    Bz = (C / (2. * alpha2 * np.sqrt(beta2))) *\
        ((a**2. - r2) * ellipe(k2) + alpha2 * ellipk(k2))
    Bz[np.isnan(Bz)] = 0.

    #print(Bx.shape)
    #print(By.shape)
    #print(Bz.shape)
    B = np.hstack([Bx, By, Bz])
    return B
# -*- coding: utf-8 -*-
from fontaine.namelist import codepointsInNamelist


class Charset:
    """Glyph-set definition for Google Fonts' ancient Greek musical symbols."""

    common_name = u'Google Fonts: Greek Ancient Musical Symbols'
    native_name = u''
    abbreviation = 'GREK'

    def glyphs(self):
        """Return the codepoints listed in the bundled .nam file."""
        return codepointsInNamelist(
            "charsets/internals/google_glyphsets/Greek/GF-greek-ancient-musical-symbols.nam")
# coding: utf-8
from __future__ import absolute_import
from .base import Base


class Install(Base):
    """Command that installs every package listed in the configuration."""

    def __init__(self, config):
        # Keep the configuration; run() iterates over its package list.
        self.config = config

    def run(self):
        """Install each configured package, in order."""
        for pkg in self.config.packages:
            pkg.install()
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class PyTraceback2(PythonPackage):
    """Backports of the traceback module"""

    homepage = "https://github.com/testing-cabal/traceback2"
    url = "https://pypi.io/packages/source/t/traceback2/traceback2-1.4.0.tar.gz"

    # Second argument is the md5 checksum of the release tarball.
    version('1.4.0', '9e9723f4d70bfc6308fa992dd193c400')

    depends_on('py-setuptools', type='build')
    # Runtime dependencies of the backport.
    depends_on('py-linecache2', type=('build', 'run'))
    depends_on('py-pbr', type=('build', 'run'))
# -*- coding:utf8 -*-
import codecs
import re


def calWordProbability(infile, outfile):
    """Compute word-translation probabilities from a word-pair file.

    Each input line holds ``source_word<sep>target_word`` where ``<sep>``
    is a single space or tab.  A source word may map to several target
    words; each distinct target is assumed equally likely, so every
    (source, target) pair gets probability ``1 / n_distinct_targets``.

    infile:  input path (UTF-8), one pair per line
    outfile: output path; each line is ``source\\ttarget\\tprobability``
    """
    # {source word: {target word: 1}} -- the inner dict acts as a set of
    # the distinct target words seen for that source.
    word_dict = {}
    with codecs.open(infile, 'r', 'utf8') as fin:
        for line_num, line in enumerate(fin, 1):
            # Progress trace for very large files.
            if line_num % 10001 == 1:
                print(line_num, line.encode('utf8'))
            # BUGFIX: the original pattern was '[ |\t]' -- inside a
            # character class '|' is a literal pipe, so words containing
            # '|' were split incorrectly.  Split on space or tab only.
            parts = re.split('[ \t]', line.strip())
            if len(parts) >= 2:
                source, target = parts[0], parts[1]
                word_dict.setdefault(source, {})[target] = 1

    with codecs.open(outfile, 'w', 'utf8') as fout:
        print('start write')
        written = 0
        for source, targets in word_dict.items():
            written += 1
            if written % 1001 == 0:
                print('writing', written)
            # Defensive check kept from the original: skip multi-word keys
            # (cannot actually occur after splitting on spaces).
            if len(source.split(' ')) > 1:
                continue
            # Float literal keeps true division on both Python 2 and 3
            # (the original relied on ``from __future__ import division``).
            probability = 1.0 / len(targets)
            for target in targets.keys():
                fout.write(source)
                fout.write('\t')
                fout.write(target)
                fout.write('\t')
                fout.write(str(probability))
                fout.write('\n')
# Copyright 2011-2012 Nicolas Bessi (Camptocamp)
# Copyright 2012-2015 Yannick Vaucher (Camptocamp)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields, api


class AccountInvoice(models.Model):
    _inherit = 'account.invoice'

    transaction_id = fields.Char(string='Transaction ID',
                                 index=True,
                                 copy=False,
                                 help="Transaction ID from the "
                                      "financial institute")

    @api.multi
    def finalize_invoice_move_lines(self, move_lines):
        """Propagate the transaction_id from the invoice to the move lines.

        The transaction ID is written on the move lines only if the
        account is the same than the invoice's one.
        """
        move_lines = super(AccountInvoice, self).finalize_invoice_move_lines(
            move_lines)
        for invoice in self:
            if not invoice.transaction_id:
                continue
            invoice_account_id = invoice.account_id.id
            for line in move_lines:
                # Each move line is an odoo command tuple (0, 0, {values}).
                values = line[2]
                if values['account_id'] == invoice_account_id:
                    values['transaction_ref'] = invoice.transaction_id
        return move_lines
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters, status, viewsets
from rest_framework.decorators import detail_route
from rest_framework.response import Response

from coupons.filters import CouponFilter
from coupons.models import Coupon, ClaimedCoupon
from coupons.serializers import CouponSerializer, ClaimedCouponSerializer


# based on https://djangosnippets.org/snippets/1703/
def group_required(api_command):
    """
    This is implemented such that it's default open.

    Returns a ``user_passes_test`` decorator that allows superusers
    always, and other authenticated users according to the group names
    configured in ``settings.COUPON_PERMISSIONS[api_command]``.
    """
    def in_groups(u):
        if u.is_authenticated():
            # supervisor can do anything
            if u.is_superuser:
                return True

            # coupons have permissions set (I think I may set them by
            # default to remove this check)
            if settings.COUPON_PERMISSIONS and \
                    api_command in settings.COUPON_PERMISSIONS:
                group_names = settings.COUPON_PERMISSIONS[api_command]

                # but no group specified, so anyone can.
                if len(group_names) == 0:
                    return True

                # group specified, so only those in the group can.
                if bool(u.groups.filter(name__in=group_names)):
                    return True
        return False
    return user_passes_test(in_groups)


def get_redeemed_queryset(user, coupon_id=None):
    """
    Return a consistent list of the redeemed list across the two endpoints.

    Superusers and members of the configured 'REDEEMED' groups see all
    claims; everyone else only sees their own.
    """
    api_command = 'REDEEMED'

    # If the a coupon isn't specified, get them all.
    if coupon_id is None:
        qs_all = ClaimedCoupon.objects.all()
        qs_some = ClaimedCoupon.objects.filter(user=user.id)
    else:
        qs_all = ClaimedCoupon.objects.filter(coupon=coupon_id)
        qs_some = ClaimedCoupon.objects.filter(coupon=coupon_id, user=user.id)

    if user.is_superuser:
        return qs_all

    if settings.COUPON_PERMISSIONS and \
            api_command in settings.COUPON_PERMISSIONS:
        group_names = settings.COUPON_PERMISSIONS[api_command]

        # So the setting is left empty, so default behavior.
        if len(group_names) == 0:
            return qs_some

        # group specified, so only those in the group can.
        if bool(user.groups.filter(name__in=group_names)):
            return qs_all

    return qs_some


class CouponViewSet(viewsets.ModelViewSet):
    """
    API endpoint that lets you create, delete, retrieve coupons.
    """
    filter_backends = (filters.SearchFilter, DjangoFilterBackend)
    filter_class = CouponFilter
    search_fields = ('code', 'code_l')
    serializer_class = CouponSerializer

    def get_queryset(self):
        """
        Return a subset of coupons or all coupons depending on who is asking.
        """
        api_command = 'LIST'
        qs_all = Coupon.objects.all()
        qs_some = Coupon.objects.filter(bound=True, user=self.request.user.id)

        if self.request.user.is_superuser:
            return qs_all

        # This is different from the normal check because it's default closed.
        if settings.COUPON_PERMISSIONS and \
                api_command in settings.COUPON_PERMISSIONS:
            group_names = settings.COUPON_PERMISSIONS[api_command]

            # So the setting is left empty, so default behavior.
            if len(group_names) == 0:
                return qs_some

            # group specified, so only those in the group can.
            if bool(self.request.user.groups.filter(name__in=group_names)):
                return qs_all

        return qs_some

    @method_decorator(group_required('CREATE'))
    def create(self, request, **kwargs):
        """
        Create a coupon
        """
        serializer = CouponSerializer(data=request.data,
                                      context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    @method_decorator(group_required('DELETE'))
    def destroy(self, request, pk=None, **kwargs):
        """
        Delete the coupon.
        """
        coupon = get_object_or_404(Coupon.objects.all(), pk=pk)
        coupon.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def partial_update(self, request, pk=None, **kwargs):
        # PATCH is intentionally unsupported.
        return Response(status=status.HTTP_404_NOT_FOUND)

    def retrieve(self, request, pk=None, **kwargs):
        """
        Anybody can retrieve any coupon.
        """
        # An integer lookup is treated as a primary key; anything else is
        # treated as a (case-insensitive) coupon code.
        value_is_int = False
        try:
            pk = int(pk)
            value_is_int = True
        except ValueError:
            pass

        if value_is_int:
            coupon = get_object_or_404(Coupon.objects.all(), pk=pk)
        else:
            coupon = get_object_or_404(Coupon.objects.all(), code_l=pk.lower())

        serializer = CouponSerializer(coupon, context={'request': request})
        return Response(serializer.data)

    @method_decorator(group_required('UPDATE'))
    def update(self, request, pk=None, **kwargs):
        """
        This forces it to return a 202 upon success instead of 200.
        """
        coupon = get_object_or_404(Coupon.objects.all(), pk=pk)
        serializer = CouponSerializer(coupon,
                                      data=request.data,
                                      context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    @detail_route(methods=['get'])
    def redeemed(self, request, pk=None, **kwargs):
        """
        Convenience endpoint for getting list of claimed instances for a coupon.
        """
        coupon = get_object_or_404(Coupon.objects.all(), pk=pk)
        qs = get_redeemed_queryset(self.request.user, coupon.id)
        serializer = ClaimedCouponSerializer(qs,
                                             many=True,
                                             context={'request': request})
        return Response(serializer.data)

    @detail_route(methods=['put'])
    def redeem(self, request, pk=None, **kwargs):
        """
        Convenience endpoint for redeeming.
        """
        queryset = Coupon.objects.all()
        coupon = get_object_or_404(queryset, pk=pk)

        # Maybe should do coupon.redeem(user).
        # if data['expires'] < now():
        data = {
            'coupon': pk,
            'user': self.request.user.id,
        }
        serializer = ClaimedCouponSerializer(data=data,
                                             context={'request': request})
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


class ClaimedCouponViewSet(viewsets.ModelViewSet):
    """
    API endpoint that lets you retrieve claimed coupon details.
    """
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ('user',)
    serializer_class = ClaimedCouponSerializer

    def get_queryset(self):
        # Visibility rules are shared with CouponViewSet.redeemed.
        return get_redeemed_queryset(self.request.user)

    def create(self, request, **kwargs):
        # Claims are created via CouponViewSet.redeem, not directly.
        return Response(status=status.HTTP_404_NOT_FOUND)

    @method_decorator(group_required('DELETE'))
    def destroy(self, request, pk=None, **kwargs):
        """
        Basically un-redeem a coupon.
        """
        redeemed = get_object_or_404(ClaimedCoupon.objects.all(), pk=pk)
        redeemed.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def partial_update(self, request, pk=None, **kwargs):
        return Response(status=status.HTTP_404_NOT_FOUND)

    def retrieve(self, request, pk=None, **kwargs):
        return Response(status=status.HTTP_404_NOT_FOUND)

    def update(self, request, pk=None, **kwargs):
        return Response(status=status.HTTP_404_NOT_FOUND)
from .responses import CloudWatchResponse

# Hostname patterns this mock backend intercepts: any regional
# monitoring.<region>.amazonaws.com endpoint, over http or https.
url_bases = [
    "https?://monitoring.(.+).amazonaws.com",
]

# All CloudWatch actions arrive at the service root; '{0}' is substituted
# with each url_base, and requests are routed through the shared dispatcher.
url_paths = {
    '{0}/$': CloudWatchResponse.dispatch,
}
#!/usr/bin/env python
# vim:fileencoding=utf-8
"""Read an inventory JSON mapping and print instance-id entries keyed by
their first value, as plain text or JSON."""
import argparse
import json
import sys
import os


def main(sysargs=sys.argv[:]):
    """Parse CLI options, filter the inventory, and write the result."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'instream', nargs='?', type=argparse.FileType('r'),
        default=sys.stdin)
    parser.add_argument(
        '-f', '--output-format', choices=['text', 'json'],
        default=os.environ.get('FORMAT', 'json'))
    opts = parser.parse_args(sysargs[1:])

    mapped = filter_json(json.load(opts.instream))

    if opts.output_format == 'text':
        for name, instance_id in sorted(mapped.items()):
            sys.stdout.write('{} {}\n'.format(name, instance_id))
    else:
        json.dump(mapped, sys.stdout, indent=2)
        sys.stdout.write('\n')
    return 0


def filter_json(inv):
    """Invert the inventory: map each instance entry's first value back to
    its 'i-...' key, dropping every non-instance key."""
    return {values[0]: key
            for key, values in inv.items()
            if key.startswith('i-')}


if __name__ == '__main__':
    sys.exit(main())
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

"""Support for JavaScript and Node.js."""

from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task

from pants.contrib.node.subsystems.resolvers.node_preinstalled_module_resolver import (
    NodePreinstalledModuleResolver,
)
from pants.contrib.node.subsystems.resolvers.npm_resolver import NpmResolver
from pants.contrib.node.target_types import (
    NodeBundle,
    NodeModule,
    NodePreinstalledModule,
    NodeRemoteModule,
    NodeTest,
)
from pants.contrib.node.targets.node_bundle import NodeBundle as NodeBundleV1
from pants.contrib.node.targets.node_module import NodeModule as NodeModuleV1
from pants.contrib.node.targets.node_preinstalled_module import (
    NodePreinstalledModule as NodePreinstalledModuleV1,
)
from pants.contrib.node.targets.node_remote_module import NodeRemoteModule as NodeRemoteModuleV1
from pants.contrib.node.targets.node_test import NodeTest as NodeTestTargetV1
from pants.contrib.node.tasks.javascript_style import JavascriptStyleFmt, JavascriptStyleLint
from pants.contrib.node.tasks.node_build import NodeBuild
from pants.contrib.node.tasks.node_bundle import NodeBundle as NodeBundleTask
from pants.contrib.node.tasks.node_install import NodeInstall
from pants.contrib.node.tasks.node_repl import NodeRepl
from pants.contrib.node.tasks.node_resolve import NodeResolve
from pants.contrib.node.tasks.node_run import NodeRun
from pants.contrib.node.tasks.node_test import NodeTest as NodeTestTask


def build_file_aliases():
    """Expose the V1 node targets under their BUILD-file alias names."""
    return BuildFileAliases(
        targets={
            "node_bundle": NodeBundleV1,
            "node_module": NodeModuleV1,
            "node_preinstalled_module": NodePreinstalledModuleV1,
            "node_remote_module": NodeRemoteModuleV1,
            "node_test": NodeTestTargetV1,
        },
    )


def register_goals():
    """Install the node tasks into the standard pants goals."""
    # Register tasks.
    task(name="node", action=NodeRepl).install("repl")
    task(name="node", action=NodeResolve).install("resolve")
    task(name="node", action=NodeRun).install("run")
    task(name="node", action=NodeBuild).install("compile", first=True)
    task(name="node", action=NodeTestTask).install("test")
    task(name="node", action=NodeBundleTask).install("bundle")
    task(name="node-install", action=NodeInstall).install()

    # Linting
    task(name="javascriptstyle", action=JavascriptStyleLint).install("lint")
    task(name="javascriptstyle", action=JavascriptStyleFmt).install("fmt")


def global_subsystems():
    """Subsystems that must be initialized globally for this plugin."""
    return (NodePreinstalledModuleResolver, NpmResolver)


def target_types():
    """V2 target types provided by this plugin."""
    return [NodeBundle, NodeModule, NodePreinstalledModule, NodeRemoteModule, NodeTest]
import nengo
from nengo.dists import Uniform
import nstbot
import numpy as np
import joystick_node
import udp
import time

# Toggle: True drives a physical EV3 robot over a socket; False runs the
# model against stubbed sensors/motors.
use_bot = False

if use_bot:
    bot = nstbot.EV3Bot()
    #bot.connect(nstbot.connection.Socket('192.168.1.160'))
    bot.connect(nstbot.connection.Socket('10.162.177.187'))
    time.sleep(1)
    bot.connection.send('!M+\n')
    # Poll lego sensors 1-4 every 50 ms.
    bot.activate_sensor([1, 2, 3, 4], period=0.05)

synapse = 0.006      # default synaptic filter time constant (s)
msg_period = 0.1     # rate-limit for motor commands sent to the robot (s)

model = nengo.Network(label='EV3 Demo')
with model:
    # Constant stand-in for a physical joystick (6 axes/buttons).
    joystick = nengo.Node([0, 0, 0, 0, 0, 0])  # joystick_node.Joystick())

    control = nengo.networks.EnsembleArray(n_ensembles=4, n_neurons=100)
    nengo.Connection(joystick[:4], control.input, synapse=None)

    motor = nengo.networks.EnsembleArray(n_ensembles=4, n_neurons=100)
    for ens in motor.ensembles:
        # Positive intercepts: motor neurons stay silent for tiny inputs.
        ens.intercepts = Uniform(0.05, 0.9)

    # Mixing matrix mapping (x, y, rotation) commands onto the three
    # omni-wheel motors.
    omni_transform = np.array([[-1, 0, -1],
                               [0.5, 1, -0.5],
                               [1, -1, -1]]).T

    nengo.Connection(control.output[[1, 0, 2]], motor.input[:3],
                     transform=omni_transform * 2, synapse=synapse)
    nengo.Connection(control.output[3], motor.input[3], transform=-1,
                     synapse=synapse)

    def bot_motor(t, x):
        # Forward the decoded motor values to the robot; motor 3 is scaled
        # down and explicitly zeroed when idle.
        if use_bot:
            bot.motor(1, x[0], msg_period=msg_period)
            bot.motor(0, x[1], msg_period=msg_period)
            bot.motor(2, x[2], msg_period=msg_period)
            if abs(x[3]) > 0:
                bot.motor(3, x[3] * 0.2, msg_period=msg_period)
            else:
                bot.motor(3, 0, msg_period=msg_period)
    motor_node = nengo.Node(bot_motor, size_in=4)
    nengo.Connection(motor.output, motor_node, synapse=synapse)

    def sensors(t):
        # Return the robot's lego sensor readings, or zeros when offline.
        #left = (bot.lego_sensors[0] + bot.lego_sensors[1]) * 0.5
        #right = (bot.lego_sensors[2] + bot.lego_sensors[3]) * 0.5
        #joystick.output.joystick.set_vibration(left, right)
        if use_bot:
            return bot.lego_sensors
        else:
            return [0, 0, 0, 0]
    sensor_node = nengo.Node(sensors, size_out=4)

    # Bridge sensor data out / control data in over UDP (e.g. to a remote
    # controller process on the same machine).
    udp_node = nengo.Node(udp.UDP(size_in=4, size_out=4,
                                  address='localhost',
                                  in_port=8889, out_port=8888),
                          size_in=4, size_out=4)
    nengo.Connection(sensor_node, udp_node, synapse=None)
    nengo.Connection(udp_node, control.input, synapse=synapse)

    '''
    avoid_inhibit = nengo.Ensemble(n_neurons=50, dimensions=1,
                                   intercepts=Uniform(0.2, 0.9))
    nengo.Connection(joystick[5], avoid_inhibit, synapse=None)
    nengo.Connection(avoid_inhibit, sensors_ir.neurons, transform=[[-1]]*200, synapse=0.1)
    nengo.Connection(avoid_inhibit, sensors_us.neurons, transform=[[-1]]*200, synapse=0.1)
    '''

# Launch the interactive visualizer with sliders/plots for the main signals.
if True:
    import nengo_viz
    viz = nengo_viz.Viz(model)
    viz.slider(sensor_node)
    viz.value(control.output)
    viz.value(motor.output)
    viz.raster(motor.ensembles[0].neurons, n_neurons=50)
    viz.raster(control.ensembles[0].neurons, n_neurons=10)
    viz.start()
import subprocess
from importlib import import_module
import os
import dolfin
import nanopores
from nanopores.tools.utilities import Log
#FIXME: deprecated because of license conflict -> import from dolfin
#from nanopores.meshconvert import convert2xml

# Scratch directory where .geo / .msh / .xml mesh files are written.
MESHDIR = "/tmp/nanopores"


def geofile2geo(code, meta, name=None, clscale=1.):
    """Build a nanopores.Geometry from gmsh .geo source code.

    code:    gmsh .geo source as a string
    meta:    dict holding at least 'physical_domain' and
             'physical_boundary' (popped here); it is also written next to
             the mesh so reconstructgeo() can rebuild the geometry later
    name:    optional subdirectory of MESHDIR to keep runs separate
    clscale: gmsh characteristic-length scale factor

    Raises RuntimeError when gmsh fails.
    """
    pid = str(os.getpid())
    meshdir = (MESHDIR + "/" + name) if name is not None else MESHDIR
    if not os.path.exists(meshdir):
        os.makedirs(meshdir)

    inputfile = "%s/input%s.geo" % (meshdir, pid)
    outfile = "%s/out%s.msh" % (meshdir, pid)
    meshfile = "%s/mesh%s.xml" % (meshdir, pid)
    xml_sub = "%s/mesh%s_physical_region.xml" % (meshdir, pid)
    xml_bou = "%s/mesh%s_facet_region.xml" % (meshdir, pid)
    # Remove stale region files so dolfin-convert output is unambiguous.
    if os.path.exists(xml_sub):
        os.remove(xml_sub)
    if os.path.exists(xml_bou):
        os.remove(xml_bou)

    with Log("executing gmsh..."):
        # Save the geometry code to the .geo file, then invoke gmsh on it.
        with open(inputfile, "w") as f:
            f.write(code)
        #gmsh_out = subprocess.call(["gmsh", "-3", "-v", "1",
        gmsh_out = subprocess.call(["./gmsh", "-3", "-v", "1",
                                    "-format", "msh2",
                                    "-clscale", "%f" % clscale,
                                    inputfile, "-o", outfile, "-optimize"])
        if gmsh_out != 0:
            raise RuntimeError("Gmsh failed in generating this geometry")

    with Log("converting to dolfin..."):
        subprocess.check_output(["dolfin-convert", outfile, meshfile])
        # for debugging:
        # convert2xml(outfile, meshfile)
    mesh = dolfin.Mesh(meshfile)

    # Persist metadata so reconstructgeo() can rebuild the Geometry later.
    with open('%s/meta%s.txt' % (meshdir, pid), 'w') as f:
        f.write(repr(meta))

    pdom = meta.pop("physical_domain")
    pbou = meta.pop("physical_boundary")
    subdomains = dolfin.MeshFunction("size_t", mesh, xml_sub) if pdom else None
    boundaries = dolfin.MeshFunction("size_t", mesh, xml_bou) if pbou else None
    geo = nanopores.Geometry(None, mesh, subdomains, boundaries, pdom, pbou)
    return geo


def reconstructgeo(name=None, pid=None, params=None):
    """Rebuild a Geometry from mesh files previously written by geofile2geo.

    name:   optional subdirectory of MESHDIR
    pid:    process id of the mesh files; None picks the newest mesh
    params: if given, must equal the stored meta["params"]

    Raises EnvironmentError when no (compatible) mesh is available.
    """
    meshdir = (MESHDIR + "/" + name) if name is not None else MESHDIR
    if not os.path.exists(meshdir):
        raise EnvironmentError("Geometry folder does not exist yet.")

    if pid is None:
        # Pick the most recently modified input<pid>.geo file.
        files = os.listdir(meshdir)
        mfiles = [f for f in files if f.startswith("input")]
        if not mfiles:
            raise EnvironmentError("No existing mesh files found.")
        latest = max(mfiles, key=lambda f: os.path.getmtime(meshdir + "/" + f))
        # BUGFIX: extract <pid> from "input<pid>.geo" by slicing.  The
        # original used lstrip("input").rstrip(".geo"), which strips
        # *character sets* and would mangle pids containing any of the
        # characters i, n, p, u, t, g, e, o or '.'.
        pid = latest[len("input"):-len(".geo")]

    meshfile = "%s/mesh%s.xml" % (meshdir, pid)
    if not os.path.exists(meshfile):
        raise EnvironmentError(
            "No existing mesh files found with pid %s." % pid)
    # print(...) calls keep this module importable under Python 3 (the
    # original used Python-2 print statements).
    print("Found existing mesh file with pid %s." % pid)

    # NOTE: eval() trusts the locally written meta file (repr of a dict).
    with open('%s/meta%s.txt' % (meshdir, pid), "r") as f:
        meta = eval(f.read())

    if params is not None:
        if not params == meta["params"]:
            raise EnvironmentError(
                "Mesh file does not have compatible parameters.")
        print("Mesh file has compatible parameters.")

    print("Reconstructing geometry from %s." % meshfile)
    xml_sub = "%s/mesh%s_physical_region.xml" % (meshdir, pid)
    xml_bou = "%s/mesh%s_facet_region.xml" % (meshdir, pid)
    mesh = dolfin.Mesh(meshfile)
    pdom = meta.pop("physical_domain")
    pbou = meta.pop("physical_boundary")
    subdomains = dolfin.MeshFunction("size_t", mesh, xml_sub) if pdom else None
    boundaries = dolfin.MeshFunction("size_t", mesh, xml_bou) if pbou else None
    geo = nanopores.Geometry(None, mesh, subdomains, boundaries, pdom, pbou)
    return geo


def generate_mesh(clscale, gid, xml=True, pid="", dim=3, optimize=True,
                  **params):
    """ python function that writes geo for given geometry and xml for fenics

    Input: clscale... scaling of characteristic length in gmsh [float]
           gid    ... geometry identifier [string]
           pid    ... optional process id to prevent file access clashes
           dim    ... mesh dimension passed to gmsh
           optimize.. whether to pass -optimize to gmsh
           params ... forwarded to the geometry's get_geo()
    Out:   geo_dict... file identifier dictionary + geo_dict
    """
    # BUGFIX: honour a caller-supplied pid; the original unconditionally
    # overwrote the argument with this process's pid.
    if not pid:
        pid = str(os.getpid())
    inputfile = "input%s.geo" % pid
    outfile = "out%s.msh" % pid
    meshfile = "mesh%s.xml" % pid

    # Import the geometry module by name and fetch its geo-code builder.
    py4geo = "nanopores.geometries.%s.py4geo" % gid
    #exec('from %s import get_geo' %py4geo)
    mod = import_module(py4geo)
    get_geo = mod.get_geo

    # create path/to/nanoporesdata/gid/mesh if not already there
    meshdir = os.path.join(nanopores.DATADIR, gid, "mesh")
    if not os.path.exists(meshdir):
        os.makedirs(meshdir)

    fid_dict = {"fid_geo": os.path.join(meshdir, inputfile),
                "fid_msh": os.path.join(meshdir, outfile),
                }

    # Save the generated code to the .geo file ('with' guarantees the
    # handle is closed even on error; the original leaked it on failure).
    geo_dict = get_geo(**params)
    with open(fid_dict["fid_geo"], "w") as fobj:
        fobj.write(geo_dict["geo_code"])
    del geo_dict["geo_code"]

    # after writing the geo file, call gmsh
    callstr = ["gmsh", "-%s" % dim, "-v", "1", "-clscale", "%f" % clscale,
               fid_dict["fid_geo"], "-o", fid_dict["fid_msh"]]
    if optimize:
        callstr.append("-optimize")
    gmsh_out = subprocess.call(callstr)

    if gmsh_out != 0:
        raise RuntimeError('Gmsh failed in generating this geometry')

    if xml:
        fid_dict["fid_xml"] = os.path.join(meshdir, meshfile)
        subprocess.check_output(["dolfin-convert", fid_dict["fid_msh"],
                                 fid_dict["fid_xml"]])
        # for debugging:
        #convert2xml(fid_dict["fid_msh"], fid_dict["fid_xml"])

    # optionally, write metadata to file ("meta" should be dict)
    if "meta" in geo_dict:
        save(geo_dict["meta"], meshdir, "meta%s" % pid)

    geo_dict.update(fid_dict)
    return geo_dict


def save(data, dir=".", name="file"):
    """Write repr(data) to <dir>/<name>.txt."""
    with open('%s/%s.txt' % (dir, name), 'w') as f:
        f.write(repr(data))


# -----
# to test script run '>> python -m nanopores.geo2xml'
if __name__ == '__main__':
    params = {"x0": None}
    print(generate_mesh(
        clscale=7.0, gid="W_2D_geo", xml=False, **params))
from django.conf.urls import patterns, include, url

# URL routes for the remotestatus app.  Views are given as strings and
# resolved against the 'remotestatus.views' module by patterns().
urlpatterns = patterns('remotestatus.views',
    # Detail page for a single remote box.
    url(r'^remote-box/(?P<remote_box_id>[0-9]+)/$', 'remote_box_detail',
        name='rs-remote-box-detail'),
    # Dashboard scoped to a specific call round.
    url(r'^(?P<call_round_id>[0-9]+)/$', 'dashboard', name='rs-dashboard'),
    # Default dashboard.  NOTE(review): shares its name with the route
    # above, so reversing 'rs-dashboard' resolves to only one of them.
    url(r'^$', 'dashboard', name='rs-dashboard'),
)
                                  # (continuation of a signal .send(...)
                                  # call that begins before this chunk)
                                  request=self.request,
                                  user=self.request.user,
                                  email_address=email_address)
        return super(EmailView, self).form_valid(form)

    def post(self, request, *args, **kwargs):
        # Dispatch on the submit button pressed: add a new address, or
        # send/remove/make-primary an existing one.
        res = None
        if "action_add" in request.POST:
            res = super(EmailView, self).post(request, *args, **kwargs)
        elif request.POST.get("email"):
            if "action_send" in request.POST:
                res = self._action_send(request)
            elif "action_remove" in request.POST:
                res = self._action_remove(request)
            elif "action_primary" in request.POST:
                res = self._action_primary(request)
            res = res or HttpResponseRedirect(reverse('account_email'))
            # Given that we bypassed AjaxCapableProcessFormViewMixin,
            # we'll have to call invoke it manually...
            res = _ajax_response(request, res)
        else:
            # No email address selected
            res = HttpResponseRedirect(reverse('account_email'))
            res = _ajax_response(request, res)
        return res

    def _action_send(self, request, *args, **kwargs):
        # Re-send the confirmation mail for the selected address.
        email = request.POST["email"]
        try:
            email_address = EmailAddress.objects.get(
                user=request.user,
                email=email,
            )
            get_adapter().add_message(request,
                                      messages.INFO,
                                      'account/messages/'
                                      'email_confirmation_sent.txt',
                                      {'email': email})
            email_address.send_confirmation(request)
            return HttpResponseRedirect(self.get_success_url())
        except EmailAddress.DoesNotExist:
            # Unknown address: fall through; post() redirects.
            pass

    def _action_remove(self, request, *args, **kwargs):
        # Delete the selected address unless it is the primary one.
        email = request.POST["email"]
        try:
            email_address = EmailAddress.objects.get(
                user=request.user,
                email=email
            )
            if email_address.primary:
                get_adapter().add_message(request,
                                          messages.ERROR,
                                          'account/messages/'
                                          'cannot_delete_primary_email.txt',
                                          {"email": email})
            else:
                email_address.delete()
                signals.email_removed.send(sender=request.user.__class__,
                                           request=request,
                                           user=request.user,
                                           email_address=email_address)
                get_adapter().add_message(request,
                                          messages.SUCCESS,
                                          'account/messages/'
                                          'email_deleted.txt',
                                          {"email": email})
                return HttpResponseRedirect(self.get_success_url())
        except EmailAddress.DoesNotExist:
            pass

    def _action_primary(self, request, *args, **kwargs):
        # Promote the selected address to the user's primary address.
        email = request.POST["email"]
        try:
            email_address = EmailAddress.objects.get_for_user(
                user=request.user,
                email=email
            )
            # Not primary=True -- Slightly different variation, don't
            # require verified unless moving from a verified
            # address. Ignore constraint if previous primary email
            # address is not verified.
            if not email_address.verified and \
                    EmailAddress.objects.filter(user=request.user,
                                                verified=True).exists():
                get_adapter().add_message(request,
                                          messages.ERROR,
                                          'account/messages/'
                                          'unverified_primary_email.txt')
            else:
                # Sending the old primary address to the signal
                # adds a db query.
                try:
                    from_email_address = EmailAddress.objects \
                        .get(user=request.user, primary=True)
                except EmailAddress.DoesNotExist:
                    from_email_address = None
                email_address.set_as_primary()
                get_adapter() \
                    .add_message(request,
                                 messages.SUCCESS,
                                 'account/messages/primary_email_set.txt')
                signals.email_changed \
                    .send(sender=request.user.__class__,
                          request=request,
                          user=request.user,
                          from_email_address=from_email_address,
                          to_email_address=email_address)
                return HttpResponseRedirect(self.get_success_url())
        except EmailAddress.DoesNotExist:
            pass

    def get_context_data(self, **kwargs):
        ret = super(EmailView, self).get_context_data(**kwargs)
        # NOTE: For backwards compatibility
        ret['add_email_form'] = ret.get('form')
        # (end NOTE)
        return ret


email = login_required(EmailView.as_view())


class PasswordChangeView(AjaxCapableProcessFormViewMixin, FormView):
    # Change-password flow for users who already have a usable password.
    template_name = "account/password_change.html"
    form_class = ChangePasswordForm
    success_url = reverse_lazy("account_change_password")

    def get_form_class(self):
        # Allow the form class to be overridden via app settings.
        return get_form_class(app_settings.FORMS,
                              'change_password',
                              self.form_class)

    @sensitive_post_parameters_m
    def dispatch(self, request, *args, **kwargs):
        # Users without a usable password must use the set-password view.
        if not request.user.has_usable_password():
            return HttpResponseRedirect(reverse('account_set_password'))
        return super(PasswordChangeView, self).dispatch(request, *args,
                                                        **kwargs)

    def get_form_kwargs(self):
        kwargs = super(PasswordChangeView, self).get_form_kwargs()
        kwargs["user"] = self.request.user
        return kwargs

    def form_valid(self, form):
        form.save()
        # Keep the session alive after the password change unless the app
        # is configured to log the user out.
        if (update_session_auth_hash is not None and
                not app_settings.LOGOUT_ON_PASSWORD_CHANGE):
            update_session_auth_hash(self.request, form.user)
        get_adapter().add_message(self.request,
                                  messages.SUCCESS,
                                  'account/messages/password_changed.txt')
        signals.password_changed.send(sender=self.request.user.__class__,
                                      request=self.request,
                                      user=self.request.user)
        return super(PasswordChangeView, self).form_valid(form)

    def get_context_data(self, **kwargs):
        ret = super(PasswordChangeView, self).get_context_data(**kwargs)
        # NOTE: For backwards compatibility
        ret['password_change_form'] = ret.get('form')
        # (end NOTE)
        return ret


password_change = login_required(PasswordChangeView.as_view())


class PasswordSetView(AjaxCapableProcessFormViewMixin, FormView):
    # Set-password flow for users without a usable password yet.
    template_name = "account/password_set.html"
    form_class = SetPasswordForm
    success_url = reverse_lazy("account_set_password")

    def get_form_class(self):
        return get_form_class(app_settings.FORMS,
                              'set_password',
                              self.form_class)

    @sensitive_post_parameters_m
    def dispatch(self, request, *args, **kwargs):
        # Users who already have a password must use the change view.
        if request.user.has_usable_password():
            return HttpResponseRedirect(reverse('account_change_password'))
        return super(PasswordSetView, self).dispatch(request, *args,
                                                     **kwargs)

    def get_form_kwargs(self):
        kwargs = super(PasswordSetView, self).get_form_kwargs()
        kwargs["user"] = self.request.user
        return kwargs

    def form_valid(self, form):
        form.save()
        # (truncated in this chunk -- continues past the visible source)
        get_adapter().add_message(s
# coding=utf8
#
"""
Test cases for the ``odtasks`` module.
"""
import unittest
def pretty_date(time=False):
    """
    Get a datetime object or an int()/float() epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc.

    Args:
        time: a datetime.datetime, an epoch timestamp (int or float), or a
            falsy value meaning "now".

    Returns:
        str: the relative description; '' when *time* lies in the future.

    Raises:
        ValueError: if *time* is of an unsupported type.
    """
    from datetime import datetime
    now = datetime.now()
    # bool is a subclass of int -- exclude it so True/False are not
    # misread as the epoch timestamps 1/0.
    if isinstance(time, (int, float)) and not isinstance(time, bool):
        diff = now - datetime.fromtimestamp(time)
    elif isinstance(time, datetime):
        diff = now - time
    elif not time:
        diff = now - now
    else:
        # Previously an unsupported type fell through and raised a
        # confusing NameError on the undefined `diff` below.
        raise ValueError("Unsupported time value: {0!r}".format(time))

    second_diff = diff.seconds
    day_diff = diff.days

    # Future times (negative day delta) are not described.
    if day_diff < 0:
        return ''

    if day_diff == 0:
        if second_diff < 10:
            return "just now"
        if second_diff < 60:
            return str(second_diff) + " seconds ago"
        if second_diff < 120:
            return "a minute ago"
        if second_diff < 3600:
            # Floor division keeps the original Python 2 semantics and
            # avoids output like '1.5 minutes ago' on Python 3.
            return str(second_diff // 60) + " minutes ago"
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str(second_diff // 3600) + " hours ago"
    if day_diff == 1:
        return "Yesterday"
    if day_diff < 7:
        return str(day_diff) + " days ago"
    if day_diff < 31:
        return str(day_diff // 7) + " weeks ago"
    if day_diff < 365:
        return str(day_diff // 30) + " months ago"
    return str(day_diff // 365) + " years ago"
# Seed list of project-based-learning resources used to bootstrap the crawl.
enlaces_iniciales = ['http://www.edutopia.org/project-based-learning-history',
                     'http://bie.org/about/why_pbl',
                     'http://es.wikipedia.org/wiki/Aprendizaje_basado_en_proyectos',
                     'http://en.wikipedia.org/wiki/Project-based_learning',
                     'https://www.youtube.com/watch?v=LMCZvGesRz8',
                     'http://www.learnnc.org/lp/pages/4753',
                     'http://www.ascd.org/publications/educational_leadership/sept10/vol68/num01/seven_essentials_for_project-based_learning.aspx',
                     'http://eric.ed.gov/?q=%22%22&ff1=subActive+Learning',
                     'http://eric.ed.gov/?q=%22%22&ff1=subStudent+Projects']

from learningobjects.utils.alchemyapi import AlchemyAPI
from learningobjects.utils.parsers import *
from learningobjects.utils.search import *
from ftfy import fix_text
import urllib

url = enlaces_iniciales[0]
texto = ''
tags = set()
# Scrape every seed page: accumulate the article text and collect its tags.
for url in enlaces_iniciales:
    gp_desc = GooseParser(url).describe()
    texto += gp_desc.text
    for tag in gp_desc.tags:
        tags.add(tag.strip())
# Repair mojibake/encoding damage in the concatenated text.
texto = fix_text(texto)

more_links = set()
# Extract the top keyword from the combined text via AlchemyAPI.
alchemyapi = AlchemyAPI()
response = alchemyapi.keywords("text", texto)
concept = response['keywords'][0]['text']

# Expand the link set: Wikipedia hits for the main concept...
wiki = Wikipedia()
for res in wiki.search(concept):
    more_links.add(res)
# ...Google "related:" results (NOTE(review): `url` here is whatever the
# loop above left it at, i.e. the *last* seed link -- confirm intended),
# capped at roughly 30 collected links...
google = Google()
for res in google.search('related:'+url):
    more_links.add(res)
    if len(more_links) > 30:
        break
# ...and DuckDuckGo related results for every seed link.
duck = DuckDuckGo()
for link in enlaces_iniciales:
    for res in duck.search_related(link):
        more_links.add(res)

# Disabled entity-extraction snippet kept for reference (Python 2 syntax).
"""
response = alchemyapi.entities("text", texto)
if response['status'] == 'OK':
    noticia.entities = response["entities"]
else:
    print response['statusInfo']
"""
#!/usr/bin/env python2.5
"""A test provider for the stress testing."""
# change registry this often [msec]
registryChangeTimeout = 2017

from ContextKit.flexiprovider import *
import gobject
import time
import os


def update():
    """Push the current whole-second timestamp into both test properties and
    re-arm this timer so it fires on (roughly) the next second boundary."""
    t = time.time()
    # Milliseconds past/short of the nearest whole second.
    dt = int(1000*(t - round(t)))
    # Re-schedule ourselves; returning False below cancels the old timer.
    gobject.timeout_add(1000 - dt, update)
    v = int(round(t))
    fp.set('test.int', v)
    fp.set('test.int2', v)
    print t
    return False

pcnt = 0


def chgRegistry():
    """Alternate the registry cache between the 1-provider and 2-provider
    snapshots (atomically, via copy + rename). Returns True to stay armed."""
    global pcnt
    pcnt += 1
    if pcnt % 2:
        print "1 provider"
        os.system('cp 1provider.cdb tmp.cdb; mv tmp.cdb cache.cdb')
    else:
        print "2 providers"
        os.system('cp 2providers.cdb tmp.cdb; mv tmp.cdb cache.cdb')
    return True

gobject.timeout_add(1000, update)
# uncomment this to see the "Bus error" XXX
gobject.timeout_add(registryChangeTimeout, chgRegistry)
fp = Flexiprovider([INT('test.int'), INT('test.int2')], 'my.test.provider', 'session')
fp.run()
'Compartida','Compartida'), ('Invadida','Invadida'), ('Traspasada','Traspasada'), ('Prestada','Prestada'), ('Other','Otro'), ] type_dwelling_data=[ ('Quinta', 'Quinta'), ('Casa', 'Casa'), ('Apartamento', 'Apartamento'), ('Rancho', 'Rancho'), ('Barraca', 'Barraca'), ('Habitacion', 'Habitación'), ('Other','Otro'), ] name = fields.Char( string='Nombre de la familia', readonly=True, ) code_family = fields.Char( string='Código de la familia', readonly=True, ) communal_council_id = fields.Many2one( 'tcc.communal.council', string='Consejo comunal', default=default_communal_council, readonly=True, ) apartment = fields.Char( string='Apartamento', ) floor = fields.Char( string='Piso', ) house_id = fields.Many2one( 'tcc.dwelling.house', string='Casa', ) edifice_id = fields.Many2one( 'tcc.dwelling.edifice', string='Edificio', ) tenancy = fields.Selection( tenancy_data, string='Forma de Tenencia', default='Propia', ) type_dwelling = fields.Selection( type_dwelling_data, string='Tipo de Vivienda', default='Casa', ) terreno_propio = fields.Selection( [('Si', 'Si'), ('No', 'No'),], string='Terreno propio', ) pertenece_ocv = fields.Selection( [('Si', 'Si'), ('No', 'No'),], string='Pertenece a (OCV)', ) type_walls_ids = fields.Many2many( 'tcc.family.type.walls', 'tcc_family_type_walls_rel', 'family_id', 'type_walls_id', string='Tipo de pared' ) type_roof_ids = fields.Many2many( 'tcc.family.type.roof', 'tcc_family_type_roof_rel', 'family_id', 'type_roof_id', string='Tipo de techo' ) equipment_dwelling_ids = fields.Many2many( 'tcc.family.dwelling.equipment', 'tcc_family_equipment_dwelling_rel', 'family_id', 'equipment_id', string='Enseres de la vivienda' ) salubrity_id = fields.Many2one( 'tcc.family.dwelling.salubrity', string='Salud de vivienda', ) pests_dwelling_ids = fields.Many2many( 'tcc.family.dwelling.pests', 'tcc_family_dwelling_pests_rel', 'family_id', 'pest_id', string='Insectos y roedores' ) pets_ids = fields.Many2many( 'tcc.family.dwelling.pets', 
'tcc_family_dwelling_pets_rel', 'family_id', 'pest_id', string='Animales domésticos' ) room_ids = fields.Many2many( 'tcc.family.dwelling.room', 'tcc_family_dwelling_room_rel', 'family_id', 'room_id', string='Áreas de la vivienda' ) cant_room = fields.Integer(string='cantidad de habitaciones', ) disease_ids = fields.Many2many( 'tcc.family.disease', 'tcc_family_disease_rel', 'family_id', 'disease_id', string='Enfermedades en la familia' ) need_help = fields.Selection( [('Si', 'Si'), ('No', 'No'),], string='Necesita ayuda', help="Necesita ayuda para familiares enfermos" ) name_help = fields.Char( string='¿Cuáles ayudas?', ) commercial_activity_hose = fields.Selection( [('Si', 'Si'), ('No', 'No'),], string='Actividad commercial en la vivienda', ) commercial_activity_ids = fields.Many2many( 'tcc.family.commercial.activity', 'tcc_family_commercial_activity_rel', 'family_id', 'commercial_activity_id', string='Venta de:' ) family_income_id = fields.Many2one( 'tcc.family.income', string='Ingreso familiar', ) arrival_date = fields.Date( string='Fecha de llegada a la comunidad', required=True, ) person_ids = fields.One2many( 'tcc.persons', 'family_id', string='Grupo familiar', help="Casas ubicadas en el sector del Consejo comunal.", ) children_street = fields.Boolean( default=False, string='Niños en la calle' ) quantity_children_street = fields.Integer( string='¿Cuántos niños?', ) indigent = fields.Boolean( default=False, string='Indigentes' ) quantity_indigent = fields.Integer( string='¿Cuántos indigentes?', ) terminally_patient = fields.Boolean( default=False, s
tring='Enfermos terminales' ) quantity_terminally_patient = fields.Integer( string='¿Cuántos enfermos terminales?', ) handicapped = fields.Boolean( default=False, string='Discapacitados' ) quantity_handicapped = fields.Integer( string='¿Cuántos Discapacitados?',
) water_white_ids = fields.Many2many( 'tcc.family.white.water', 'tcc_family_white_water_rel', 'family_id', 'water_id', string='Aguas Blancas' ) water_meter = fields.Boolean( string='Medidor de agua', ) wastewater_ids = fields.Many2many( 'tcc.family.waste.water', 'tcc_family_waste_water_rel', 'family_id', 'wastewater_id', string='Aguas Residuales', ) gas = fields.Selection( [('Bombona', 'Bombona'), ('Tuberia', 'Tubería'), ('no_posee', 'No posee'),], string='Gas', help="¿Cómo es el sistema de distribución de gas en su vivienda?" ) electric_system = fields.Selection( [('publica', 'Electricidad Pública'), ('planta_electrica', 'Planta Eléctrica Propia'), ('no_posee', 'No Posee'),], string='Tipo de electricidad', help="¿Cómo es el sistema de distribución de electricidad en su vivienda?" ) light_meter = fields.Boolean( string='Medidor de luz', ) trash_ids = fields.Many2many( 'tcc.family.collect.trash', 'tcc_family_collect_trash_rel', 'family_id', 'trash_id', string='Recolección de basura' ) telephony_ids = fields.Many2many( 'tcc.family.services.telephony', 'tcc_family_services_telephony_rel', 'family_id', 'telephony_id', string='Tipo de telefonía' ) transport_ids = fields.Many2many( 'tcc.family.servi
import contextlib
import datetime
import functools
import heapq
import time
from numbers import Number


class Timeline(object):
    """A virtual timeline that can be frozen, accelerated, slowed down or
    shifted relative to real (wall-clock) time.

    Callbacks can be scheduled on the timeline; they fire from
    :func:`trigger_past_callbacks`, which :func:`sleep` calls automatically.
    """

    def __init__(self, start_time=None):
        """
        :param start_time: optional virtual time (epoch seconds) to start at;
            when omitted the timeline tracks real time until modified.
        """
        super(Timeline, self).__init__()
        # Non-None only while a scheduled callback runs; pins time().
        self._forced_time = None
        # Min-heap of ScheduledItem objects ordered by fire time.
        self._scheduled = []
        self._time_factor = 1
        self._time_correction = None
        if start_time is not None:
            self._correct_time(base=start_time)

    def is_modified(self):
        """Return True if this timeline has ever diverged from real time."""
        return self._time_correction is not None

    def _real_sleep(self, seconds):
        # Test seam: real sleeping is isolated here so tests can stub it.
        time.sleep(seconds)

    def _real_time(self):
        # Test seam: real clock access is isolated here so tests can stub it.
        return time.time()

    def set_time_factor(self, factor):
        """
        Sets the time factor -- the factor by which the virtual time advances
        compared to the real time. If set to 0, this means the virtual time
        does not progress at all until sleeps are performed
        """
        if factor < 0:
            raise ValueError("Cannot set negative time factor")
        # Re-anchor first so the new factor does not retroactively apply to
        # time that already elapsed under the old factor.
        self._correct_time()
        self._time_factor = factor

    def get_time_factor(self):
        """
        Retrieves the current time factor
        """
        return self._time_factor

    def freeze(self):
        """
        Shortcut for :func:`.set_time_factor`(0)
        """
        self.set_time_factor(0)

    def _correct_time(self, base=None):
        """Re-anchor the correction record at the current moment; *base* is
        only honored as the starting virtual time on first anchoring."""
        current_time = self._real_time()
        if base is None:
            base = current_time
        if self._time_correction is None:
            self._time_correction = TimeCorrection(base, current_time)
        self._time_correction.virtual_time = self.time()
        self._time_correction.real_time = self._real_time()
        # shift stems from the previous correction...
        self._time_correction.shift = 0

    def sleep(self, seconds):
        """
        Sleeps a given number of seconds in the virtual timeline
        """
        if not isinstance(seconds, Number):
            raise ValueError(
                "Invalid number of seconds specified: {0!r}".format(seconds))
        if seconds < 0:
            raise ValueError("Cannot sleep negative number of seconds")
        if self._time_factor == 0:
            # Time is frozen -- jump the clock directly.
            self.set_time(self.time() + seconds)
        else:
            # Really sleep, scaled by the factor, until virtual time catches up.
            end_time = self.time() + seconds
            while self.time() < end_time:
                self._real_sleep(
                    max(0, (end_time - self.time()) / self._time_factor))
        self.trigger_past_callbacks()

    def sleep_wait_all_scheduled(self):
        """
        Sleeps enough time for all scheduled callbacks to occur
        """
        while self._scheduled:
            self.sleep(max(0, self._scheduled[0].time - self.time()))

    def sleep_stop_first_scheduled(self, sleep_seconds):
        """
        Sleeps the given amount of time, but wakes up if a scheduled event
        exists before the destined end time
        """
        if self._scheduled:
            sleep_seconds = min(
                max(0, self._scheduled[0].time - self.time()), sleep_seconds)
        self.sleep(sleep_seconds)

    def trigger_past_callbacks(self):
        """Fire, in order, every scheduled callback whose time has passed,
        pinning time() to each callback's scheduled time while it runs."""
        current_time = self.time()
        while self._scheduled and self._scheduled[0].time <= current_time:
            scheduled = heapq.heappop(self._scheduled)
            with self._get_forced_time_context(scheduled.time):
                scheduled.callback()

    def set_time(self, time, allow_backwards=False):
        """Move the virtual clock to *time* (absolute epoch seconds).

        NOTE(review): assumes a correction record already exists (a
        start_time was given or set_time_factor()/freeze() was called).
        """
        delta = time - self.time()
        if delta < 0 and not allow_backwards:
            # Can't move time backwards. Not an exception, if using threads.
            return
        self._time_correction.shift += delta

    def time(self):
        """
        Gets the virtual time
        """
        if self._forced_time is not None:
            return self._forced_time
        returned = self._real_time()
        if self._time_correction is not None:
            # anchor + manual shifts + scaled real time since the anchor
            returned = (self._time_correction.virtual_time +
                        self._time_correction.shift +
                        (returned - self._time_correction.real_time) *
                        self._time_factor)
        return returned

    @contextlib.contextmanager
    def _get_forced_time_context(self, time):
        # Temporarily pin time() to *time*; always restore, even on error.
        prev_forced_time = self._forced_time
        self._forced_time = time
        try:
            yield
        finally:
            self._forced_time = prev_forced_time

    def schedule_callback(self, delay, callback, *args, **kwargs):
        """Schedule *callback(*args, **kwargs)* to fire *delay* virtual
        seconds from now."""
        if delay < 0:
            raise ValueError("Cannot schedule negative delays")
        item = ScheduledItem(self.time() + delay,
                             functools.partial(callback, *args, **kwargs))
        heapq.heappush(self._scheduled, item)

    def __repr__(self):
        return "<Timeline (@{})>".format(
            datetime.datetime.fromtimestamp(self.time()).ctime())


class ScheduledItem(object):
    """A (time, callback) pair, ordered by time for use in a min-heap."""

    def __init__(self, time, callback):
        super(ScheduledItem, self).__init__()
        self.time = time
        self.callback = callback

    def __lt__(self, other):
        if not isinstance(other, ScheduledItem):
            return NotImplemented
        return self.time < other.time


class TimeCorrection(object):
    """
    Utility class used for keeping records of time shifts or corrections
    """

    def __init__(self, virtual_time, real_time):
        super(TimeCorrection, self).__init__()
        # Virtual/real clock readings at the moment of the last anchoring.
        self.virtual_time = virtual_time
        self.real_time = real_time
        # Accumulated manual shift applied since that anchoring.
        self.shift = 0
#-------------------------------
-------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. #--------------------------------------------------------------------------------------------
- #pylint: skip-file # coding=utf-8 # -------------------------------------------------------------------------- # Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .deployment_avail_set import DeploymentAvailSet from .template_link import TemplateLink from .parameters_link import ParametersLink from .provider_resource_type import ProviderResourceType from .provider import Provider from .basic_dependency import BasicDependency from .dependency import Dependency from .deployment_properties_extended import DeploymentPropertiesExtended from .deployment_extended import DeploymentExtended from .avail_set_creation_client_enums import ( DeploymentMode, ) __all__ = [ 'DeploymentAvailSet', 'TemplateLink', 'ParametersLink', 'ProviderResourceType', 'Provider', 'BasicDependency', 'Dependency', 'DeploymentPropertiesExtended', 'DeploymentExtended', 'DeploymentMode', ]
from rekall import resources
from rekall_agent import testlib
from rekall_agent.client_actions import files
from rekall_agent.client_actions import tsk


class TestTSK(testlib.ClientAcionTest):
    # NOTE(review): base-class name spelling ("ClientAcionTest") presumably
    # matches the actual class in testlib -- verify before "fixing" it.
    """Baseline tests for the TSK directory-listing client action, run
    against the bundled winexec_img.dd ext2 test image."""

    def setUp(self):
        super(TestTSK, self).setUp()
        # Add a fake mount point to the image.
        mount_tree_hook = files.MountPointHook(session=self.session)
        mount_tree = {}
        mount_tree_hook._add_to_tree(
            mount_tree, "/mnt/",
            resources.get_resource("winexec_img.dd", package="rekall_agent",
                                   prefix="test_data"),
            "ext2")
        self.session.SetParameter("mount_points", mount_tree)

    def testTSK(self):
        # Single-level listing of /mnt/a, compared against a stored baseline.
        action = tsk.TSKListDirectoryAction(session=self.session)
        action.path = "/mnt/a"
        action.vfs_location = self.get_test_location("test")
        self.assert_baseline("testTSK", list(action.collect()))

    def testTSKRecursive(self):
        # Same listing but recursing two levels deep.
        action = tsk.TSKListDirectoryAction(session=self.session)
        action.path = "/mnt/a"
        action.depth = 2
        action.vfs_location = self.get_test_location("test")
        self.assert_baseline("testTSKRecursive", list(action.collect()))


if __name__ == "__main__":
    testlib.main()
#!/usr/bin/env python
# Packaging script for the top-level ``hailtop`` module.

from setuptools import setup, find_packages

setup(
    name='hailtop',
    version="0.0.1",
    author="Hail Team",
    author_email="hail@broadinstitute.org",
    description="Top level Hail module.",
    url="https://hail.is",
    project_urls={
        'Documentation': 'https://hail.is/docs/0.2/',
        'Repository': 'https://github.com/hail-is/hail',
    },
    packages=find_packages('.'),
    package_dir={
        'hailtop': 'hailtop'},
    # Ship the version stamp and deploy config alongside the code.
    package_data={
        'hailtop.hailctl': ['hail_version', 'deploy.yaml']},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
    ],
    python_requires=">=3.6",
    # Expose the ``hailctl`` command-line entry point.
    entry_points={
        'console_scripts': ['hailctl = hailtop.hailctl.__main__:main']
    },
    setup_requires=["pytest-runner", "wheel"]
)
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: ohai
short_description: Returns inventory data from I(Ohai)
description:
     - Similar to the M(facter) module, this runs the I(Ohai) discovery program
       (U(http://wiki.opscode.com/display/chef/Ohai)) on the remote host and
       returns JSON inventory data.
       I(Ohai) data is a bit more verbose and nested than I(facter).
version_added: "0.6"
options: {}
notes: []
requirements: [ "ohai" ]
author:
    - "Ansible Core Team"
    - "Michael DeHaan (@mpdehaan)"
'''

EXAMPLES = '''
# Retrieve (ohai) data from all Web servers and store in one-file per host
ansible webservers -m ohai --tree=/tmp/ohaidata
'''

def main():
    """Run the ``ohai`` binary on the remote host and return its JSON
    output as the module result (failing the task if ohai exits non-zero,
    via check_rc=True)."""
    module = AnsibleModule(
        argument_spec = dict()
    )
    cmd = ["/usr/bin/env", "ohai"]
    rc, out, err = module.run_command(cmd, check_rc=True)
    # `json` (like AnsibleModule) is provided by the wildcard import below.
    module.exit_json(**json.loads(out))

# import module snippets
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
#-------------------------------------------------------------------------------
# coding=utf8
# Name:        Module 1
# Purpose:     Build keyword-based training features from CCTV weibo data.
#
# Author:      zhx
#
# Created:     10/05/2016
# Copyright:   (c) zhx 2016
# Licence:     <your licence>
#-------------------------------------------------------------------------------
import openpyxl
import jieba

# Posts with like+repost counts above this are labeled "popular".
threshold = 2140
popular = 0

def main():
    """Read CCTV posts and a per-word statistics table, segment each post
    with jieba, and write min/max/median keyword statistics plus time and
    engagement features to a new workbook (Python 2 script)."""
    cctv_data = openpyxl.load_workbook("cctv.xlsx")
    cctv_keywords = openpyxl.load_workbook("cctv_keywords.xlsx")
    cctv_new = openpyxl.Workbook()
    new_sheet = cctv_new.active
    #print cctv_data.get_sheet_names()
    sheet1 = cctv_keywords["Sheet"]
    sheet2 = cctv_data["Sheet"]
    # word -> [min, max, mean] statistics, loaded from the keywords sheet.
    words = {}
    for r in xrange(1,36003):
        word = sheet1.cell(row=r,column=1).value
        word_min = sheet1.cell(row=r,column=2).value
        word_max = sheet1.cell(row=r,column=3).value
        word_mean = sheet1.cell(row=r,column=4).value
        words[word] = [word_min,word_max,word_mean]
    for r in xrange(2,4749):
        print r
        content = sheet2.cell(row=r,column=3).value
        time = sheet2.cell(row=r,column=11).value
        like = sheet2.cell(row=r,column=5).value
        repost = sheet2.cell(row=r,column=6).value
        # The sheet uses the column captions ('赞'/'转发') as zero markers.
        if like == '赞':
            like = '0'
        if repost =='转发':
            repost = '0'
        like_repost = int(like)+int(repost)
        # NOTE(review): this rebinds a *local* `popular`; the module-level
        # `popular` is never updated and the local is never read -- confirm.
        if like_repost>threshold:
            popular =1
        else:
            popular =0
        # Assumes a time string shaped like '[H]H:MM' with one leading
        # character before the hour digits -- TODO confirm source format.
        hour = int(time[1:3])
        minute =int (time[4:])
        time = hour*60 + minute
        new_sheet.cell(row=r,column=10).value = time
        new_sheet.cell(row=r,column=11).value = like_repost
        if content ==None:
            continue
        print r
        # Full-mode segmentation, then scan tokens against the stats table.
        seg_list = jieba.cut(content, cut_all = True)
        wordsplite = ' '.join(seg_list)
        wordsplite = wordsplite.split(' ')
        maxlike = 0
        max_word =''
        min_word =''
        mean_word=''
        minlike = 9999999
        tmplist = []
        tmpdic ={}
        for w in wordsplite:
            if words.has_key(w):
                tmpdic[w] =int(words[w][2])
                tmplist.append(int(words[w][2]))
                likes = int(words[w][2])
                # Track the words with the lowest/highest mean engagement.
                if likes<minlike:
                    minlike = likes
                    min_word = w
                if likes>maxlike:
                    maxlike = likes
                    max_word = w
            else:
                continue
        if len(tmplist)!=0:
            # Median of the collected means picks the "typical" keyword.
            tmplist.sort()
            mean = tmplist[int(len(tmplist)/2)]
            for w in tmpdic:
                if tmpdic[w]==mean:
                    mean_word =w
        # Columns 1-3: stats of the min word; 4-6: max word; 7-9: median word.
        if min_word!='':
            new_sheet.cell(row=r,column=1).value = words[min_word][0]
            new_sheet.cell(row=r,column=2).value = words[min_word][1]
            new_sheet.cell(row=r,column=3).value = words[min_word][2]
        if max_word!='':
            new_sheet.cell(row=r,column=4).value = words[max_word][0]
            new_sheet.cell(row=r,column=5).value = words[max_word][1]
            new_sheet.cell(row=r,column=6).value = words[max_word][2]
        if mean_word!='':
            new_sheet.cell(row=r,column=7).value = words[mean_word][0]
            new_sheet.cell(row=r,column=8).value = words[mean_word][1]
            new_sheet.cell(row=r,column=9).value = words[mean_word][2]
    cctv_new.save("train_feature_keyword_reg.xlsx")

main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @first_date 20160129
# @date 20160129
# @version 0.0
"""auth for Users API
"""
from flask import abort
from flask.views import MethodView
from flask.ext.login import login_required, current_user
from sqlalchemy.exc import IntegrityError
from webargs.flaskparser import use_args

from . import users_bp
from ..mixins import RestfulViewMixin
from ...models.users import User
from ...schemas.users import SignupSchema, LoginSchema, ResetPasswordSchema
from ...error_handlers import user_errors


class SignupView(RestfulViewMixin, MethodView):
    """POST /signup -- create a new user account."""

    @use_args(SignupSchema, locations=('json',))
    def post(self, args):
        user = User(**args)
        try:
            user.add()
        except IntegrityError as err:
            # Duplicate account: annotate the error for the handler, then
            # let the registered error handler produce the response.
            err.data = user_errors.USER_ERR_1001_REGISTERED_ACC
            raise
        return self.get_response(status=201)


class LoginView(RestfulViewMixin, MethodView):
    """POST /login -- authenticate and return a session key."""

    @use_args(LoginSchema, locations=('json',))
    def post(self, args):
        user = User.authenticate(**args)
        if not user:
            abort(401)
        key = user.login()  # It will return key
        return self.get_response({"key": key}, status=200)


class LogoutView(RestfulViewMixin, MethodView):
    """POST /logout -- end the current user's session."""

    decorators = (login_required,)

    def post(self):
        user = current_user
        user.logout()
        return self.get_response(status=200)


class ResetPasswordView(RestfulViewMixin, MethodView):
    """PUT /reset_password -- change the password after re-verifying the
    old one."""

    decorators = (login_required,)

    @use_args(ResetPasswordSchema, locations=('json',))
    def put(self, args):
        user = current_user
        if not user.check_password(args['old_password']):
            abort(401)
        user.set_password(args['new_password'])
        user.update()
        return self.get_response(status=200)


# Url patterns: To register views in blueprint
users_bp.add_url_rule('/signup', view_func=SignupView.as_view('signup'))
users_bp.add_url_rule('/login', view_func=LoginView.as_view('login'))
users_bp.add_url_rule('/logout', view_func=LogoutView.as_view('logout'))
users_bp.add_url_rule('/reset_password',
                      view_func=ResetPasswordView.as_view('reset-password'))
import os
import re
import lxml.etree as etree
import subprocess
import urllib2

"""
TODO: move patchwatcher in this dir
"""

def improvemailaddr(strings):
    """Rebuild an obfuscated address like 'Name <user at example com>' into
    'Name <user@example.com>'.

    NOTE(review): implicitly returns None when the input has no '<...>'
    part -- confirm callers handle that.
    """
    if '<' in strings and '>' in strings:
        tmplist = strings[strings.find('<')+1:strings.find('>')].split()
        # First token is the local part; remaining tokens are domain labels.
        retstrings = "%s@" % tmplist[0]
        first = 0
        for n in tmplist[1:]:
            if first == 0:
                first = 1
                retstrings += n
            else:
                retstrings += '.%s' % n
        return '%s <%s>' % (strings[:strings.find('<')], retstrings)

def createpatch(htmllink):
    """Fetch a mailing-list archive page and reconstruct it as a mailbox-style
    patch ('From:'/'Date:'/'Subject:' headers plus body).

    Returns (patch_text, subject), or (None, None) when the message body
    contains no 'diff --git' and is therefore not a patch.
    """
    returnstr = ""
    strings = urllib2.urlopen(htmllink).read().decode("utf-8")
    xml = etree.HTML(strings)
    # NOTE(review): bare except hides the real parse error; consider
    # catching lxml/IndexError specifically.
    try:
        lilist = xml.xpath('/html/body/ul/li')
        pre = xml.xpath('/html/body/pre')[0]
    except:
        raise Exception("Fail to parse html")
    # Header fields are the <li> items; the ': ' prefix is stripped via [2:].
    for i in lilist:
        if i.getchildren()[0].text == "From":
            author = i.getchildren()[0].tail[2:]
        elif i.getchildren()[0].text == "Subject":
            subject = i.getchildren()[0].tail[2:]
        elif i.getchildren()[0].text == "Date":
            date = i.getchildren()[0].tail[2:]
    tmpstr = improvemailaddr(author)
    # Undo the archive's line folding inside the subject.
    if '\r\n\t' in subject:
        subject = subject.replace('\r\n\t', ' ')
    if '\r\n' in subject:
        subject = subject.replace('\r\n', '')
    if '\t' in subject:
        subject = subject.replace('\t', ' ')
    returnstr += 'From: %s\n' % tmpstr
    returnstr += 'Date: %s\n' % date
    returnstr += 'Subject: %s\n\n' % subject
    # Body: plain text plus any inline markup children of the <pre> element.
    if pre.getchildren() == []:
        if pre.text:
            returnstr += pre.text
    else:
        if pre.text:
            returnstr += pre.text
        for n in pre.getchildren():
            if n.text:
                returnstr += n.text
            returnstr += n.tail
    if "diff --git" not in returnstr:
        #this is not a patch
        return None, None
    return returnstr, subject

def create_patch_set(html_link_list):
    """Download every archive link, then concatenate the patches ordered by
    their '[PATCH n/m]' index (unindexed patches go in first, as-is)."""
    def _parseSubject(subject):
        """
        TODO: move utils in a right place and use it here
        """
        # Returns [series-index like '2/5' or '', cleaned subject, labels].
        info = ''
        labels = []
        cleansubj = subject.split(']')[-1][1:]
        if "PATCH" not in subject:
            return [info, cleansubj, labels]
        for i in re.findall('\[[^\[\]]+\]', subject):
            tmplist = i[1:-1].replace('PATCH', ' ').split()
            for n in tmplist:
                if '/' in n:
                    info = n
                labels.append(n)
        return [info, cleansubj, labels]

    ret_patch = ''
    patch_dict = {}
    for html_link in html_link_list:
        tmppatch, tmpsubject = createpatch(html_link)
        if not tmpsubject:
            continue
        index, _, _ = _parseSubject(tmpsubject)
        if index == '':
            """ not sure what happened """
            ret_patch += tmppatch
            continue
        patch_dict[str(index)] = tmppatch
    # NOTE(review): lexicographic sort -- '10/12' orders before '2/12'.
    queue = patch_dict.keys()
    queue.sort()
    for i in queue:
        ret_patch += patch_dict[i]
    return ret_patch

if __name__ == '__main__':
    print createpatch("https://www.redhat.com/archives/libvir-list/2016-June/msg01022.html")
""" Fallback to callee definition when definition not found. - https://github.com/davidhalter/jedi/issues/131 - https://github.com/davidhalter/jedi/pull/149 """ """Parenthesis closed at next line.""" # Ignore these definitions for a little while, not sure if we really want them. # python <= 2.5 #? isinstance isinstance( ) #? isinstance isinstance( ) #? isinstance isinstance(None, ) #? isinstance isinstance(None, ) """Parenthesis closed at same line.""" # Note: len('isinstance(') == 11 #? 11 isinstance isinstance() # Note: len('isinstance(None,') == 16 ##? 16 isinstance
isinstance(None,) # Note: len('isinstance(None,') == 16 ##? 16 isinstance isinstance(None, ) # Note: len('isinstance(None, ') == 17 ##? 17 isinstance isinstance(None, ) # Note: len('isinstance( ') == 12 ##? 12 isinstance isinstance( ) """Unclosed parenthesis.""" #? isinstance isinstance( def x(): pass # acts like EOF ##? isinstance isinstance( def x(): pass # acts like EOF #? isinst
ance isinstance(None, def x(): pass # acts like EOF ##? isinstance isinstance(None,
#!/usr/bin/python
""" fanhaorename.py
"""
import os
import os.path
import logging
import fileorganizer
from fileorganizer import _helper
from fileorganizer.replacename import _replacename

__author__ = "Jack Chang <wei0831@gmail.com>"


def _tagHelper(tag):
    """Build a case-insensitive regex fragment for *tag*: each letter
    becomes a character class with both cases (e.g. 'ab1' -> '[aA][bB]1');
    non-alphabetic characters pass through unchanged."""
    result = ""
    for c in tag:
        if c.isalpha():
            result += "[{0}{1}]".format(c.lower(), c.upper())
        else:
            result += c
    return result


def fanhaorename(work_dir,
                 tag,
                 exclude=None,
                 mode=0,
                 wetrun=False,
                 this_name=os.path.basename(__file__)):
    """ Batch Rename Fanhao
    \b
    Args:
        work_dir (str): Working Directory
        tag (str): Fanhao tag
        exclude (str, optional): Regex string to exclude in matches
        mode (int, optional): 0=FILE ONLY, 1=FOLDER ONLY, 2=BOTH
        wetrun (bool, optional): Test Run or not
        this_name (str, optional): Name used for the logger
    """
    # Match e.g. '<tag>-123', '<tag>_123' or '<tag>123' (case-insensitive
    # tag) and normalize to '<TAG>-123'; the file variant keeps the
    # extension via the trailing capture group (\6).
    _find_dir = r"(.*)({0})(-|_| )*(\d\d\d)(.*)".format(_tagHelper(tag))
    _replace_dir = r"{0}-\4".format(tag)
    _find_file = _find_dir + r"(\.(.*))"
    _replace_file = _replace_dir + r"\6"
    _helper.init_loger()
    this_run = "WET" if wetrun else "DRY"
    loger = logging.getLogger(this_name)
    loger.info("[START] === %s [%s RUN] ===", this_name, this_run)
    loger.info("[DO] Rename \"%s\" fanhao in \"%s\"; Mode %s", tag, work_dir,
               mode)
    if mode in (0, 2):  # mode 0 and 2: rename matching files
        for item in _replacename(_find_file, _replace_file, work_dir, 0,
                                 exclude):
            item.commit() if wetrun else loger.info("%s", item)
    if mode in (1, 2):  # mode 1 and 2: rename matching folders
        for item in _replacename(_find_dir, _replace_dir, work_dir, 1,
                                 exclude):
            item.commit() if wetrun else loger.info("%s", item)
    loger.info("[END] === %s [%s RUN] ===", this_name, this_run)


if __name__ == "__main__":
    fileorganizer.cli.cli_fanhaorename()
"""Example of how to convert a RayTra
nsform operator to a tensorflow layer. This example is similar to ``tensorflow_layer_matrix``, but demonstrates how more advanced operators, such as a ray transform, can be handled. """ from __future__ import print_function import tensorflow as tf import numpy as np import odl import odl.contrib.tensorflow sess = tf.InteractiveSession() tf.global_variables_initializer().run() s
pace = odl.uniform_discr([-64, -64], [64, 64], [128, 128], dtype='float32') geometry = odl.tomo.parallel_beam_geometry(space) ray_transform = odl.tomo.RayTransform(space, geometry) x = tf.constant(np.asarray(ray_transform.domain.one())) z = tf.constant(np.asarray(ray_transform.range.one())) # Create tensorflow layer from odl operator odl_op_layer = odl.contrib.tensorflow.as_tensorflow_layer( ray_transform, 'RayTransform') # Add empty axes for batch and channel x_reshaped = x[None, ..., None] z_reshaped = z[None, ..., None] # Lazily apply operator in tensorflow y = odl_op_layer(x_reshaped) # Evaluate using tensorflow print(y.eval()) # Compare result with pure ODL print(ray_transform(x.eval())) # Evaluate the adjoint of the derivative, called gradient in tensorflow # We need to scale by cell size to get correct value since the derivative # in tensorflow uses unweighted spaces. scale = ray_transform.range.cell_volume / ray_transform.domain.cell_volume print(tf.gradients(y, [x_reshaped], z_reshaped)[0].eval() * scale) # Compare result with pure ODL print(ray_transform.derivative(x.eval()).adjoint(z.eval()))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import taas.user.models


class Migration(migrations.Migration):
    """Attach the custom user manager to the ``user`` model."""

    # Must run after the username field removal migration.
    dependencies = [
        ('user', '0002_remove_username'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='user',
            managers=[
                ('objects', taas.user.models.CustomUserManager()),
            ],
        ),
    ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Webservice functionality for Resource management in the TSD project.

Resources are registered in Cerebrum, but they are administered by other
systems. For those systems to be able to retrieve the information, we are
giving it through a SOAP webservice.
"""
# TODO: check if something could be removed from here:
import random, hashlib
import string, pickle
from mx.DateTime import RelativeDateTime, now
import twisted.python.log

import cereconf
from Cerebrum import Errors
from Cerebrum.Utils import Factory
from Cerebrum.modules.dns import Utils, Subnet, AAAARecord, IPv6Number
# NOTE(review): this import shadows the dns `Utils` imported on the line
# above, so both `Utils.SimpleLogger` and `Utils.Find` below resolve
# against the cis module — confirm cis.Utils really provides both, or
# alias one of the two imports.
from Cerebrum.modules.cis import Utils

# Module-level logger shared by all service instances.
log = Utils.SimpleLogger()


class ResourceService(object):
    """The functionality for the Resource service.

    Note that this main class should be independent of what server we use.

    It is important that each thread gets its own instance of this class, to
    avoid race conditions.

    Another thing to remember is that database connections should be closed.
    This is to avoid having old and idle database connections, as the garbage
    collector can't destroy the instances, due to twisted's reuse of threads.
    """

    # The default DNS zone to use:
    default_zone = 'tsd.usit.no.'

    def __init__(self, operator_id):
        """Constructor. Since we are using access control, we need the
        authenticated entity's ID as a parameter.
        """
        # Each instance gets its own DB connection; see class docstring for
        # why connections must not be shared between twisted threads.
        self.db = Factory.get('Database')()
        self.db.cl_init(change_program='resource_service')
        self.co = Factory.get('Constants')(self.db)
        self.finder = Utils.Find(self.db, self.default_zone)
        self.subnet = Subnet.Subnet(self.db)
        self.aaaa = AAAARecord.AAAARecord(self.db)
        self.ip = IPv6Number.IPv6Number(self.db)
        # TODO: could we save work by only using a single, shared object of
        # the auth class? It is supposed to be thread safe.
        #self.ba = BofhdAuth(self.db)
        self.operator_id = operator_id

    def close(self):
        """Explicitly close this instance, as python's garbage collector can't
        close the database connections when Twisted is reusing the threads.
        """
        if hasattr(self, 'db'):
            try:
                self.db.close()
            except Exception, e:
                # Best-effort: a failed close is logged, never raised.
                log.warning("Problems with db.close: %s" % e)
        else:
            # TODO: this could be removed later, when it is considered stable
            log.warning("db doesn't exist")

    def search_mac_addresses(self, hostname, mac_address):
        """Search for hostnames and their MAC addresses.

        Returns a sequence of (name, mac) pairs; empty when the named host
        has no MAC or the given MAC does not match it.
        """
        m_id = a_id = None
        if hostname:
            # Exact lookup of a single host by name.
            a_id = self.finder.find_a_record(hostname)
            self.aaaa.clear()
            self.aaaa.find(a_id)
            if not self.aaaa.mac:
                return ()
            if mac_address and mac_address != self.aaaa.mac:
                return ()
            return ((self.aaaa.name, self.aaaa.mac),)
        # Return either the complete list of hosts and their MAC addresses, or
        # only the host with the given MAC address:
        # TODO: What is used? The element 'mac' or IPNumber's 'mac_adr'?
        return ((row['name'], row['mac']) for row in self.aaaa.list_ext()
                if row['mac'] and
                (not mac_address or (row['mac'] == mac_address)))

    def register_mac_address(self, hostname, mac_address):
        """Register a MAC address for a given host.

        Commits the change; returns True on success.
        """
        self.aaaa.clear()
        a_id = self.finder.find_a_record(hostname)
        self.aaaa.find(a_id)
        # TODO: do any validation on the MAC address?
        self.aaaa.mac = mac_address
        self.aaaa.write_db()
        self.db.commit()
        return True

    def get_vlan_info(self, hostname):
        """Get the VLAN info about a given host.

        The needed details are VLAN number and net category.
        """
        # NOTE(review): the docstring says "net category" but the return
        # value is (vlan_number, subnet_mask) — confirm which is intended.
        self.subnet.clear()
        # Check if hostname is rather an IP address or subnet:
        if ':' in hostname:
            self.subnet.find(hostname)
        else:
            a_id = self.finder.find_a_record(hostname)
            self.aaaa.clear()
            self.aaaa.find(a_id)
            self.ip.clear()
            # NOTE(review): `a_id` is the value returned by find_a_record;
            # accessing `.ip_number` on it assumes it is a record object,
            # not a plain id — TODO confirm.
            self.ip.find(a_id.ip_number)
            # bah, now we have the ip address
            self.subnet.find(self.ip.aaaa_ip)
        return (self.subnet.vlan_number, self.subnet.subnet_mask)
# -*- coding: utf_8 -*-
# Module for Malware Analysis
from urllib.parse import urlparse
import logging
import shutil
import io
import os
import re
import tempfile

import requests
from django.conf import settings

from MobSF.utils import (
    PrintException,
    isInternetAvailable,
    upstream_proxy,
    sha256
)

logger = logging.getLogger(__name__)

# PATH
MALWARE_DB_DIR = TOOLS_DIR = os.path.join(
    settings.BASE_DIR, 'MalwareAnalyzer/malwaredb/')


def update_malware_db():
    """Refresh the local malware-domain DB from malwaredomainlist.com.

    Best-effort: every failure is logged via PrintException and the
    existing database file is left untouched.
    """
    # Safe defaults so a failed proxy lookup doesn't leave these names
    # undefined for the request below (was a latent NameError).
    proxies, verify = None, True
    try:
        proxies, verify = upstream_proxy('http')
    except Exception:
        PrintException("[ERROR] Setting upstream proxy")
    try:
        url = "http://www.malwaredomainlist.com/mdlcsv.php"
        response = requests.get(url, timeout=3,
                                proxies=proxies, verify=verify)
        data = response.content
        tmp_dwd = tempfile.NamedTemporaryFile()
        tmp_dwd.write(data)
        mal_db = os.path.join(MALWARE_DB_DIR, 'malwaredomainlist')
        tmp_dwd.seek(0)
        # Check1: SHA256 change — only rewrite the DB when content differs.
        if sha256(tmp_dwd.name) != sha256(mal_db):
            # Check2: make sure the CSV layout is still the one we parse.
            line = tmp_dwd.readline().decode("utf-8", "ignore")
            lst = line.split('",')
            if len(lst) == 10:
                # DB Format is not changed. Let's update DB
                logger.info("Updating Malware Database....")
                shutil.copyfile(tmp_dwd.name, mal_db)
            else:
                logger.info("Malware Database format from malwaredomainlist.com changed. Database is not updated. "
                            "Please report to: https://github.com/MobSF/Mobile-Security-Framework-MobSF/issues")
        else:
            logger.info("Malware Database is up-to-date.")
        tmp_dwd.close()
    except Exception:
        PrintException("[ERROR] Malware DB Update")


def malware_check(urllist):
    """Check the domains found in *urllist* against the malware DB.

    Returns {domain: details} where known-bad domains carry the matching
    DB entry (ip, desc, bad='yes') and all other domains get bad='no'.
    Best-effort: on any failure the partial result gathered so far is
    returned.
    """
    result = {}
    try:
        # `not x` instead of `x == False`; behavior unchanged for the
        # boolean settings flag.
        if not settings.DOMAIN_MALWARE_SCAN:
            logger.info("Domain Malware Check disabled in settings")
            return result
        domainlist = get_domains(urllist)
        if domainlist:
            if isInternetAvailable():
                update_malware_db()
            else:
                logger.warning(
                    "No Internet Connection. Skipping Malware Database Update.")
            mal_db = os.path.join(MALWARE_DB_DIR, 'malwaredomainlist')
            with io.open(mal_db, mode='r',
                         encoding="utf8", errors="ignore") as flip:
                entry_list = flip.readlines()
            for entry in entry_list:
                enlist = entry.split('","')
                if len(enlist) > 5:
                    details_dict = dict()
                    details_dict["domain_or_url"] = enlist[1]
                    details_dict["ip"] = enlist[2]
                    details_dict["desc"] = enlist[4]
                    details_dict["bad"] = "yes"
                    for domain in domainlist:
                        if (details_dict["domain_or_url"].startswith(domain) or
                                details_dict["ip"].startswith(domain)):
                            result[domain] = details_dict
            # Domains with no DB hit are reported as good.
            for domain in domainlist:
                if domain not in result:
                    result[domain] = {"bad": "no"}
    except Exception:
        PrintException("[ERROR] Performing Malware Check")
    return result


# Helper Functions


def get_domains(urls):
    """Extract the unique, plausible domain names from an iterable of URLs.

    A domain is kept when it is longer than 2 chars, contains a dot, does
    not end with a dot and has at least one alphanumeric character.
    Always returns a list (possibly empty) — never None — so callers can
    safely iterate the result.
    """
    domains = []
    try:
        for url in urls:
            parsed_uri = urlparse(url)
            domain = '{uri.netloc}'.format(uri=parsed_uri)
            if ((domain not in domains) and
                    (len(domain) > 2) and
                    ("." in domain) and
                    (not domain.endswith(".")) and
                    re.search('[a-zA-Z0-9]', domain)):
                domains.append(domain)
    except Exception:
        PrintException("[ERROR] Extracting Domain form URL")
    return domains
# NOTE(review): the original import header is truncated at this chunk
# boundary — names such as Flask, session, wraps, request, render_template,
# redirect, url_for, flash, wtforms, OrderedMultiDict, Admin and
# ContactsDashboard are presumably imported above; confirm against the
# full file. `unicode(...)` below indicates this is Python 2 code.
chemy import ModelAdminModule, model_form
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.orm import aliased, contains_eager
from login_utils import encrypt_password, check_password
from flask.views import MethodView

# Application and database setup for the Level2 CRM.
app = Flask('level2_pycrm')
app.config['SECRET_KEY'] = 'secret'
app.debug = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
app.jinja_env.trim_blocks = True
db = SQLAlchemy(app)
db_session = db.session
#session['logged_in'] = False


def is_email(data):
    'returns true if given data that looks like an email'
    return '@' in data and '.com' in data


def is_unique(itm):
    'returns true if itm is unique email or name in database'
    # NOTE(review): no body — this always returns None; implement or remove.


def login_required(test):
    # Decorator: redirect to the login page (with ?next=) unless the
    # session is marked as logged in.
    @wraps(test)
    def wrap(*args, **kwargs):
        if 'logged_in' in session:  #and not session['logged_in']:
            return test(*args, **kwargs)
        else:
            return redirect(url_for('login', next=request.url))
    return wrap


# view functions
@app.route('/login', methods=["POST", "GET"])
def login():
    # NOTE(review): the submitted password is hashed but never compared
    # against a stored credential — any POST logs the user in. Confirm
    # whether this is intentional placeholder code.
    if request.method.upper() == "POST":
        pw = encrypt_password(request.form['password'])
        username = request.form['username']
        session['logged_in'] = True
        return redirect(url_for('redirect_to_admin'))
    return render_template('login.html')


@app.route('/register', methods=["POST", "GET"])
def register():
    # Registration form: verifies the two password fields match, then shows
    # a confirmation page with the submitted attributes.
    if request.method.upper() == "POST":
        username = request.form['username']
        # check_unique_username(username) # error if not
        email = request.form['email']  # same verification above
        pw1 = encrypt_password(request.form['password'])
        pw2 = request.form['confirm']
        if not check_password(pw2, pw1):
            flash('Passwords didnt match, try again')
            return redirect(url_for('register'))
        else:
            attrs = ( ('username', username), ('email', email), )
            return render_template("verify_registration.html", attrs=attrs)
    return render_template('register.html')


@app.route('/verified')
def verified():
    # Marks the session as logged in after registration is confirmed.
    # NOTE(review): 'sgning' looks like a typo for 'signing' in this
    # user-facing message — left unchanged here (runtime string).
    session['logged_in'] = True
    flash('Thank you for sgning up')
    return redirect(url_for('redirect_to_admin'))


# NOTE(review): Flask's hook is `teardown_appcontext` /
# `teardown_request`; `teardown_context` does not look like a Flask
# attribute — confirm.
@app.teardown_context
def auto_logout():
    if session.get('logged_in', False):
        del(session['logged_in'])


# define model classes for Modules
#class Worker(db.Model):
class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255), unique=True, nullable=False)
    email = db.Column(db.String(255), unique=True, nullable=False)
    send_info_email = db.Column(db.Boolean())
    zone = db.Column(db.Integer, nullable=False)
    is_active = db.Column(db.Boolean())
    account_num = db.Column(db.String(20), nullable=False)
    password = db.Column(db.String(255))
    create_password = db.Column(db.Boolean())

    def __unicode__(self):
        return self.username

    def __str__(self):
        return self.__unicode__()


class Company(db.Model):
    # NOTE(review): dates are stored as 10-char strings, not Date columns.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255), unique=True, nullable=False)
    zone = db.Column(db.Integer, nullable=False)
    account_num = db.Column(db.String(15), nullable=False)
    main_contact_id = db.Column(db.Integer, db.ForeignKey(User.id))
    contacts_group = db.relationship(User, backref=db.backref("Agencys"))
    main_phone = db.Column(db.String(15))
    alt_phone = db.Column(db.String(15))
    #main_contact_email = db.Column(db.Integer,db.ForeignKey(User.email))
    date_created = db.Column(db.String(10))
    date_modified = db.Column(db.String(10))
    contract_start = db.Column(db.String(10))
    contract_end = db.Column(db.String(10))

    def __unicode__(self):
        return unicode(self.name)

    def __repr__(self):
        return '<Agency %r>' % self.name


class Warehouse(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255), nullable=False)
    company_id = db.Column(db.Integer, db.ForeignKey(Company.id))
    company = db.relationship(Company, backref=db.backref("warehouses"))

    def __unicode__(self):
        return self.name

    def __repr__(self):
        return '<Warehouse %r>' % self.name


class Profile(db.Model):
    # One-to-one extension of User (shares its primary key).
    id = db.Column(db.Integer, db.ForeignKey(User.id), primary_key=True)
    name = db.Column(db.String(255), nullable=False)
    location = db.Column(db.String(255))
    company_id = db.Column(db.Integer, db.ForeignKey(Company.id),
                           nullable=True)
    user = db.relationship(User, backref=db.backref("profile",
                           remote_side=id, uselist=False,
                           cascade="all, delete-orphan"))
    company = db.relationship(Company, backref=db.backref("staff"))

    def __unicode__(self):
        return self.user.username


# Association table for the many-to-many User <-> Group relationship.
user_group = db.Table(
    'user_group', db.Model.metadata,
    db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
    db.Column('group_id', db.Integer, db.ForeignKey('group.id'))
)


class Group(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255), unique=True, nullable=False)
    users = db.relationship("User", secondary=user_group,
                            backref=db.backref("groups", lazy='dynamic'))

    def __unicode__(self):
        return unicode(self.name)

    def __repr__(self):
        return '<Group %r>' % self.name


# NOTE(review): module import wipes and re-seeds the database — a side
# effect on every import; confirm this is only meant for development.
db.drop_all()
db.create_all()
group = Group(name="admins")
db_session.add(group)
company = Company(name="Level 2 Designs", zone=1,
                  main_phone='714-783-6369', account_num="4565")
user = User(username="kyle", zone=1, account_num="222",
            email="kyle@level2designs.com", password="14wp88",
            is_active=True)
db_session.add(user)
db_session.add(company)
db_session.commit()

# Auto-generated WTForms for the admin modules; the subclasses below embed
# related records as nested form fields.
UserForm = model_form(User, db_session)  #,exclude=['password'])
CompanyForm = model_form(Company, db_session,
                         exclude=['main_contact_id', 'date_modified'])


class UserForm(UserForm):
    # Embeds OneToOne as FormField
    profile = wtforms.FormField(
        model_form(Profile, db_session, exclude=['user'],
                   base_class=wtforms.Form))


class UserModule(ModelAdminModule):
    model = User
    db_session = db_session
    profile_alias = aliased(Profile)
    # Columns shown in the admin list view.
    list_fields = OrderedMultiDict((
        ('id', {'label': 'id', 'column': User.id}),
        ('username', {'label': 'username', 'column': User.username}),
        ('email', {'label': 'email address', 'column': User.email}),
        ('zone', {'label': 'Zone', 'column': User.zone}),
        ('account_num', {'label': 'Account Number',
                         'column': User.account_num}),
        ('profile.name', {'label': 'name', 'column': profile_alias.name}),
        ('profile.location', {'label': 'location',
                              'column': profile_alias.location}),
    ))
    list_title = 'User list'
    searchable_fields = ['username', 'profile.name', 'zone',
                         'account_num', 'email']  #,'role']
    order_by = ('id', 'desc')
    # NOTE(review): the trailing backslash after .options(...) continues
    # the statement into the following line — likely a stray continuation
    # character; confirm against the original file.
    list_query_factory = model.query\
        .outerjoin(profile_alias, 'profile')\
        .options(contains_eager('profile', alias=profile_alias))\

    form_class = UserForm
    detail_title = 'User Details'

    def create_object(self):
        # New users always get an (empty) attached profile.
        user = self.model()
        user.profile = Profile()
        return user


class CompanyForm(CompanyForm):
    # Embeds the main contact user as a nested form.
    contact = wtforms.FormField(
        model_form(User, db_session,
                   exclude=['account_num', 'profile.name',
                            'profile.location'],
                   base_class=wtforms.Form))


class ContactModule(ModelAdminModule):
    model = User
    db_session = db_session
    form_class = model_form(User, db_session)


class GroupModule(ModelAdminModule):
    model = Group
    db_session = db_session
    form_class = model_form(Group, db_session, only=['name'])


class WarehouseModule(ModelAdminModule):
    model = Warehouse
    db_session = db_session


class CompanyModule(ModelAdminModule):
    model = Company
    db_session = db_session
    form_class = CompanyForm

    def create_object(self):
        # NOTE(review): Company has no `user` attribute — this probably
        # meant to set the main contact; confirm.
        company = self.model()
        company.user = User()
        return company


admin = Admin(app, title="Level2Designs Contact Admin",
              main_dashboard=ContactsDashboard)
# NOTE(review): statement truncated at this chunk boundary.
security =
__author__ = "Manuel Escriche <mev@tid.es>"

import os, pickle, base64, requests
from datetime import datetime

from kconfig import trackersBook, trackersBookByKey
from kconfig import tComponentsBook
from kernel.Jira import JIRA


class DataEngine:
    """Pickle-backed local store for JIRA tracker/component/query snapshots.

    Each saved object is a (timestamp, data) tuple pickled under a
    'FIWARE.Engine.<type>.<name>.pkl' filename inside *storage*.
    """

    class DataObject:
        """Base class for one stored object; subclasses set ``_type``."""

        def __init__(self, name, storage):
            self.storage = storage
            self.name = name

        def save(self, data):
            """Pickle (timestamp, data) to disk; return the filename used."""
            timestamp = datetime.now().strftime("%Y%m%d-%H%M")
            filename = 'FIWARE.Engine.{}.{}.pkl'.format(self._type, self.name)
            longFilename = os.path.join(self.storage, filename)
            with open(longFilename, 'wb') as f:
                pickle.dump((timestamp, data), f, pickle.HIGHEST_PROTOCOL)
            return filename

        def load(self):
            """Return (data, timestamp); FileNotFoundError if never saved.

            Uses ``with`` so the file handle is closed even when
            unpickling fails (the original leaked the handle on any
            error other than FileNotFoundError).
            """
            filename = 'FIWARE.Engine.{}.{}.pkl'.format(self._type, self.name)
            with open(os.path.join(self.storage, filename), 'rb') as f:
                timestamp, data = pickle.load(f)
            return data, timestamp

    class Tracker(DataObject):
        _type = 'Tracker'

        def __init__(self, trackername, storage):
            super().__init__(trackername, storage)

    class Comp(DataObject):
        _type = 'Component'

        def __init__(self, cmpname, storage):
            super().__init__(cmpname, storage)

    class Query(DataObject):
        _type = 'Query'

        def __init__(self, name, storage):
            super().__init__(name, storage)

    def __init__(self, storage):
        self.storage = storage
        #self.jira = JIRA()

    @classmethod
    def snapshot(cls, storage):
        """Download every known tracker from JIRA and persist it.

        Returns the list of filenames written.
        """
        jira = JIRA()
        files = list()
        for trackername in trackersBook:
            tracker = trackersBook[trackername]
            data = jira.getTrackerData(tracker.keystone)
            filename = DataEngine.Tracker(trackername, storage).save(data)
            files.append(filename)
        return files

    def getTrackerData(self, tracker_id):
        """Return (data, timestamp) for the stored tracker snapshot."""
        tracker = trackersBookByKey[tracker_id]
        return DataEngine.Tracker(tracker.name, self.storage).load()

    def saveTrackerData(self, tracker_id, data):
        """Persist *data* as the snapshot for *tracker_id*."""
        tracker = trackersBookByKey[tracker_id]
        DataEngine.Tracker(tracker.name, self.storage).save(data)

    def getComponentData(self, cmp_id):
        """Return (data, timestamp) for a component.

        Falls back to filtering the component's tracker snapshot when no
        dedicated component snapshot exists.
        """
        comp = tComponentsBook[cmp_id]
        name = '{}-{}'.format(comp.name, cmp_id)
        try:
            return DataEngine.Comp(name, self.storage).load()
        except Exception:
            tracker = trackersBookByKey[comp.tracker]
            trackerData, timestamp = \
                DataEngine.Tracker(tracker.name, self.storage).load()
            data = list()
            for item in trackerData:
                try:
                    key = item['fields']['components'][0]['id']
                except Exception:
                    # Issue without a component assigned — skip it.
                    continue
                if cmp_id == key:
                    data.append(item)
            return data, timestamp

    def saveComponentData(self, cmp_id, data):
        """Persist *data* as the snapshot for component *cmp_id*."""
        cmp = tComponentsBook[cmp_id]
        name = '{}-{}'.format(cmp.name, cmp_id)
        DataEngine.Comp(name, self.storage).save(data)

    def getQueryData(self, name):
        """Return (data, timestamp) for the stored query snapshot *name*."""
        return DataEngine.Query(name, self.storage).load()

    def saveQueryData(self, name, data):
        """Persist *data* as the snapshot for query *name*."""
        DataEngine.Query(name, self.storage).save(data)


class DataFactory:
    """Serve data fresh from JIRA when possible, else from the local store.

    Every getter returns (data, timestamp, source) where source is
    'jira' or 'store'.
    """

    def __init__(self, storage):
        self.engine = DataEngine(storage)

    def getTrackerData(self, tracker_id):
        # Trackers are only read from the store; they are refreshed in
        # bulk via DataEngine.snapshot().
        data, timestamp = self.engine.getTrackerData(tracker_id)
        source = 'store'
        return data, timestamp, source

    def getComponentData(self, cmp_id):
        try:
            data = JIRA().getComponentData(cmp_id)
            self.engine.saveComponentData(cmp_id, data)
            timestamp = datetime.now().strftime("%Y%m%d-%H%M")
            source = 'jira'
        except Exception:
            data, timestamp = self.engine.getComponentData(cmp_id)
            source = 'store'
        return data, timestamp, source

    def getQueryData(self, name, jql):
        try:
            data = JIRA().getQuery(jql)
            self.engine.saveQueryData(name, data)
            timestamp = datetime.now().strftime("%Y%m%d-%H%M")
            source = 'jira'
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        except Exception:
            data, timestamp = self.engine.getQueryData(name)
            source = 'store'
        return data, timestamp, source

    def getTrackerNoComponentData(self, tracker_id):
        """Return the issues of *tracker_id* that have no component set."""
        jql = 'project = {} AND component = EMPTY'.format(tracker_id)
        name = '{}-NoComp'.format(tracker_id)
        try:
            data = JIRA().getQuery(jql)
            self.engine.saveQueryData(name, data)
            timestamp = datetime.now().strftime("%Y%m%d-%H%M")
            source = 'jira'
        except Exception:
            data, timestamp = self.engine.getQueryData(name)
            source = 'store'
        return data, timestamp, source


if __name__ == "__main__":
    pass