commit
stringlengths
40
40
subject
stringlengths
1
1.49k
old_file
stringlengths
4
311
new_file
stringlengths
4
311
new_contents
stringlengths
1
29.8k
old_contents
stringlengths
0
9.9k
lang
stringclasses
3 values
proba
float64
0
1
c9a0fb540a9ee8005c1ee2d70613c39455891bee
Add analyze_bound_horizontal tests module
tests/plantcv/test_analyze_bound_horizontal.py
tests/plantcv/test_analyze_bound_horizontal.py
import pytest import cv2 from plantcv.plantcv import analyze_bound_horizontal, outputs @pytest.mark.parametrize('pos,exp', [[200, 58], [-1, 0], [100, 0], [150, 11]]) def test_analyze_bound_horizontal(pos, exp, test_data): # Clear previous outputs outputs.clear() # Read in test data img = cv2.imread(test_data.small_rgb_img) # img_above_bound_only = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT)) mask = cv2.imread(test_data.small_bin_img, -1) object_contours = test_data.load_composed_contours(test_data.small_composed_contours_file) # _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=300) # _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=100) # _ = pcv.analyze_bound_horizontal(img=img_above_bound_only, obj=object_contours, mask=mask, line_position=1756) # _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=1756) _ = analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=pos) assert outputs.observations["default"]["height_above_reference"]["value"] == exp def test_analyze_bound_horizontal_grayscale_image(test_data): # Read in test data img = cv2.imread(test_data.small_gray_img, -1) mask = cv2.imread(test_data.small_bin_img, -1) object_contours = test_data.load_composed_contours(test_data.small_composed_contours_file) boundary_img = analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=200) assert len(boundary_img.shape) == 3
Python
0.000001
198cf78895db88a8986926038e817ebb2bf75eb2
add migration for notification tables
portal/migrations/versions/458dd2fc1172_.py
portal/migrations/versions/458dd2fc1172_.py
from alembic import op import sqlalchemy as sa """empty message Revision ID: 458dd2fc1172 Revises: 8ecdd6381235 Create Date: 2017-12-21 16:38:49.659073 """ # revision identifiers, used by Alembic. revision = '458dd2fc1172' down_revision = '8ecdd6381235' def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('notifications', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.Text(), nullable=False), sa.Column('content', sa.Text(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name') ) op.create_table('user_notifications', sa.Column('id', sa.Integer(), nullable=False), sa.Column('user_id', sa.Integer(), nullable=False), sa.Column('notification_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['notification_id'], ['notifications.id'], ondelete='CASCADE'), sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('user_id', 'notification_id', name='_user_notification') ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('user_notifications') op.drop_table('notifications') # ### end Alembic commands ###
Python
0
8c7fa4e16805dc9e8adbd5615c610be8ba92c444
Add argparse tests for gatherkeys
ceph_deploy/tests/parser/test_gatherkeys.py
ceph_deploy/tests/parser/test_gatherkeys.py
import pytest from ceph_deploy.cli import get_parser class TestParserGatherKeys(object): def setup(self): self.parser = get_parser() def test_gather_help(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('gatherkeys --help'.split()) out, err = capsys.readouterr() assert 'usage: ceph-deploy gatherkeys' in out assert 'positional arguments:' in out assert 'optional arguments:' in out def test_gatherkeys_host_required(self, capsys): with pytest.raises(SystemExit): self.parser.parse_args('gatherkeys'.split()) out, err = capsys.readouterr() assert "error: too few arguments" in err def test_gatherkeys_one_host(self): args = self.parser.parse_args('gatherkeys host1'.split()) assert args.mon == ['host1'] def test_gatherkeys_multiple_hosts(self): hostnames = ['host1', 'host2', 'host3'] args = self.parser.parse_args(['gatherkeys'] + hostnames) assert args.mon == hostnames
Python
0
5fb08288c1250174a7de2caa9163f49a0a9d761a
RSSD ID can be null
institutions/respondants/migrations/0005_auto__chg_field_institution_rssd_id.py
institutions/respondants/migrations/0005_auto__chg_field_institution_rssd_id.py
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Changing field 'Institution.rssd_id' db.alter_column('respondants_institution', 'rssd_id', self.gf('django.db.models.fields.CharField')(max_length=10, null=True)) def backwards(self, orm): # Changing field 'Institution.rssd_id' db.alter_column('respondants_institution', 'rssd_id', self.gf('django.db.models.fields.CharField')(default='', max_length=10)) models = { 'respondants.agency': { 'Meta': {'object_name': 'Agency'}, 'acronym': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'hmda_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}) }, 'respondants.institution': { 'Meta': {'unique_together': "(('ffiec_id', 'agency'),)", 'object_name': 'Institution'}, 'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['respondants.Agency']"}), 'ffiec_id': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mailing_address': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['respondants.Institution']"}), 'rssd_id': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}), 'tax_id': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'top_holder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'descendants'", 'null': 'True', 'to': "orm['respondants.TopHolderInstitution']"}), 'year': ('django.db.models.fields.SmallIntegerField', [], {}), 'zip_code': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['respondants.ZipcodeCityState']"}) }, 'respondants.topholderinstitution': { 'Meta': {'object_name': 'TopHolderInstitution'}, 'city': ('django.db.models.fields.CharField', [], {'max_length': '25'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'rssd_id': ('django.db.models.fields.CharField', [], {'max_length': '10', 'unique': 'True', 'null': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True'}), 'year': ('django.db.models.fields.SmallIntegerField', [], {}) }, 'respondants.zipcodecitystate': { 'Meta': {'unique_together': "(('zip_code', 'city'),)", 'object_name': 'ZipcodeCityState'}, 'city': ('django.db.models.fields.CharField', [], {'max_length': '25'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'plus_four': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'state': ('localflavor.us.models.USStateField', [], {'max_length': '2'}), 'zip_code': ('django.db.models.fields.IntegerField', [], {}) } } complete_apps = ['respondants']
Python
0.999997
2803b237af18c6d5cd0613eaf4eccf2b61e65100
Create afImgPanel.py
scripts/afImgPanel.py
scripts/afImgPanel.py
import pymel.core as pm import pymel.all as pa imgOp = 0.3 imgDep = 10 #get current camera curCam = pm.modelPanel(pm.getPanel(wf=True),q=True,cam=True) #select image and creat imagePlane and setup fileNm = pm.fileDialog2(ds=0,fm=1,cap='open',okc='Select Image') ImgPln = pm.imagePlane(fn=fileNm[0],lookThrough=curCam,maintainRatio=1) pm.setAttr(ImgPln[1]+'.displayOnlyIfCurrent',True) pm.setAttr(ImgPln[0]+'.translateZ',-pm.getAttr(curCam+'.translateZ')/3+-imgDep) pm.setAttr(ImgPln[1]+'.alphaGain',imgOp) pm.setAttr(ImgPln[1]+'.textureFilter',1) #aligh to the camera #create locator to be the parent and then create parent constraint pLoc = pm.spaceLocator() pm.parent(ImgPln[0],pLoc) pm.parentConstraint(curCam,pLoc) #Toggle image plane visibility if(pm.getAttr(ImgPln[1]+'.visibility')): pm.setAttr(ImgPln[1]+'.visibility',0) else: pm.setAttr(ImgPln[1]+'.visibility',1)
Python
0.000001
96a6f45dd73819ebbe11169cc17bf3c056ce7a9e
Add sfp_api_recon_dev module
modules/sfp_api_recon_dev.py
modules/sfp_api_recon_dev.py
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------- # Name: sfp_api_recon_dev # Purpose: Search api.recon.dev for subdomains. # # Authors: <bcoles@gmail.com> # # Created: 2020-08-14 # Copyright: (c) bcoles 2020 # Licence: GPL # ------------------------------------------------------------------------------- import json import time import urllib from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent class sfp_api_recon_dev(SpiderFootPlugin): """api.recon.dev:Footprint,Investigate,Passive:Passive DNS::Search api.recon.dev for subdomains.""" opts = { "verify": True, "delay": 1 } optdescs = { "verify": "Verify identified domains still resolve to the associated specified IP address.", "delay": "Delay between requests, in seconds." } results = None errorState = False def setup(self, sfc, userOpts=dict()): self.sf = sfc self.results = self.tempStorage() self.errorState = False for opt in userOpts.keys(): self.opts[opt] = userOpts[opt] def watchedEvents(self): return ["DOMAIN_NAME"] def producedEvents(self): return ["RAW_RIR_DATA", "INTERNET_NAME"] def queryDomain(self, qry): headers = { "Accept" : "application/json" } params = { 'domain': qry.encode('raw_unicode_escape').decode("ascii", errors='replace') } res = self.sf.fetchUrl( 'https://api.recon.dev/search?' 
+ urllib.parse.urlencode(params), headers=headers, timeout=30, useragent=self.opts['_useragent'] ) time.sleep(self.opts['delay']) return self.parseAPIResponse(res) def parseAPIResponse(self, res): # Future proofing - api.recon.dev does not implement rate limiting if res['code'] == '429': self.sf.error("You are being rate-limited by api.recon.dev", False) self.errorState = True return None # Catch all non-200 status codes, and presume something went wrong if res['code'] != '200': self.sf.error("Failed to retrieve content from api.recon.dev", False) self.errorState = True return None if res['content'] is None: return None try: data = json.loads(res['content']) except Exception as e: self.sf.debug("Error processing JSON response.") return None # returns list of results; 'null' when no results; or dict when there's an error if not isinstance(data, list): self.sf.error("Failed to retrieve content from api.recon.dev", False) if isinstance(data, dict) and data.get('message'): self.sf.debug(f"Failed to retrieve content from api.recon.dev: {data.get('message')}") self.errorState = True return None return data def handleEvent(self, event): eventName = event.eventType srcModuleName = event.module eventData = event.data if self.errorState: return None if eventData in self.results: return None self.results[eventData] = True self.sf.debug("Received event, %s, from %s" % (eventName, srcModuleName)) if eventName not in ["DOMAIN_NAME"]: return None data = self.queryDomain(eventData) if data is None: self.sf.debug("No information found for domain " + eventData) return None evt = SpiderFootEvent('RAW_RIR_DATA', str(data), self.__name__, event) self.notifyListeners(evt) domains = [] for result in data: raw_domains = result.get('rawDomains') if raw_domains: for domain in raw_domains: domains.append(domain) for domain in set(domains): if self.checkForStop(): return None if domain in self.results: continue if not self.getTarget().matches(domain, includeChildren=True, 
includeParents=True): continue if self.opts['verify'] and not self.sf.resolveHost(domain): self.sf.debug("Host %s could not be resolved" % domain) evt = SpiderFootEvent("INTERNET_NAME_UNRESOLVED", domain, self.__name__, event) self.notifyListeners(evt) else: evt = SpiderFootEvent("INTERNET_NAME", domain, self.__name__, event) self.notifyListeners(evt) return None # End of sfp_api_recon_dev class
Python
0.000002
f24fe32329625ec037a9afc8d3bdeed5f41e69a0
Add a script for easy diffing of two Incars.
scripts/diff_incar.py
scripts/diff_incar.py
#!/usr/bin/env python ''' Created on Nov 12, 2011 ''' __author__="Shyue Ping Ong" __copyright__ = "Copyright 2011, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "shyue@mit.edu" __date__ = "Nov 12, 2011" import sys import itertools from pymatgen.io.vaspio import Incar from pymatgen.util.string_utils import str_aligned filepath1 = sys.argv[1] filepath2 = sys.argv[2] incar1 = Incar.from_file(filepath1) incar2 = Incar.from_file(filepath2) def format_lists(v): if isinstance(v, (tuple, list)): return " ".join([str(i) + "*" + str(len(tuple(group))) for (i,group) in itertools.groupby(v)]) return v d = incar1.diff(incar2) output = [['SAME PARAMS','', '']] output.append(['---------------','', '']) output.extend([(k,format_lists(v),format_lists(v)) for k,v in d['Same'].items() if k != "SYSTEM"]) output.append(['','', '']) output.append(['DIFFERENT PARAM','', '']) output.append(['---------------','', '']) output.extend([(k,format_lists(v['INCAR1']),format_lists(v['INCAR2'])) for k, v in d['Different'].items() if k != "SYSTEM"]) print str_aligned(output, ['', filepath1, filepath2])
Python
0.999906
7e2170feef60866b8938595f674ae4dd70c5cc46
Add benchmark for F.transpose()
python/benchmark/function/test_transpose.py
python/benchmark/function/test_transpose.py
# Copyright 2022 Sony Group Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import nnabla.functions as F from function_benchmark import FunctionBenchmark, Inspec def inspecs_params(): inspecs = [] # Reported bad performance cases # These three cases were optimized well by cuTENSOR. inspecs.append(([Inspec((32, 144, 28, 1))], (0, 1, 3, 2))) inspecs.append(([Inspec((32, 144, 28, 3))], (0, 1, 3, 2))) inspecs.append(([Inspec((768, 50, 50))], (0, 2, 1))) # From ResNet-50 # Input side inspecs.append(([Inspec((192, 224, 224, 3))], (0, 3, 1, 2))) inspecs.append(([Inspec((192, 3, 224, 224))], (0, 2, 3, 1))) # Output side inspecs.append(([Inspec((192, 3, 3, 512))], (0, 3, 1, 2))) inspecs.append(([Inspec((192, 512, 3, 3))], (0, 2, 3, 1))) inspecs.append(([Inspec((192, 1, 1, 2048))], (0, 3, 1, 2))) inspecs.append(([Inspec((192, 2048, 1, 1))], (0, 2, 3, 1))) # Single input inspecs.append(([Inspec((1, 224, 224, 3))], (0, 3, 1, 2))) inspecs.append(([Inspec((1, 3, 224, 224))], (0, 2, 3, 1))) inspecs.append(([Inspec((1, 3, 3, 512))], (0, 3, 1, 2))) inspecs.append(([Inspec((1, 512, 3, 3))], (0, 2, 3, 1))) inspecs.append(([Inspec((1, 1, 1, 2048))], (0, 3, 1, 2))) inspecs.append(([Inspec((1, 2048, 1, 1))], (0, 2, 3, 1))) # Other # 2D inspecs.append(([Inspec((64, 64))], (1, 0))) inspecs.append(([Inspec((1024, 1024))], (1, 0))) # 4D inspecs.append(([Inspec((64, 64, 64, 64))], (0, 1, 2, 3))) inspecs.append(([Inspec((64, 64, 64, 64))], (0, 1, 3, 2))) 
inspecs.append(([Inspec((64, 64, 64, 64))], (0, 3, 2, 1))) inspecs.append(([Inspec((64, 64, 64, 64))], (0, 2, 1, 3))) inspecs.append(([Inspec((64, 64, 64, 64))], (0, 3, 1, 2))) inspecs.append(([Inspec((64, 64, 64, 64))], (0, 2, 3, 1))) # 4D misaligned inspecs.append(([Inspec((65, 65, 65, 65))], (0, 1, 2, 3))) inspecs.append(([Inspec((65, 65, 65, 65))], (0, 1, 3, 2))) inspecs.append(([Inspec((65, 65, 65, 65))], (0, 3, 2, 1))) inspecs.append(([Inspec((65, 65, 65, 65))], (0, 2, 1, 3))) inspecs.append(([Inspec((65, 65, 65, 65))], (0, 3, 1, 2))) inspecs.append(([Inspec((65, 65, 65, 65))], (0, 2, 3, 1))) return inspecs @pytest.mark.parametrize('inspecs, axis', inspecs_params()) def test_transpose(inspecs, axis, nnabla_opts): fb = FunctionBenchmark( F.transpose, inspecs, [axis], dict(), nnabla_opts.ext, nnabla_opts.ext_kwargs) fb.benchmark() fb.write(writer=nnabla_opts.function_benchmark_writer)
Python
0.000002
c36ae47bee44ff8aa8eaf17f8ded88192d7a6573
implement query term search
queryAnswer.py
queryAnswer.py
import pickle # Loads the posting Index index = open("posIndex.dat", "rb"); posIndex = pickle.load(index); print posIndex['made']; query = "Juan made of youtube" # query = raw_input('Please enter your query: '); queryTerms = ' '.join(query.split()); queryTerms = queryTerms.split(' '); k = len(queryTerms); print (queryTerms); i = 0; for term in queryTerms: queryTerms[i] = term.lower(); if term in posIndex.keys(): print "%s -->\t %s\n" % (term, posIndex[term]); else: print "%s -->\n" % (term); i = i +1;
Python
0.999171
625d250c7eabcf48292590a6b0ca57f1b3cc7c49
Add meshprocessing scratch
scratch/meshprocessing.py
scratch/meshprocessing.py
import networkx as nx from time import time import numpy as np def mesh2graph(faces): """ Converts a triangular mesh to a graph only taking the connectivity into account """ g = nx.Graph() for i in range(len(faces)): g.add_edge(faces[i,0], faces[i,1]) g.add_edge(faces[i,1], faces[i,2]) return g def graphlaplacian(g): import scipy.sparse as sp # scipy.sparse.linalg.eigen n = g.order() D = sp.identity(n) A = nx.to_scipy_sparse_matrix(g) di = A.sum(axis=1).T.tolist()[0] D.setdiag(di) L = D - A return L def grapheigendecomposition(graphlaplacian, k = 3): """ k is the number of eigenvalues desired See http://docs.scipy.org/doc/scipy/reference/sparse.linalg.html """ from scipy.sparse.linalg.eigen import lobpcg guess = np.random.rand(graphlaplacian.shape[0],k) * 100 return lobpcg(graphlaplacian, guess) if __name__ == '__main__': faces = np.array([ [0,1,2], [1,2,3]], dtype = np.uint) start = time() import nibabel.gifti as gi a=gi.read('/home/stephan/Dev/PyWorkspace/connectomeviewer/cviewer/resources/atlases/template_atlas_homo_sapiens_01/Gifti/fsaverage.gii') faces = a.darrays[1].data[:100,:] print "Loading took ", time()-start g = mesh2graph(faces) print "Making graph ", time()-start gl = graphlaplacian(g) print "Getting laplacian ", time()-start w,v = grapheigendecomposition(gl, k = 3) # Ev, Evect = eig(gl) print w print "Getting eigendecomposition ", time()-start from scipy.linalg import eig, eigh Ev, Evect = eigh(gl.todense()) print Ev #print np.real(Ev)
Python
0.000001
6a13511db8401a17a5c6feb7071af821211c2836
Create sitemap urls
opps/sitemaps/urls.py
opps/sitemaps/urls.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from django.conf.urls import patterns, url from django.contrib.sitemaps import views as sitemap_views from opps.sitemaps.sitemaps import GenericSitemap, InfoDisct sitemaps = { 'articles': GenericSitemap(InfoDisct(), priority=0.6), } sitemaps_googlenews = { 'articles': GenericSitemap(InfoDisct(True), priority=0.6), } urlpatterns = patterns( '', url(r'^\.xml$', sitemap_views.index, {'sitemaps': sitemaps}), url(r'^-googlenews\.xml$', sitemap_views.sitemap, {'sitemaps': sitemaps_googlenews, 'template_name': 'sitemap_googlenews.xml'}), url(r'^-(?P<section>.+)\.xml$', sitemap_views.sitemap, {'sitemaps': sitemaps}), )
Python
0.000207
78d926434ff1ad6ade0764ac18cca2413a5beccb
Bump dev version in master
fabric/version.py
fabric/version.py
""" Current Fabric version constant plus version pretty-print method. This functionality is contained in its own module to prevent circular import problems with ``__init__.py`` (which is loaded by setup.py during installation, which in turn needs access to this version information.) """ from subprocess import Popen, PIPE from os.path import abspath, dirname def git_sha(): loc = abspath(dirname(__file__)) p = Popen( "cd \"%s\" && git log -1 --format=format:%%h" % loc, shell=True, stdout=PIPE, stderr=PIPE ) return p.communicate()[0] VERSION = (1, 3, 0, 'alpha', 0) def get_version(form='short'): """ Return a version string for this package, based on `VERSION`. Takes a single argument, ``form``, which should be one of the following strings: * ``branch``: just the major + minor, e.g. "0.9", "1.0". * ``short`` (default): compact, e.g. "0.9rc1", "0.9.0". For package filenames or SCM tag identifiers. * ``normal``: human readable, e.g. "0.9", "0.9.1", "0.9 beta 1". For e.g. documentation site headers. * ``verbose``: like ``normal`` but fully explicit, e.g. "0.9 final". For tag commit messages, or anywhere that it's important to remove ambiguity between a branch and the first final release within that branch. """ # Setup versions = {} branch = "%s.%s" % (VERSION[0], VERSION[1]) tertiary = VERSION[2] type_ = VERSION[3] final = (type_ == "final") type_num = VERSION[4] firsts = "".join([x[0] for x in type_.split()]) sha = git_sha() sha1 = (" (%s)" % sha) if sha else "" # Branch versions['branch'] = branch # Short v = branch if (tertiary or final): v += "." + str(tertiary) if not final: v += firsts if type_num: v += str(type_num) else: v += sha1 versions['short'] = v # Normal v = branch if tertiary: v += "." + str(tertiary) if not final: if type_num: v += " " + type_ + " " + str(type_num) else: v += " pre-" + type_ + sha1 versions['normal'] = v # Verbose v = branch if tertiary: v += "." 
+ str(tertiary) if not final: if type_num: v += " " + type_ + " " + str(type_num) else: v += " pre-" + type_ + sha1 else: v += " final" versions['verbose'] = v try: return versions[form] except KeyError: raise TypeError('"%s" is not a valid form specifier.' % form) __version__ = get_version('short')
""" Current Fabric version constant plus version pretty-print method. This functionality is contained in its own module to prevent circular import problems with ``__init__.py`` (which is loaded by setup.py during installation, which in turn needs access to this version information.) """ from subprocess import Popen, PIPE from os.path import abspath, dirname def git_sha(): loc = abspath(dirname(__file__)) p = Popen( "cd \"%s\" && git log -1 --format=format:%%h" % loc, shell=True, stdout=PIPE, stderr=PIPE ) return p.communicate()[0] VERSION = (1, 2, 0, 'final', 0) def get_version(form='short'): """ Return a version string for this package, based on `VERSION`. Takes a single argument, ``form``, which should be one of the following strings: * ``branch``: just the major + minor, e.g. "0.9", "1.0". * ``short`` (default): compact, e.g. "0.9rc1", "0.9.0". For package filenames or SCM tag identifiers. * ``normal``: human readable, e.g. "0.9", "0.9.1", "0.9 beta 1". For e.g. documentation site headers. * ``verbose``: like ``normal`` but fully explicit, e.g. "0.9 final". For tag commit messages, or anywhere that it's important to remove ambiguity between a branch and the first final release within that branch. """ # Setup versions = {} branch = "%s.%s" % (VERSION[0], VERSION[1]) tertiary = VERSION[2] type_ = VERSION[3] final = (type_ == "final") type_num = VERSION[4] firsts = "".join([x[0] for x in type_.split()]) sha = git_sha() sha1 = (" (%s)" % sha) if sha else "" # Branch versions['branch'] = branch # Short v = branch if (tertiary or final): v += "." + str(tertiary) if not final: v += firsts if type_num: v += str(type_num) else: v += sha1 versions['short'] = v # Normal v = branch if tertiary: v += "." + str(tertiary) if not final: if type_num: v += " " + type_ + " " + str(type_num) else: v += " pre-" + type_ + sha1 versions['normal'] = v # Verbose v = branch if tertiary: v += "." 
+ str(tertiary) if not final: if type_num: v += " " + type_ + " " + str(type_num) else: v += " pre-" + type_ + sha1 else: v += " final" versions['verbose'] = v try: return versions[form] except KeyError: raise TypeError('"%s" is not a valid form specifier.' % form) __version__ = get_version('short')
Python
0
f18dc77d49a7c5154df11232f645dbb8e0f897dd
Remove bias
models/dual_encoder.py
models/dual_encoder.py
import tensorflow as tf import numpy as np from models import helpers FLAGS = tf.flags.FLAGS def get_embeddings(hparams): if hparams.glove_path and hparams.vocab_path: tf.logging.info("Loading Glove embeddings...") vocab_array, vocab_dict = helpers.load_vocab(hparams.vocab_path) glove_vectors, glove_dict = helpers.load_glove_vectors(hparams.glove_path, vocab=set(vocab_array)) initializer = helpers.build_initial_embedding_matrix(vocab_dict, glove_dict, glove_vectors, hparams.embedding_dim) else: tf.logging.info("No glove/vocab path specificed, starting with random embeddings.") initializer = tf.random_uniform_initializer(-0.25, 0.25) return tf.get_variable( "word_embeddings", shape=[hparams.vocab_size, hparams.embedding_dim], initializer=initializer) def dual_encoder_model( hparams, mode, context, context_len, utterance, utterance_len, targets): # Initialize embedidngs randomly or with pre-trained vectors if available embeddings_W = get_embeddings(hparams) # Embed the context and the utterance context_embedded = tf.nn.embedding_lookup( embeddings_W, context, name="embed_context") utterance_embedded = tf.nn.embedding_lookup( embeddings_W, utterance, name="embed_utterance") # Build the RNN with tf.variable_scope("rnn") as vs: # We use an LSTM Cell cell = tf.nn.rnn_cell.LSTMCell( hparams.rnn_dim, forget_bias=2.0, use_peepholes=True, state_is_tuple=True) # Run the utterance and context through the RNN rnn_outputs, rnn_states = tf.nn.dynamic_rnn( cell, tf.concat(0, [context_embedded, utterance_embedded]), sequence_length=tf.concat(0, [context_len, utterance_len]), dtype=tf.float32) encoding_context, encoding_utterance = tf.split(0, 2, rnn_states.h) with tf.variable_scope("prediction") as vs: M = tf.get_variable("M", shape=[hparams.rnn_dim, hparams.rnn_dim], initializer=tf.truncated_normal_initializer()) # b = tf.get_variable("b", [hparams.rnn_dim]) # "Predict" a response: c * M generated_response = tf.matmul(encoding_context, M) generated_response = 
tf.expand_dims(generated_response, 2) encoding_utterance = tf.expand_dims(encoding_utterance, 2) # Dot product between generated response and actual response # (c * M) * r logits = tf.batch_matmul(generated_response, encoding_utterance, True) logits = tf.squeeze(logits, [2]) # Apply sigmoid to convert logits to probabilities probs = tf.sigmoid(logits) # Calculate the binary cross-entropy loss losses = tf.nn.sigmoid_cross_entropy_with_logits(logits, tf.to_float(targets)) # Mean loss across the batch of examples mean_loss = tf.reduce_mean(losses, name="mean_loss") return probs, mean_loss
import tensorflow as tf import numpy as np from models import helpers FLAGS = tf.flags.FLAGS def get_embeddings(hparams): if hparams.glove_path and hparams.vocab_path: tf.logging.info("Loading Glove embeddings...") vocab_array, vocab_dict = helpers.load_vocab(hparams.vocab_path) glove_vectors, glove_dict = helpers.load_glove_vectors(hparams.glove_path, vocab=set(vocab_array)) initializer = helpers.build_initial_embedding_matrix(vocab_dict, glove_dict, glove_vectors, hparams.embedding_dim) else: tf.logging.info("No glove/vocab path specificed, starting with random embeddings.") initializer = tf.random_uniform_initializer(-0.25, 0.25) return tf.get_variable( "word_embeddings", shape=[hparams.vocab_size, hparams.embedding_dim], initializer=initializer) def dual_encoder_model( hparams, mode, context, context_len, utterance, utterance_len, targets): # Initialize embedidngs randomly or with pre-trained vectors if available embeddings_W = get_embeddings(hparams) # Embed the context and the utterance context_embedded = tf.nn.embedding_lookup( embeddings_W, context, name="embed_context") utterance_embedded = tf.nn.embedding_lookup( embeddings_W, utterance, name="embed_utterance") # Build the RNN with tf.variable_scope("rnn") as vs: # We use an LSTM Cell cell = tf.nn.rnn_cell.LSTMCell( hparams.rnn_dim, forget_bias=2.0, use_peepholes=True, state_is_tuple=True) # Run the utterance and context through the RNN rnn_outputs, rnn_states = tf.nn.dynamic_rnn( cell, tf.concat(0, [context_embedded, utterance_embedded]), sequence_length=tf.concat(0, [context_len, utterance_len]), dtype=tf.float32) encoding_context, encoding_utterance = tf.split(0, 2, rnn_states.h) with tf.variable_scope("prediction") as vs: M = tf.get_variable("M", shape=[hparams.rnn_dim, hparams.rnn_dim], initializer=tf.truncated_normal_initializer()) b = tf.get_variable("b", [hparams.rnn_dim]) # "Predict" a response: c * M generated_response = tf.matmul(encoding_context, M) + b generated_response = 
tf.expand_dims(generated_response, 2) encoding_utterance = tf.expand_dims(encoding_utterance, 2) # Dot product between generated response and actual response # (c * M) * r logits = tf.batch_matmul(generated_response, encoding_utterance, True) logits = tf.squeeze(logits, [2]) # Apply sigmoid to convert logits to probabilities probs = tf.sigmoid(logits) # Calculate the binary cross-entropy loss losses = tf.nn.sigmoid_cross_entropy_with_logits(logits, tf.to_float(targets)) # Mean loss across the batch of examples mean_loss = tf.reduce_mean(losses, name="mean_loss") return probs, mean_loss
Python
0.000017
04d122d88bb9f71843df924e048b12de1976b847
Add missing migration
src/keybar/migrations/0008_entry_salt.py
src/keybar/migrations/0008_entry_salt.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('keybar', '0007_remove_entry_key'), ] operations = [ migrations.AddField( model_name='entry', name='salt', field=models.BinaryField(null=True, blank=True), preserve_default=True, ), ]
Python
0.0002
3ac5648f8f3ab9e2dd6d93002f63c65bedb3e637
Patch beanstalkd collector
src/collectors/beanstalkd/beanstalkd.py
src/collectors/beanstalkd/beanstalkd.py
# coding=utf-8 """ Collects the following from beanstalkd: - Server statistics via the 'stats' command - Per tube statistics via the 'stats-tube' command #### Dependencies * beanstalkc """ import re import diamond.collector try: import beanstalkc beanstalkc # workaround for pyflakes issue #13 except ImportError: beanstalkc = None class BeanstalkdCollector(diamond.collector.Collector): SKIP_LIST = ['version', 'id', 'hostname'] COUNTERS_REGEX = re.compile( r'^(cmd-.*|job-timeouts|total-jobs|total-connections)$') def get_default_config_help(self): config_help = super(BeanstalkdCollector, self).get_default_config_help() config_help.update({ 'host': 'Hostname', 'port': 'Port', }) return config_help def get_default_config(self): """ Returns the default collector settings """ config = super(BeanstalkdCollector, self).get_default_config() config.update({ 'path': 'beanstalkd', 'host': 'localhost', 'port': 11300, }) return config def _get_stats(self): stats = {} try: connection = beanstalkc.Connection(self.config['host'], int(self.config['port'])) except beanstalkc.BeanstalkcException, e: self.log.error("Couldn't connect to beanstalkd: %s", e) return {} stats['instance'] = connection.stats() stats['tubes'] = [] for tube in connection.tubes(): tube_stats = connection.stats_tube(tube) stats['tubes'].append(tube_stats) return stats def collect(self): if beanstalkc is None: self.log.error('Unable to import beanstalkc') return {} info = self._get_stats() for stat, value in info['instance'].items(): if stat not in self.SKIP_LIST: self.publish(stat, value, metric_type=self.get_metric_type(stat)) for tube_stats in info['tubes']: tube = tube_stats['name'] for stat, value in tube_stats.items(): if stat != 'name': self.publish('tubes.%s.%s' % (tube, stat), value, metric_type=self.get_metric_type(stat)) def get_metric_type(self, stat): if self.COUNTERS_REGEX.match(stat): return 'COUNTER' return 'GAUGE'
# coding=utf-8 """ Collects the following from beanstalkd: - Server statistics via the 'stats' command - Per tube statistics via the 'stats-tube' command #### Dependencies * beanstalkc """ import re import diamond.collector try: import beanstalkc beanstalkc # workaround for pyflakes issue #13 except ImportError: beanstalkc = None class BeanstalkdCollector(diamond.collector.Collector): COUNTERS_REGEX = re.compile( r'^(cmd-.*|job-timeouts|total-jobs|total-connections)$') def get_default_config_help(self): config_help = super(BeanstalkdCollector, self).get_default_config_help() config_help.update({ 'host': 'Hostname', 'port': 'Port', }) return config_help def get_default_config(self): """ Returns the default collector settings """ config = super(BeanstalkdCollector, self).get_default_config() config.update({ 'path': 'beanstalkd', 'host': 'localhost', 'port': 11300, }) return config def _get_stats(self): stats = {} try: connection = beanstalkc.Connection(self.config['host'], int(self.config['port'])) except beanstalkc.BeanstalkcException, e: self.log.error("Couldn't connect to beanstalkd: %s", e) return {} stats['instance'] = connection.stats() stats['tubes'] = [] for tube in connection.tubes(): tube_stats = connection.stats_tube(tube) stats['tubes'].append(tube_stats) return stats def collect(self): if beanstalkc is None: self.log.error('Unable to import beanstalkc') return {} info = self._get_stats() for stat, value in info['instance'].items(): if stat != 'version': self.publish(stat, value, metric_type=self.get_metric_type(stat)) for tube_stats in info['tubes']: tube = tube_stats['name'] for stat, value in tube_stats.items(): if stat != 'name': self.publish('tubes.%s.%s' % (tube, stat), value, metric_type=self.get_metric_type(stat)) def get_metric_type(self, stat): if self.COUNTERS_REGEX.match(stat): return 'COUNTER' return 'GAUGE'
Python
0.00001
531da297c57c7b359c37a743095c10e7ad0592cf
Add test_container
tests/test_container.py
tests/test_container.py
import pdir def test_acting_like_a_list(): dadada = 1 cadada = 1 vadada = 1 apple1 = 1 xapple2 = 1 result, correct = pdir(), dir() assert len(correct) == len(result) for x, y in zip(correct, result): assert x == y def test_acting_like_a_list_when_search(): dadada = 1 cadada = 1 vadada = 1 apple1 = 1 xapple2 = 1 result = pdir().s('apple') assert len(result) == 2 assert list(result) == ['apple1', 'xapple2']
Python
0.000003
79ebedc800c31b47bd0cc340de06dafcd6ade7f9
Add TwrOauth basic test
tests/test_twr_oauth.py
tests/test_twr_oauth.py
#!/usr/bin/env python # # Copyright (c) 2013 Martin Abente Lahaye. - tch@sugarlabs.org #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: #The above copyright notice and this permission notice shall be included in #all copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #THE SOFTWARE. 
import sys from gi.repository import GObject sys.path.append("..") from twitter.twr_oauth import TwrOauth from twitter.twr_account import TwrAccount consumer_key = '' consumer_secret = '' access_key = '' access_secret = '' TwrAccount.set_secrets(consumer_key, consumer_secret, access_key, access_secret) def __phase2_failed_cb(oauth, info): print '[FAILED] phase2: access-downloaded-failed, with %s' % info loop.quit() def __phase1_failed_cb(oauth, info): print '[FAILED] phase1: request-downloaded-failed, with %s' % info loop.quit() def __phase2_cb(oauth, info): print '[OK] phase2: access-downloaded, with %s' % info TwrAccount.set_secrets(consumer_key, consumer_secret, info['oauth_token'], info['oauth_token_secret']) loop.quit() def __phase1_cb(oauth, info): print '[OK] phase1: request-downloaded' url = TwrOauth.AUTHORIZATION_URL % info['oauth_token'] print 'Please visit %s' % url verifier = raw_input('verifier: ') TwrAccount.set_secrets(consumer_key, consumer_secret, info['oauth_token'], info['oauth_token_secret']) oauth.connect('access-downloaded', __phase2_cb) oauth.connect('access-downloaded-failed', __phase2_failed_cb) oauth.access_token(verifier) oauth = TwrOauth() oauth.connect('request-downloaded', __phase1_cb) oauth.connect('request-downloaded-failed', __phase1_failed_cb) oauth.request_token() loop = GObject.MainLoop() loop.run()
Python
0
221f8a23c92e8fdb58589b7958d5a1fbe63c326b
Create utils.py
bitcoin/utils.py
bitcoin/utils.py
from bitcoin.main import * from bitcoin.pyspecials import * import urlparse, re def satoshi_to_btc(val): return (float(val) / 10**8) def btc_to_satoshi(val): return int(val * 10**8 + 0.5) # Return the address and btc_amount from the # parsed uri_string. If either of address # or amount is not found that particular # return value is None. def parse_bitcoin_uri(uri_string): parsed = urlparse.urlparse(uri_string) if parsed.scheme == 'bitcoin': addr = parsed.path queries = urlparse.parse_qs(parsed.query) if 'amount' not in queries: btc_amount = None elif len(queries['amount']) == 1: btc_amount = float(queries['amount'][0]) else: btc_amount = None return addr, btc_amount else: return None, None OPS = { '00': 'OP_FALSE', '4c': 'OP_PUSHDATA1', '4d': 'OP_PUSHDATA2', '4e': 'OP_PUSHDATA4', '4f': 'OP_1NEGATE', '51': 'OP_TRUE', '52': 'OP_2', '53': 'OP_3', '54': 'OP_4', '55': 'OP_5', '56': 'OP_6', '57': 'OP_7', '58': 'OP_8', '59': 'OP_9', '5a': 'OP_10', '5b': 'OP_11', '5c': 'OP_12', '5d': 'OP_13', '5e': 'OP_14', '5f': 'OP_15', '60': 'OP_16', '61': 'OP_NOP', '63': 'OP_IF', '64': 'OP_NOTIF', '67': 'OP_ELSE', '68': 'OP_ENDIF', '69': 'OP_VERIFY', '6a': 'OP_RETURN', '6b': 'OP_TOALTSTACK', '6c': 'OP_FROMALTSTACK', '73': 'OP_IFDUP', '74': 'OP_DEPTH', '75': 'OP_DROP', '76': 'OP_DUP', '77': 'OP_NIP', '78': 'OP_OVER', '79': 'OP_PICK', '7a': 'OP_ROLL', '7b': 'OP_ROT', '7c': 'OP_SWAP', '7d': 'OP_TUCK', '6d': 'OP_2DROP', '6e': 'OP_2DUP', '6f': 'OP_3DUP', '70': 'OP_2OVER', '71': 'OP_2ROT', '72': 'OP_2SWAP', '7e': 'OP_CAT', '7f': 'OP_SUBSTR', '80': 'OP_LEFT', '81': 'OP_RIGHT', '82': 'OP_SIZE', '83': 'OP_INVERT', '84': 'OP_AND', '85': 'OP_OR', '86': 'OP_XOR', '87': 'OP_EQUAL', '88': 'OP_EQUALVERIFY', '8b': 'OP_1ADD', '8c': 'OP_1SUB', '8d': 'OP_2MUL', '8e': 'OP_2DIV', '8f': 'OP_NEGATE', '90': 'OP_ABS', '91': 'OP_NOT', '92': 'OP_0NOTEQUAL', '93': 'OP_ADD', '94': 'OP_SUB', '95': 'OP_MUL', '96': 'OP_DIV', '97': 'OP_MOD', '98': 'OP_LSHIFT', '99': 'OP_RSHIFT', '9a': 'OP_BOOLAND', '9b': 'OP_BOOLOR', 
'9c': 'OP_NUMEQUAL', '9d': 'OP_NUMEQUALVERIFY', '9e': 'OP_NUMNOTEQUAL', '9f': 'OP_LESSTHAN', 'a0': 'OP_GREATERTHAN', 'a1': 'OP_LESSTHANOREQUAL', 'a2': 'OP_GREATERTHANOREQUAL', 'a3': 'OP_MIN', 'a4': 'OP_MAX', 'a5': 'OP_WITHIN', 'a6': 'OP_RIPEMD160', 'a7': 'OP_SHA1', 'a8': 'OP_SHA256', 'a9': 'OP_HASH160', 'aa': 'OP_HASH256', 'ab': 'OP_CODESEPARATOR', 'ac': 'OP_CHECKSIG', 'ad': 'OP_CHECKSIGVERIFY', 'ae': 'OP_CHECKMULTISIG', 'af': 'OP_CHECKMULTISIGVERIFY', 'fd': 'OP_PUBKEYHASH', 'fe': 'OP_PUBKEY', 'ff': 'OP_INVALIDOPCODE', '50': 'OP_RESERVED', '62': 'OP_VER', '65': 'OP_VERIF', '66': 'OP_VERNOTIF', '89': 'OP_RESERVED1', '8a': 'OP_RESERVED2', 'b0': 'OP_NOP0', 'b1': 'OP_NOP1', 'b2': 'OP_NOP2', 'b3': 'OP_NOP3', 'b4': 'OP_NOP4', 'b5': 'OP_NOP5', 'b6': 'OP_NOP6', 'b7': 'OP_NOP7', 'b8': 'OP_NOP8', 'b9': 'OP_NOP9', } STANDARD_TRANSACTION_LIST = [ 'P2PKH', 'P2SH', 'Multisig', 'Pubkey', 'Null Data', ] REGEX_PATTERNS = { 'P2PKH': re.compile('OP_DUP OP_HASH160 [abcdef0123456789]+ OP_EQUALVERIFY OP_CHECKSIG'), 'P2SH': re.compile('OP_HASH160 .* OP_EQUAL'), 'Multisig': re.compile('(OP_FALSE|OP_0|OP_TRUE) ([abcdef0123456789]+ )+(OP_1|OP_2|OP_3|OP_4|OP_5) OP_CHECKMULTISIG'), 'Pubkey': re.compile('[abcdef0123456789]+ OP_CHECKSIG'), 'Null Data': re.compile('OP_RETURN [abcdef0123456789]+'), } OPname = dict([(v[3:], k) for k, v in OPS.iteritems()]) OPint = dict([(decode(k, 16), v) for k, v in OPS.iteritems()]) OPhex = OPS.copy() addr="n1hjyVvYQPQtejJcANd5ZJM5rmxHCCgWL7" #SIG64="G8kH/WEgiATGXSy78yToe36IF9AUlluY3bMdkDFD1XyyDciIbXkfiZxk/qmjGdMeP6/BQJ/C5U/pbQUZv1HGkn8="
Python
0.000001
d15c8eaca5fb115b8600a8e743ae73a9edba9a5b
Initialize P04_datetimeModule
books/AutomateTheBoringStuffWithPython/Chapter15/P04_datetimeModule.py
books/AutomateTheBoringStuffWithPython/Chapter15/P04_datetimeModule.py
# This program uses the datetime module to manipulate dates and times. # The datetime Module import datetime, time print(datetime.datetime.now()) dt = datetime.datetime(2015, 10, 21, 16, 29, 0) print((dt.year, dt.month, dt.day)) print((dt.hour, dt.minute, dt.second)) print(datetime.datetime.fromtimestamp(1000000)) print(datetime.datetime.fromtimestamp(time.time())) halloween2015 = datetime.datetime(2015, 10, 31, 0, 0, 0) newyears2016 = datetime.datetime(2016, 1, 1, 0, 0, 0) oct31_2015 = datetime.datetime(2015, 10, 31, 0, 0, 0) print(halloween2015 == oct31_2015) print(halloween2015 > newyears2016) print(newyears2016 > halloween2015) print(newyears2016 != oct31_2015)
Python
0.000008
8106c22a5c05f438eb9c6436af054fd1e72b103c
Add SK_IGNORE_FASTER_TEXT_FIX define for staging Skia change.
public/blink_skia_config.gyp
public/blink_skia_config.gyp
# # Copyright (C) 2012 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # This target is a dependency of Chromium's skia/skia_library.gyp. # It only contains code suppressions which keep Webkit tests from failing. { 'targets': [ { 'target_name': 'blink_skia_config', 'type': 'none', 'direct_dependent_settings': { 'defines': [ # Place defines here that require significant Blink rebaselining, or that # are otherwise best removed in Blink and then rolled into Chromium. # Defines should be in single quotes and a comma must appear after every one. 
# DO NOT remove the define until you are ready to rebaseline, and # AFTER the flag has been removed from skia.gyp in Chromium. 'SK_DEFERRED_CANVAS_USES_FACTORIES=1', 'SK_IGNORE_FASTER_TEXT_FIX', ], }, }, ], }
# # Copyright (C) 2012 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # This target is a dependency of Chromium's skia/skia_library.gyp. # It only contains code suppressions which keep Webkit tests from failing. { 'targets': [ { 'target_name': 'blink_skia_config', 'type': 'none', 'direct_dependent_settings': { 'defines': [ # Place defines here that require significant Blink rebaselining, or that # are otherwise best removed in Blink and then rolled into Chromium. # Defines should be in single quotes and a comma must appear after every one. 
# DO NOT remove the define until you are ready to rebaseline, and # AFTER the flag has been removed from skia.gyp in Chromium. 'SK_DEFERRED_CANVAS_USES_FACTORIES=1', ], }, }, ], }
Python
0.000004
f7586e8009ae9d2cfdc471b7dbdc9cf5d171c53b
Create string2.py
google/string2.py
google/string2.py
#!/usr/bin/env python # Copyright 2010 Google Inc. # Licensed under the Apache License, Version 2.0 # http://www.apache.org/licenses/LICENSE-2.0 # Google's Python Class # http://code.google.com/edu/languages/google-python-class/ # Additional basic string exercises # D. verbing # Given a string, if its length is at least 3, # add 'ing' to its end. # Unless it already ends in 'ing', in which case # add 'ly' instead. # If the string length is less than 3, leave it unchanged. # Return the resulting string. def verbing(s): if len(s) >= 3: if s[-3:] == 'ing': s = s + 'ly' else: s = s + 'ing' return s # E. not_bad # Given a string, find the first appearance of the # substring 'not' and 'bad'. If the 'bad' follows # the 'not', replace the whole 'not'...'bad' substring # with 'good'. # Return the resulting string. # So 'This dinner is not that bad!' yields: # This dinner is good! def not_bad(s): if s.find('not') < s.find('bad'): left_part = s[:s.find('not')] right_part = s[s.find('bad') + 3:] s = left_part + 'good' + right_part return s # F. front_back # Consider dividing a string into two halves. # If the length is even, the front and back halves are the same length. # If the length is odd, we'll say that the extra char goes in the front half. # e.g. 'abcde', the front half is 'abc', the back half 'de'. # Given 2 strings, a and b, return a string of the form # a-front + b-front + a-back + b-back def front_back(a, b): div_a = len(a) / 2 + len(a) % 2 div_b = len(b) / 2 + len(b) % 2 result = a[:div_a] + b[:div_b] + a[div_a:] + b[div_b:] return result # Simple provided test() function used in main() to print # what each function returns vs. what it's supposed to return. def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)) # main() calls the above functions with interesting inputs, # using the above test() to check if the result is correct or not. 
def main(): print 'verbing' test(verbing('hail'), 'hailing') test(verbing('swiming'), 'swimingly') test(verbing('do'), 'do') print print 'not_bad' test(not_bad('This movie is not so bad'), 'This movie is good') test(not_bad('This dinner is not that bad!'), 'This dinner is good!') test(not_bad('This tea is not hot'), 'This tea is not hot') test(not_bad("It's bad yet not"), "It's bad yet not") print print 'front_back' test(front_back('abcd', 'xy'), 'abxcdy') test(front_back('abcde', 'xyz'), 'abcxydez') test(front_back('Kitten', 'Donut'), 'KitDontenut') if __name__ == '__main__': main()
Python
0
e96aaab43a8433fd812cae91c0cdaed31486244f
Add sqlalchemy_sandbox.py for SQL DB experimentation
sqlalchemy_sandbox.py
sqlalchemy_sandbox.py
#!/usr/bin/env python3 """A Sandbox Script to allow me to play with and learn the SQALchemy Tools.""" import sys, argparse, os, random, sqlite3 from collections import OrderedDict try: import sqlalchemy as sqla from sqlalchemy.ext.declarative import declarative_base except ImportError: print('Error: Cannot Import SQLAlchemy', file=sys.stderr) sys.exit(1) VERSION = "0.0" VERBOSE = False def main(): args = parse_cmd_line() vprint(args) datastore = Datastore(args.database, db_echo=args.verbose_db) session = datastore.connect() # Check table presence. Create any missing tables. print('Existing Tables: {}'.format(datastore.tables)) if not datastore.tables_exist([Coord]): print('Required tables not present, creating them.') datastore.create_tables([Coord]) # Verify that we have some Coordinate data to play with count = len(session.query(Coord).all()) print('{} Coord objects present, adding {} new coords.'.format( count, 10 - count)) while len(session.query(Coord).all()) < 10: session.add(Coord.generate_random()) session.commit() # Print the contents of the database print('Coord objects in the database.') for coord in session.query(Coord).all(): print(coord) datastore.close() return 0 def parse_cmd_line(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( '--version', help='Print the version and exit.', action='version', version='%(prog)s {}'.format(VERSION)) VerboseAction.add_parser_argument(parser) parser.add_argument( '--verbose-db', dest='verbose_db', default=False, action='store_true', help='Enable DB debugging output.') parser.add_argument( dest='database', metavar='DATABASE', default='sandbox.db', nargs='?') return parser.parse_args() def vprint(msg): """Conditionally print a verbose message.""" if VERBOSE: print(msg) class VerboseAction(argparse.Action): """Enable the verbose output mechanism.""" flag = '--verbose' help = 'Enable verbose output.' 
@classmethod def add_parser_argument(cls, parser): parser.add_argument(cls.flag, help=cls.help, action=cls) def __init__(self, option_strings, dest, **kwargs): super(VerboseAction, self).__init__(option_strings, dest, nargs=0, default=False, **kwargs) def __call__(self, parser, namespace, values, option_string=None): print('Enabling verbose output.') global VERBOSE VERBOSE = True setattr(namespace, self.dest, True) class BaseTable(object): @property def columns(self): raise NotImplemented def __repr__(self): return '<{cls}(id={id}, {attrs})>'.format( cls=self.__class__.__name__, id=self.id, attrs=', '.join(['{key}={val}'.format(key=key, val=val) for key, val in self.columns.items()])) class Coord(BaseTable, declarative_base()): __tablename__ = 'coords' id = sqla.Column(sqla.Integer, primary_key=True) sector_x = sqla.Column(sqla.Integer) sector_y = sqla.Column(sqla.Integer) system_x = sqla.Column(sqla.Integer) system_y = sqla.Column(sqla.Integer) @property def columns(self): return OrderedDict([('sector_x', self.sector_x), ('sector_y', self.sector_y), ('system_x', self.system_x), ('system_y', self.system_y)]) @classmethod def generate_random(self): coord_range = (0, 10) return Coord(sector_x=random.randint(*coord_range), sector_y=random.randint(*coord_range), system_x=random.randint(*coord_range), system_y=random.randint(*coord_range)) class Datastore(object): def __init__(self, database, db_echo=False): self.database = database self.db_echo = db_echo self.closed = True self.engine = None self.session_type = None self.session = None def connect(self): if self.engine is None: self.engine = sqla.create_engine( 'sqlite:///{}'.format(self.database), echo=self.db_echo) if self.session_type is None: self.session_type = sqla.orm.sessionmaker(bind=self.engine) self.session = self.session_type() self.closed = False return self.session def close(self): if self.session is not None: self.session.close() self.session = None self.closed = True def __enter__(self): self.connect() 
return self def __exit__(self, *args, **kwargs): self.close() @property def tables(self): inspector = sqla.inspect(self.engine) return inspector.get_table_names() def table_exists(self, table): return table.__tablename__ in self.tables def tables_exist(self, tables): return all(tbl.__tablename__ in self.tables for tbl in tables) def create_tables(self, tables): for table in tables: if not self.table_exists(table): table.metadata.create_all(self.engine) if __name__ == '__main__': try: sys.exit(main()) except SystemExit: sys.exit(0) except KeyboardInterrupt: print('...interrupted by user, exiting.') sys.exit(1) except Exception as exc: if VERBOSE: raise else: print('Unhandled Error:\n{}'.format(exc)) sys.exit(1)
Python
0.000001
bb940826d78e44a4098023e83d788b3d915b9b1f
Revert "Add the GitHub-supported format extensions."
grip/constants.py
grip/constants.py
# The supported extensions, as defined by https://github.com/github/markup supported_extensions = ['.md', '.markdown'] # The default filenames when no file is provided default_filenames = map(lambda ext: 'README' + ext, supported_extensions)
# The supported extensions, as defined by https://github.com/github/markup supported_extensions = [ '.markdown', '.mdown', '.mkdn', '.md', '.textile', '.rdoc', '.org', '.creole', '.mediawiki', '.wiki', '.rst', '.asciidoc', '.adoc', '.asc', '.pod', ] # The default filenames when no file is provided default_filenames = map(lambda ext: 'README' + ext, supported_extensions)
Python
0
7eaca8eea7adf6e1b8a487a78e9cde950d7221f7
Split out and speed up producer tests
test/test_producer_integration.py
test/test_producer_integration.py
import unittest import time from kafka import * # noqa from kafka.common import * # noqa from kafka.codec import has_gzip, has_snappy from .fixtures import ZookeeperFixture, KafkaFixture from .testutil import * class TestKafkaProducerIntegration(KafkaIntegrationTestCase): topic = 'produce_topic' @classmethod def setUpClass(cls): # noqa cls.zk = ZookeeperFixture.instance() cls.server = KafkaFixture.instance(0, cls.zk.host, cls.zk.port) cls.client = KafkaClient('%s:%d' % (cls.server.host, cls.server.port)) @classmethod def tearDownClass(cls): # noqa cls.client.close() cls.server.close() cls.zk.close() def test_produce_many_simple(self): start_offset = self.current_offset(self.topic, 0) produce = ProduceRequest(self.topic, 0, messages=[ create_message("Test message %d" % i) for i in range(100) ]) resp = self.client.send_produce_request([produce]) self.assertEqual(len(resp), 1) # Only one response self.assertEqual(resp[0].error, 0) # No error self.assertEqual(resp[0].offset, start_offset) # Initial offset of first message self.assertEqual(self.current_offset(self.topic, 0), start_offset+100) resp = self.client.send_produce_request([produce]) self.assertEqual(len(resp), 1) # Only one response self.assertEqual(resp[0].error, 0) # No error self.assertEqual(resp[0].offset, start_offset+100) # Initial offset of first message self.assertEqual(self.current_offset(self.topic, 0), start_offset+200) def test_produce_10k_simple(self): start_offset = self.current_offset(self.topic, 0) produce = ProduceRequest(self.topic, 0, messages=[ create_message("Test message %d" % i) for i in range(10000) ]) resp = self.client.send_produce_request([produce]) self.assertEqual(len(resp), 1) # Only one response self.assertEqual(resp[0].error, 0) # No error self.assertEqual(resp[0].offset, start_offset) # Initial offset of first message self.assertEqual(self.current_offset(self.topic, 0), start_offset+10000) def test_produce_many_gzip(self): start_offset = self.current_offset(self.topic, 0) 
message1 = create_gzip_message(["Gzipped 1 %d" % i for i in range(100)]) message2 = create_gzip_message(["Gzipped 2 %d" % i for i in range(100)]) produce = ProduceRequest(self.topic, 0, messages=[message1, message2]) resp = self.client.send_produce_request([produce]) self.assertEqual(len(resp), 1) # Only one response self.assertEqual(resp[0].error, 0) # No error self.assertEqual(resp[0].offset, start_offset) # Initial offset of first message self.assertEqual(self.current_offset(self.topic, 0), start_offset+200) @unittest.skip("All snappy integration tests fail with nosnappyjava") def test_produce_many_snappy(self): start_offset = self.current_offset(self.topic, 0) produce = ProduceRequest(self.topic, 0, messages=[ create_snappy_message(["Snappy 1 %d" % i for i in range(100)]), create_snappy_message(["Snappy 2 %d" % i for i in range(100)]), ]) resp = self.client.send_produce_request([produce]) self.assertEqual(len(resp), 1) # Only one response self.assertEqual(resp[0].error, 0) # No error self.assertEqual(resp[0].offset, start_offset) # Initial offset of first message self.assertEqual(self.current_offset(self.topic, 0), start_offset+200) def test_produce_mixed(self): start_offset = self.current_offset(self.topic, 0) msg_count = 1+100 messages = [ create_message("Just a plain message"), create_gzip_message(["Gzipped %d" % i for i in range(100)]), ] # All snappy integration tests fail with nosnappyjava if False and has_snappy(): msg_count += 100 messages.append(create_snappy_message(["Snappy %d" % i for i in range(100)])) produce = ProduceRequest(self.topic, 0, messages=messages) resp = self.client.send_produce_request([produce]) self.assertEqual(len(resp), 1) # Only one response self.assertEqual(resp[0].error, 0) # No error self.assertEqual(resp[0].offset, start_offset) # Initial offset of first message self.assertEqual(self.current_offset(self.topic, 0), start_offset+msg_count) def test_produce_100k_gzipped(self): start_offset = self.current_offset(self.topic, 0) 
req1 = ProduceRequest(self.topic, 0, messages=[ create_gzip_message(["Gzipped batch 1, message %d" % i for i in range(50000)]) ]) resp1 = self.client.send_produce_request([req1]) self.assertEqual(len(resp1), 1) # Only one response self.assertEqual(resp1[0].error, 0) # No error self.assertEqual(resp1[0].offset, start_offset) # Initial offset of first message self.assertEqual(self.current_offset(self.topic, 0), start_offset+50000) req2 = ProduceRequest(self.topic, 0, messages=[ create_gzip_message(["Gzipped batch 2, message %d" % i for i in range(50000)]) ]) resp2 = self.client.send_produce_request([req2]) self.assertEqual(len(resp2), 1) # Only one response self.assertEqual(resp2[0].error, 0) # No error self.assertEqual(resp2[0].offset, start_offset+50000) # Initial offset of first message self.assertEqual(self.current_offset(self.topic, 0), start_offset+100000)
Python
0
f5718764185ce1149ed291601e4fe28f9cd2be06
add single list module for mini-stl (Python)
python/mini-stl/single_list.py
python/mini-stl/single_list.py
#!/usr/bin/python -e # -*- encoding: utf-8 -*- # # Copyright (c) 2013 ASMlover. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list ofconditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materialsprovided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
class SingleList(object): class ListNode(object): def __init__(self): self.next = None self.data = None def __del__(self): self.next = None self.data = None def __init__(self): self.front_ = None self.rear_ = None self.size_ = 0 self.iter_ = None def __del__(self): self.clear() self.iter_ = None def __iter__(self): self.iter_ = self.front_ return self def next(self): if self.iter_ == None: raise StopIteration else: data = self.iter_.data self.iter_ = self.iter_.next return data def clear(self): while self.front_ != None: node = self.front_ self.front_ = self.front_.next del node self.front_ = None self.rear_ = None self.size_ = 0 def empty(self): return self.front_ == None def size(self): return self.size_ def push_back(self, x): node = self.ListNode() node.next = None node.data = x if self.front_ == None: self.front_ = node self.rear_ = node else: self.rear_.next = node self.rear_ = node self.size_ += 1 def push_front(self, x): node = self.ListNode() node.next = self.front_ node.data = x if self.front_ == None: self.rear_ = node self.front_ = node self.size_ += 1 def pop_front(self): if self.front_ == None: return node = self.front_ self.front_ = self.front_.next del node self.size_ -= 1 def front(self): if self.front_ == None: return None return self.front_.data def back(self): if self.rear_ == None: return None return self.rear_.data
Python
0
0be7f2fe05588d93eb478a4fa648d310055b3ce7
Add experimental generation code to make drafts from raster images
pyweaving/generators/raster.py
pyweaving/generators/raster.py
from .. import Draft from PIL import Image def point_threaded(im, warp_color=(0, 0, 0), weft_color=(255, 255, 255), shafts=40, max_float=8, repeats=2): """ Given an image, generate a point-threaded drawdown that attempts to represent the image. Results in a drawdown with bilateral symmetry from a non-symmetric source image. """ draft = Draft(num_shafts=shafts, liftplan=True) im.thumbnail((shafts, im.size[1]), Image.ANTIALIAS) im = im.convert('1') w, h = im.size assert w == shafts warp_pattern_size = ((2 * shafts) - 2) for __ in range(repeats): for ii in range(warp_pattern_size): if ii < shafts: shaft = ii else: shaft = warp_pattern_size - ii draft.add_warp_thread(color=warp_color, shaft=shaft) imdata = im.getdata() for __ in range(repeats): for yy in range(h): offset = yy * w pick_shafts = set() for xx in range(w): pixel = imdata[offset + xx] if not pixel: pick_shafts.add(xx) draft.add_weft_thread(color=weft_color, shafts=pick_shafts) return draft
Python
0
de325dbe53bbd28eddcbbf188f2689474994249b
add migration for new version of storedmessages
osmaxx-py/osmaxx/third_party_apps/stored_messages/migrations/0002_message_url.py
osmaxx-py/osmaxx/third_party_apps/stored_messages/migrations/0002_message_url.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('stored_messages', '0001_initial'), ] operations = [ migrations.AddField( model_name='message', name='url', field=models.URLField(blank=True, null=True), ), ]
Python
0
1472011cb8cd323357626443f714284feedfed62
add merge of ACIS provided data
scripts/climodat/use_acis.py
scripts/climodat/use_acis.py
"""Use data provided by ACIS to replace climodat data""" import requests import sys import psycopg2 import datetime SERVICE = "http://data.rcc-acis.org/StnData" def safe(val): if val in ['M', 'S']: return None if val == 'T': return 0.0001 try: return float(val) except: print("failed to convert %s to float, using None" % (repr(val),)) return None def main(station, acis_station): table = "alldata_%s" % (station[:2],) payload = {"sid": acis_station, "sdate": "1850-01-01", "edate": "2017-01-01", "elems": "maxt,mint,pcpn,snow,snwd"} req = requests.post(SERVICE, json=payload) j = req.json() pgconn = psycopg2.connect(database='coop', host='localhost', port=5555, user='mesonet') cursor = pgconn.cursor() for row in j['data']: date = row[0] (high, low, precip, snow, snowd) = map(safe, row[1:]) if all([a is None for a in (high, low, precip, snow, snowd)]): continue cursor.execute(""" UPDATE """ + table + """ SET high = %s, low = %s, precip = %s, snow = %s, snowd = %s WHERE station = %s and day = %s """, (high, low, precip, snow, snowd, station, date)) if cursor.rowcount == 0: date = datetime.datetime.strptime(date, '%Y-%m-%d') sday = "%02i%02i" % (date.month, date.day) print("Adding entry for %s" % (date,)) cursor.execute("""INSERT into """ + table + """ (station, day, high, low, precip, snow, snowd, sday, year, month, estimated) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'f') """, (station, date, high, low, precip, snow, snowd, sday, date.year, date.month)) cursor.close() pgconn.commit() if __name__ == '__main__': main(sys.argv[1], sys.argv[2])
Python
0
b333d95f3f4187b9d9b480ba8ff4985a62d65f41
Add tests for nginx version
tests/pytests/unit/modules/test_nginx.py
tests/pytests/unit/modules/test_nginx.py
import pytest import salt.modules.nginx as nginx from tests.support.mock import patch @pytest.fixture def configure_loader_modules(): return {nginx: {}} @pytest.mark.parametrize( "expected_version,nginx_output", [ ("1.2.3", "nginx version: nginx/1.2.3"), ("1", "nginx version: nginx/1"), ("9.1.100a1+abc123", "nginx version: nginx/9.1.100a1+abc123"), ( "42.9.13.1111111111.whatever", "nginx version: nginx/42.9.13.1111111111.whatever", ), ], ) def test_basic_nginx_version_output(expected_version, nginx_output): with patch.dict(nginx.__salt__, {"cmd.run": lambda *args, **kwargs: nginx_output}): assert nginx.version() == expected_version
Python
0.999999
e77b9a5dff36b3318759a18a786c7cc08bb8ac3e
Create Scramble_String.py
Array/Scramble_String.py
Array/Scramble_String.py
Given a string s1, we may represent it as a binary tree by partitioning it to two non-empty substrings recursively. Below is one possible representation of s1 = "great": great / \ gr eat / \ / \ g r e at / \ a t To scramble the string, we may choose any non-leaf node and swap its two children. For example, if we choose the node "gr" and swap its two children, it produces a scrambled string "rgeat". rgeat / \ rg eat / \ / \ r g e at / \ a t We say that "rgeat" is a scrambled string of "great". Similarly, if we continue to swap the children of nodes "eat" and "at", it produces a scrambled string "rgtae". rgtae / \ rg tae / \ / \ r g ta e / \ t a We say that "rgtae" is a scrambled string of "great". Given two strings s1 and s2 of the same length, determine if s2 is a scrambled string of s1. class Solution: # @return a boolean def isScramble(self, s1, s2): if len(s1) != len(s2): return False if s1 == s2: return True length = len(list(s1)) if sorted(s1) != sorted(s2): return False for i in xrange(1,length): if self.isScramble(s1[:i],s2[:i]) and self.isScramble(s1[i:],s2[i:]): return True if self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:],s2[:-i]): return True return False # Note: # Condition: 1) length_s1 != length_s2 # 2) s1 == s2, s1与s2完全相等 # 3) sorted(s1) 与 sorted(s2)是不是相等 # 4) 比较s1[:i] s2[:i] and s1[i:],s2[i:] # 5) 比较s1[:i], s2[length_s2-i:] and s1[i:],s2[length_s2:-i]
Python
0.000081
156d3c7035e4b7867d1e715c0bac98cf16d24d77
add script to fix workspace info for search
src/scripts/fix_workspace_info.py
src/scripts/fix_workspace_info.py
""" Fixes workspace info to do the following. 1. Make sure the "narrative" metadata field contains an int that points to the narrative. 1. Make sure the "narrative_nice_name" metadata field is correct. 2. Make sure the "is_temporary" metadata field exists and is correct. 3. Adds a count of the number of narrative cells. 4. Does nothing at all if there's > 1 narrative in the workspace. Note that while this fetches the Narrative object, it doesn't modify it in any way. """ from biokbase.workspace.client import Workspace ws_url = "https://ci.kbase.us/services/workspace" def fix_workspace_info(ws_id, token, verbose=False): """ ws_id = id of the workspace to update. token = auth token of the user who owns the workspace. """ assert(token is not None) assert(str(ws_id).isdigit()) new_meta = dict() ws = Workspace(url=ws_url, token=token) if verbose: print("Checking workspace {}".format(ws_id)) # test if there's exactly 1 Narrative object in the workspace narr_obj_list = ws.list_objects({'ids': [ws_id]}) if len(narr_obj_list) != 1: if verbose: print("\tFound {} Narratives! Skipping this workspace".format(len(narr_obj_list))) return narr_info = narr_obj_list[0] narr_obj_id = narr_info[0] # fetch the workspace info and narrative object (with metadata) ws_info = ws.get_workspace_info({'id': int(ws_id)}) ws_meta = ws_info[8] narr_obj = ws.get_objects2({'objects': [{'ref': '{}/{}'.format(ws_id, narr_obj_id)}]})['data'][0] narr_name = narr_obj['data']['metadata']['name'] # 1. Test "narrative" key of ws_meta if str(narr_obj_id) != ws_meta.get('narrative'): new_meta['narrative'] = str(narr_obj_id) if verbose: print("\tUpdating id from {} -> {}".format(ws_meta.get('narrative'), narr_obj_id)) # 2. Test "is_temporary" key. # Should be true if there's only a single narrative version, and it's name is Untitled, and it only has a single markdown cell. # Should never reset to be temporary if it's not. 
# Really, this is here to add the field if it's not there, and to set things as non-temporary # if it looks like they should be. # So, if the marked 'is_temporary' is already false, do nothing. current_temp = ws_meta.get('is_temporary') if current_temp == 'true': # make sure it should be temporary. if narr_info[4] > 1 or narr_name != 'Untitled': if verbose: print("\tNarrative is named {} and has {} versions - marking not temporary".format(narr_name, narr_info[4])) new_meta['is_temporary'] = 'false' # get list of cells # if it's really REALLY old, it has a 'worksheets' field. Removed in Jupyter notebook format 4. if 'worksheets' in narr_obj['data']: cells = narr_obj['data']['worksheets'][0]['cells'] else: cells = narr_obj['data']['cells'] if len(cells) > 1 or cells[0]['cell_type'] != 'markdown': if verbose: print("\tNarrative has {} cells and the first is type {} - marking not temporary".format(len(cells), cells[0]['cell_type'])) new_meta['is_temporary'] = 'false' # 3. Test "narrative_nice_name" key meta_name = ws_meta.get('narrative_nice_name') if (meta_name is None and current_temp == 'false') or meta_name != narr_name: new_meta['narrative_nice_name'] = narr_name if verbose: print("\tUpdating 'narrative_nice_name' from {} -> {}".format(meta_name, narr_name)) # 4. Add the total cell count while we're at it. new_meta['cell_count'] = str(len(cells)) if verbose: print("\tAdding cell_count of {}".format(str(len(cells)))) ws.alter_workspace_metadata({'wsi': {'id': ws_id}, 'new': new_meta})
Python
0
f25b69a6ad6777576e31d0b01c4fc2c2bbe02788
Create new.py
simple_mqtt/templates/new.py
simple_mqtt/templates/new.py
Python
0.000001
0ee5d568ddc1f37abedb94f32d6b7da0439e6a4d
Create title_retriever.py
solutions/title_retriever.py
solutions/title_retriever.py
''' Script that will scrape the title of the given website ''' import urllib import re def getstock(title): regex = '<title>(.+?)</title>' #find all contents within title braces pattern = re.compile(regex) #converts regex into a pattern that can be understood by re module htmlfile = urllib.urlopen(title) #takes a string arguement htmltext = htmlfile.read() titles = re.findall(pattern,htmltext) return titles while True: try: title = str(raw_input("Please give me a url: ")) if not "http" in title: title = "http://"+title break except IOError: print "Sorry that url is not valid. Please try another." print getstock(title)[0]
Python
0.000003
071da9c0668d495e052baf5ad4d5bc9e068aa6a7
Create dict2xml.py
dict2xml.py
dict2xml.py
# Python Dictionary to XML converter # Written by github.com/Pilfer # @CodesStuff class dict2xml: def __init__(self, debug = False): self.debug = debug if self.debug: print "json2xml class has been loaded" def genXML(self,xmldict): tag = xmldict['tag'] attrs = [] kidstack = [] for attr in xmldict['attributes']: attrs.append(str("%s=\"%s\"") % (attr['name'],attr['value'])) if xmldict['children'] != None: for child in xmldict['children']: tmp = self.genXML(child) kidstack.append(tmp) if(len(kidstack) == 0): children = None else: children = "\n\t".join(kidstack) else: children = None xmlout = str("<%s %s>%s</%s>") % (tag, ' '.join(attrs), children if children != None else '',tag) return xmlout
Python
0
320da5dcc192d654d09ea631e9684f26e97795c0
add mitm script
reversing/400a-graphic/mitm.py
reversing/400a-graphic/mitm.py
vals = [0xdeadbeef,0xcafebabe,0xdeadbabe,0x8badf00d,0xb16b00b5,0xcafed00d,0xdeadc0de,0xdeadfa11,0xdefec8ed,0xdeadfeed,0xfee1dead,0xfaceb00b,0xfacefeed,0x000ff1ce,0x12345678,0x743029ab,0xdeed1234,0x00000000,0x11111111,0x11111112,0x11111113,0x42424242] start = 0xdeadbeef target = 0x764c648c group1 = vals[:11] group2 = vals[11:] print(len(group1), len(group2)) def recur(begin, rest): ret = [] if not rest: return [begin] for i in rest[0]: ret += recur(begin + [i], rest[1:]) return ret def all_possible(l): l = list(zip([0x0] * len(l), l)) return recur([], l) def xor_all(l, begin=0x0): for i in l: begin ^= i return begin group1_xors = {} group2_xors = {} for i in all_possible(group1): group1_xors[xor_all(i, start)] = i for i in all_possible(group2): group2_xors[xor_all(i, target)] = i intersect = set(group1_xors.keys()) & set(group2_xors.keys()) print(intersect) sol = intersect.pop() print(hex(sol)) valsol = group1_xors[sol] + group2_xors[sol] valsol = [i for i in valsol if i != 0] print(hex(xor_all(valsol, start))) print(list(map(hex, valsol)))
Python
0
d80f9ef4592cde488ece9f95b662f5e1e73eac42
change database
lib/wunderlist.py
lib/wunderlist.py
#!/usr/bin/env python from lib.base import BaseHandler import tornado.locale import tornado.httpserver import tornado.ioloop import tornado.options import tornado.web from datetime import datetime from tornado.options import define, options import pymongo if __name__ == "__main__": define("port", default=8000, type=int, help="run on the given port") class Application(tornado.web.Application): def __init__(self): handlers = [ (r"/wunderlist/search",WunSearchHandler), (r"/wunderlist/edit",WunEditHandler) ] settings = dict( debug=True ) conn = pymongo.Connection("localhost", 27017) self.db = conn["continuetry"] tornado.web.Application.__init__(self, handlers, **settings) class WunSearchHandler(BaseHandler): def get(self): qs = self.get_argument("qs", None) if not qs: no_qs = { "errmsg": "no_qs", "errcode": 1 } self.write(no_qs) return coll = self.db["sbooks"] coll_second = self.db["bbooks"] #add two vote attribute book_fields = ["isbn", "vote_count","voter","title", "alt", "author", "publisher", "image", "price", "tags", "isdonated", "donor"] book_fields_two = ["isbn","voter","title", "alt", "author", "publisher", "image", "price", "tags", "isdonated", "donor"] lst2 = [] lst3 = [] for key2 in coll.find({"isbn": int(qs)}): lst2.append(key2) if len(lst2) != 0: for key in lst2: del key["_id"] self.write(key) else: for key3 in coll_second.find({"isbn":qs}): lst3.append(key3) if len(lst3) != 0: for key in lst3: del key["_id"] self.write(key) else: not_exist = { "errmsg":"not_exist", "errcode":1 } self.write(not_exist) class WunEditHandler(BaseHandler): def post(self): isbn = self.get_argument("isbn",None) if not isbn: no_isbn = { "errmsg":"no_isbn", "errcode":1 } self.write(no_isbn) return Wunbook = {} lst = [] Wunbook["voter"] = lst Wunbook["vote_count"] = 0 for key in book_fields_two: if key == "voter": Wunbook[key].append(self.get_argument(key,None)) else: Wunbook[key] = self.get_argument(key,None) Wunbook["created_at"] = datetime.now().__format__("%Y-%m-%d %H:%M:%S") 
coll_second.insert(Wunbook) # Save success insert_sucs = { "errcode": 0 } self.write(insert_sucs) if __name__ == "__main__": tornado.options.parse_command_line() http_server = tornado.httpserver.HTTPServer(Application()) http_server.listen(options.port) tornado.ioloop.IOLoop.instance().start()
Python
0.000001
056bd290a4df08876109ef4e2da1115783a06f25
Add examples for setting classes attribute
examples/classes.py
examples/classes.py
from flask_table import Table, Col """If we want to put an HTML class onto the table element, we can set the "classes" attribute on the table class. This should be an iterable of that are joined together and all added as classes. If none are set, then no class is added to the table element. """ class Item(object): def __init__(self, name, description): self.name = name self.description = description class ItemTableOneClass(Table): classes = ['class1'] name = Col('Name') description = Col('Description') class ItemTableTwoClasses(Table): classes = ['class1', 'class2'] name = Col('Name') description = Col('Description') def one_class(items): table = ItemTableOneClass(items) # or {{ table }} in jinja print(table.__html__()) """Outputs: <table class="class1"> <thead> <tr> <th>Name</th> <th>Description</th> </tr> </thead> <tbody> <tr> <td>Name1</td> <td>Description1</td> </tr> </tbody> </table> """ def two_classes(items): table = ItemTableTwoClasses(items) # or {{ table }} in jinja print(table.__html__()) """Outputs: <table class="class1 class2"> <thead> <tr> <th>Name</th> <th>Description</th> </tr> </thead> <tbody> <tr> <td>Name1</td> <td>Description1</td> </tr> </tbody> </table> """ def main(): items = [Item('Name1', 'Description1')] # user ItemTableOneClass one_class(items) print('\n######################\n') # user ItemTableTwoClasses two_classes(items) if __name__ == '__main__': main()
Python
0
f16187d5943158d82fc87611f998283789b5ecdf
Add libarchive 3.1.2
packages/libarchive.py
packages/libarchive.py
Package ('libarchive', '3.1.2', sources = ['http://libarchive.org/downloads/%{name}-%{version}.tar.gz'], configure_flags = [ '--enable-bsdtar=shared', '--enable-bsdcpio=shared', '--disable-silent-rules', '--without-nettle'] )
Python
0.000004
b9b2b87f0d630de931765c1c9f448e295440e611
Create fetch_qt_version.py
fetch_qt_version.py
fetch_qt_version.py
"""Module to return the Qt version of a Qt codebase. This module provides a function that returns the version of a Qt codebase, given the toplevel qt5 repository directory. Note, the `qt5` directory applies to both Qt 5.x and Qt 6.x If it is run standalone with a python interpreter and not as part of another Python module, it must be run from the toplevel directory of a qt5 repository with the qtbase git submodule cloned and checked out. """ from __future__ import print_function # For python2 portability import os import sys import re def qt_version(qt5_dir: str) -> str: """Returns the Qt version of a Qt codebase""" if not os.path.exists(qt5_dir + "/qtbase"): print("qtbase doesn't exist. Please pass the path to a qt5 repo. aborting.", file=sys.stderr) return None changesFiles = os.listdir(qt5_dir + "/qtbase/dist") # Every version released has a 'changes-<version #>' file describing what # changed - we will use that to figure out the closest version number to # this checked out code. # Only include versions that have version numbers that conform to standard # version numbering rules (major.minor.release) regex = r"^changes-([0-9.]*)" src = re.search versions = [m.group(1) for changesFile in changesFiles for m in [src(regex, changesFile)] if m] # Fetch version from qtbase/.cmake.conf cmake_conf_path = qt5_dir + "/qtbase/.cmake.conf" if os.path.exists(cmake_conf_path): # Qt6 uses CMake, and we can determine version from .cmake.conf cmake_conf_file = open(cmake_conf_path, 'r') qt6_version = "" for line in cmake_conf_file: if "QT_REPO_MODULE_VERSION" in line: qt6_version = line.split('"')[1] break if qt6_version: versions.append(qt6_version) versions.sort(key=lambda s: list(map(int, s.split('.')))) return versions[-1] if __name__ == "__main__": if not os.path.exists("qtbase"): print("qtbase doesn't exist. Please run from base of qt5 repo. aborting.", file=sys.stderr) sys.exit(1) print(qt_version("."))
Python
0
f9b38f675df9752a4b5309df059c6d15a1e1b3c2
Add module for range support.
ex_range.py
ex_range.py
from collections import namedtuple from vintage_ex import EX_RANGE_REGEXP import location EX_RANGE = namedtuple('ex_range', 'left left_offset separator right right_offset') def get_range_parts(range): parts = EX_RANGE_REGEXP.search(range).groups() return EX_RANGE( left=parts[1], left_offset=parts[3] or '0', separator=parts[5], right=parts[7], right_offset=parts[9] or '0' ) def calculate_range(view, range): parsed_range = get_range_parts(range) if parsed_range.left == '%': left, left_offset = '1', '0' right, right_offset = '$', '0' elif parsed_range.separator: left, left_offset = parsed_range.left, parsed_range.left_offset right, right_offset = parsed_range.right, parsed_range.right_offset return calculate_range_part(view, left) + int(left_offset), \ calculate_range_part(view, right) + int(right_offset) def calculate_range_part(view, p): if p.isdigit(): return int(p) if p.startswith('/') or p.startswith('?'): if p.startswith('?'): return location.reverse_search(view, p[1:-1], end=view.sel()[0].begin()) return location.search(view, p[1:-1]) if p in ('$', '.'): return location.calculate_relative_ref(view, p)
Python
0
15cf6b5d35e2fbaf39d419ddbe5da1b16247ccaa
add test_parse_table_options.py
tests/test_parse_table_options.py
tests/test_parse_table_options.py
#!/usr/bin/env python3 """ `header` and `markdown` is checked by `test_to_bool` instead """ from .context import pandoc_tables import panflute def test_parse_table_options(): options = { 'caption': None, 'alignment': None, 'width': None, 'table-width': 1.0, 'header': True, 'markdown': True, 'include': None } raw_table_list = [['1', '2', '3', '4'], ['5', '6', '7', '8']] # check init is preserved assert pandoc_tables.parse_table_options( options, raw_table_list) == options # check caption options['caption'] = '**sad**' assert str(pandoc_tables.parse_table_options( options, raw_table_list )['caption'][0]) == 'Strong(Str(sad))' # check alignment options['alignment'] = 'LRCD' assert pandoc_tables.parse_table_options( options, raw_table_list )['alignment'] == [ 'AlignLeft', 'AlignRight', 'AlignCenter', 'AlignDefault' ] options['alignment'] = 'LRC' assert pandoc_tables.parse_table_options( options, raw_table_list )['alignment'] == [ 'AlignLeft', 'AlignRight', 'AlignCenter', 'AlignDefault' ] # check width options['width'] = [0.1, 0.2, 0.3, 0.4] assert pandoc_tables.parse_table_options( options, raw_table_list )['width'] == [0.1, 0.2, 0.3, 0.4] # auto-width raw_table_list = [ ['asdfdfdfguhfdhghfdgkla', '334\n2', '**la**', '4'], ['5', '6', '7', '8'] ] options['width'] = None options['table-width'] = 1.2 assert pandoc_tables.parse_table_options( options, raw_table_list )['width'] == [22 / 32 * 1.2, 3 / 32 * 1.2, 6 / 32 * 1.2, 1 / 32 * 1.2] return
Python
0.000005
71dd485685a481f21e03af6db5a4bc1f91a64ce9
Add service settings migration
nodeconductor/structure/migrations/0018_service_settings_plural_form.py
nodeconductor/structure/migrations/0018_service_settings_plural_form.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('structure', '0017_add_azure_service_type'), ] operations = [ migrations.AlterModelOptions( name='servicesettings', options={'verbose_name': 'Service settings', 'verbose_name_plural': 'Service settings'}, ), ]
Python
0
93a2caab2963423e40714ada59abcfeab5c57aea
Add NetBox pillar
salt/pillar/netbox.py
salt/pillar/netbox.py
# -*- coding: utf-8 -*- ''' A module that adds data to the Pillar structure from a NetBox API. Configuring the NetBox ext_pillar ==================================== .. code-block:: yaml ext_pillar: - netbox: api_url: http://netbox_url.com/api/ The following are optional, and determine whether or not the module will attempt to configure the ``proxy`` pillar data for use with the napalm proxy-minion: .. code-block:: yaml proxy_return: True proxy_username: admin api_token: 123abc Create a token in your NetBox instance at http://netbox_url.com/user/api-tokens/ By default, this module will query the NetBox API for the platform associated with the device, and use the 'NAPALM driver' field to set the napalm proxy-minion driver. (Currently only 'napalm' is supported for drivertype.) This module assumes you will use SSH keys to authenticate to the network device If password authentication is desired, it is recommended to create another ``proxy`` key in pillar_roots (or git_pillar) with just the ``passwd`` key and use :py:func:`salt.renderers.gpg <salt.renderers.gpg>` to encrypt the value. If any additional options for the proxy setup are needed they should also be configured in pillar_roots. 
''' from __future__ import absolute_import, print_function, unicode_literals import logging try: import requests import ipaddress _HAS_DEPENDENCIES = True except ImportError: _HAS_DEPENDENCIES = False log = logging.getLogger(__name__) def __virtual__(): return _HAS_DEPENDENCIES def ext_pillar(minion_id, pillar, *args, **kwargs): ''' Query NetBox API for minion data ''' # Pull settings from kwargs api_url = kwargs['api_url'].rstrip('/') api_token = kwargs.get('api_token', None) proxy_username = kwargs.get('proxy_username', None) proxy_return = kwargs.get('proxy_return', True) ret = {} headers = {} if api_token: headers['Authorization'] = 'Token ' + api_token # Fetch device from API device_results = requests.get( api_url + '/dcim/devices/', params={'name': minion_id, }, headers=headers, ) # Check status code for API call if device_results.status_code != requests.codes.ok: log.warn('API query failed for "%s", status code: %d', minion_id, device_results.status_code) # Assign results from API call to "netbox" key try: devices = device_results.json()['results'] if len(devices) == 1: ret['netbox'] = devices[0] elif len(devices) > 1: log.error('More than one device found for "%s"', minion_id) except Exception: log.error('Device not found for "%s"', minion_id) if proxy_return: # Attempt to add "proxy" key, based on platform API call try: # Fetch device from API platform_results = requests.get( ret['netbox']['platform']['url'], headers=headers, ) # Check status code for API call if platform_results.status_code != requests.codes.ok: log.info('API query failed for "%s", status code: %d', minion_id, platform_results.status_code) # Assign results from API call to "proxy" key if the platform has a # napalm_driver defined. 
napalm_driver = platform_results.json().get('napalm_driver') if napalm_driver: ret['proxy'] = { 'host': str(ipaddress.IPv4Interface( ret['netbox']['primary_ip4']['address']).ip), 'driver': napalm_driver, 'proxytype': 'napalm', } if proxy_username: ret['proxy']['username'] = proxy_username except Exception: log.debug( 'Could not create proxy config data for "%s"', minion_id) return ret
Python
0
578f532bb7a6c75dd6526b9fe130879e0a7cc0e6
Pick best out of two outputs
session2/select_best_output.py
session2/select_best_output.py
import argparse, logging, codecs from nltk.translate.bleu_score import sentence_bleu as bleu def setup_args(): parser = argparse.ArgumentParser() parser.add_argument('out1', help = 'Output 1') parser.add_argument('out2', help = 'Output 2') parser.add_argument('input', help = 'Input') parser.add_argument('output', help='Selected Output') args = parser.parse_args() return args def main(): logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) args = setup_args() logging.info(args) out1_lines = codecs.open(args.out1, 'r', 'utf-8').readlines() out2_lines = codecs.open(args.out2, 'r', 'utf-8').readlines() picked_num1 = 0 picked_num2 = 0 input_lines = codecs.open(args.input, 'r').readlines() fw = codecs.open(args.output, 'w', 'utf-8') for index, (out1, out2, input) in enumerate(zip(out1_lines, out2_lines, input_lines)): q2 = input.split('END')[2] bleu_1 = bleu([q2.split()], out1, weights=(1.0,)) bleu_2 = bleu([q2.split()], out2, weights=(1.0,)) logging.info('Index:%d Bleu1: %f Bleu2: %f'% (index, bleu_1, bleu_2)) if bleu_1 > bleu_2: picked_num1 += 1 fw.write(out1.strip() + '\n') else: picked_num2 += 1 fw.write(out2.strip() + '\n') if __name__ == '__main__': main()
Python
1
a595665ab75c5995f0cb3af7463215f9cd7aabf7
Add Miguel Grinberg's Flask API decorators from his PyCon talk
sandman/decorators.py
sandman/decorators.py
import functools import hashlib from flask import jsonify, request, url_for, current_app, make_response, g from .rate_limit import RateLimit from .errors import too_many_requests, precondition_failed, not_modified def json(f): @functools.wraps(f) def wrapped(*args, **kwargs): rv = f(*args, **kwargs) status_or_headers = None headers = None if isinstance(rv, tuple): rv, status_or_headers, headers = rv + (None,) * (3 - len(rv)) if isinstance(status_or_headers, (dict, list)): headers, status_or_headers = status_or_headers, None if not isinstance(rv, dict): rv = rv.to_json() rv = jsonify(rv) if status_or_headers is not None: rv.status_code = status_or_headers if headers is not None: rv.headers.extend(headers) return rv return wrapped def rate_limit(limit, per, scope_func=lambda: request.remote_addr): def decorator(f): @functools.wraps(f) def wrapped(*args, **kwargs): if current_app.config['USE_RATE_LIMITS']: key = 'rate-limit/%s/%s/' % (f.__name__, scope_func()) limiter = RateLimit(key, limit, per) if not limiter.over_limit: rv = f(*args, **kwargs) else: rv = too_many_requests('You have exceeded your request rate') #rv = make_response(rv) g.headers = { 'X-RateLimit-Remaining': str(limiter.remaining), 'X-RateLimit-Limit': str(limiter.limit), 'X-RateLimit-Reset': str(limiter.reset) } return rv else: return f(*args, **kwargs) return wrapped return decorator def paginate(max_per_page=10): def decorator(f): @functools.wraps(f) def wrapped(*args, **kwargs): page = request.args.get('page', 1, type=int) per_page = min(request.args.get('per_page', max_per_page, type=int), max_per_page) query = f(*args, **kwargs) p = query.paginate(page, per_page) pages = {'page': page, 'per_page': per_page, 'total': p.total, 'pages': p.pages} if p.has_prev: pages['prev'] = url_for(request.endpoint, page=p.prev_num, per_page=per_page, _external=True, **kwargs) else: pages['prev'] = None if p.has_next: pages['next'] = url_for(request.endpoint, page=p.next_num, per_page=per_page, _external=True, 
**kwargs) else: pages['next'] = None pages['first'] = url_for(request.endpoint, page=1, per_page=per_page, _external=True, **kwargs) pages['last'] = url_for(request.endpoint, page=p.pages, per_page=per_page, _external=True, **kwargs) return jsonify({ 'urls': [item.get_url() for item in p.items], 'meta': pages }) return wrapped return decorator def cache_control(*directives): def decorator(f): @functools.wraps(f) def wrapped(*args, **kwargs): rv = f(*args, **kwargs) rv = make_response(rv) rv.headers['Cache-Control'] =', '.join(directives) return rv return wrapped return decorator def no_cache(f): return cache_control('no-cache', 'no-store', 'max-age=0')(f) def etag(f): @functools.wraps(f) def wrapped(*args, **kwargs): # only for HEAD and GET requests assert request.method in ['HEAD', 'GET'],\ '@etag is only supported for GET requests' rv = f(*args, **kwargs) rv = make_response(rv) etag = '"' + hashlib.md5(rv.get_data()).hexdigest() + '"' rv.headers['ETag'] = etag if_match = request.headers.get('If-Match') if_none_match = request.headers.get('If-None-Match') if if_match: etag_list = [tag.strip() for tag in if_match.split(',')] if etag not in etag_list and '*' not in etag_list: rv = precondition_failed() elif if_none_match: etag_list = [tag.strip() for tag in if_none_match.split(',')] if etag in etag_list or '*' in etag_list: rv = not_modified() return rv return wrapped
Python
0
ca8d7773a2d1a5ce4195ce693ccd66bbf53af394
Read in proteinGroupts.txt from MS data
proteinGroupsParser.py
proteinGroupsParser.py
# -*- coding: utf-8 -*- """ Created on Sat Oct 10 08:46:25 2015 @author: student """ import pandas as pd #import numpy as np # read in file #peptideNames = """'Protein IDs’, 'Majority protein IDs’, 'Peptide counts (all)’, 'Peptide counts (razor+unique)’, 'Peptide counts (unique)’, 'Fasta headers’, 'Number of proteins’, 'Peptides’,'Razor + unique peptides’, 'Unique peptides’,'Peptides Control_Ub’, 'Peptides Control_UbP’,'Peptides Control_WCL’, 'Peptides Control_WCLP’,'Peptides Pynd_5FC_Ub’, 'Peptides Pynd_5FC_UbP’,'Peptides Pynd_5FC_WCL’, 'Peptides Pynd_5FC_WCLP’,'Peptides Pynd_AlkKO_Ub’, 'Peptides Pynd_AlkKO_UbP’,'Peptides Pynd_AlkKO_WCL’, 'Peptides Pynd_AlkKO_WCLP’""" #colIndices = 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 18, 19, 20, 21, 38, 39, 40, 41, 42, 43, 44, 45, 68, 69, 70, 71, 88, 89, 90, 91, 92, 93, 94, 95, 118, 119, 120, 121, 138, 139, 140, 141, 142, 143, 144, 145, 160, 161, 162, 163, 164, 165, 166, 167, 176, 177, 178, 179, 196, 197, 198, 199, 200, 201, 202, 203, 226, 227, 228, 229, 246, 247, 248, 249, 250, 251, 252, 253, 268, 277, 278, 279, 280, 297, 298, 299, 300, 301, 302, 303, 304, 327, 328, 329, 330, 347, 348, 349, 350, 351, 352, 353, 354, 377, 378, 379, 380, 397, 398, 399, 400, 401, 402, 403, 404, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433 data = '/Users/student/Downloads/PUBS 2015 MS files/proteinGroups.txt' df = pd.read_table(data, usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 18, 19, 20, 21, 38, 39, 40, 41, 42, 43, 44, 45, 68, 69, 70, 71, 88, 89, 90, 91, 92, 93, 94, 95, 118, 119, 120, 121, 138, 139, 140, 141, 142, 143, 144, 145, 160, 161, 162, 163, 164, 165, 166, 167, 176, 177, 178, 179, 196, 197, 198, 199, 200, 201, 202, 203, 226, 227, 228, 229, 246, 247, 248, 249, 250, 251, 252, 253, 268, 277, 278, 279, 280, 297, 298, 299, 300, 301, 302, 303, 304, 327, 328, 329, 330, 347, 348, 349, 350, 351, 352, 353, 354, 377, 378, 379, 380, 397, 398, 399, 400, 401, 402, 403, 404, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 
432, 433]) # Junk #print df.dtypes #print df['Intensity'] #for i in df.index: # for j in df.columns: # print df.index[i], df.columns[j] # print df.index.values #print df.columns.values #print df.keys # print '%s, %s' % (df.index.values, df[i].columns.values)
Python
0
3509585cd14bb51fb00b60df1dcb295bc561d679
Add _version.py file
py/desidatamodel/_version.py
py/desidatamodel/_version.py
__version__ = '0.2.0.dev71'
Python
0.000004
b3383e6c428eccdd67ddc4cfa90e6d22da35412a
Add lib/sccache.py helper script
script/lib/sccache.py
script/lib/sccache.py
import os import sys from config import TOOLS_DIR VERSION = '0.2.6' SUPPORTED_PLATFORMS = { 'cygwin': 'windows', 'darwin': 'mac', 'linux2': 'linux', 'win32': 'windows', } def is_platform_supported(platform): return platform in SUPPORTED_PLATFORMS def get_binary_path(): platform = sys.platform if not is_platform_supported(platform): return None platform_dir = SUPPORTED_PLATFORMS[platform] path = os.path.join(TOOLS_DIR, 'sccache', VERSION, platform_dir, 'sccache') if platform_dir == 'windows': path += '.exe' return path
Python
0.000001
b7459feac37753928fcfc1fe25a0f40d21d89ecf
add collections07.py
trypython/stdlib/collections07.py
trypython/stdlib/collections07.py
# coding: utf-8 """ collections.namedtupleについてのサンプルです。 namedtupleの基本的な使い方については、collections04.py を参照。 """ import collections as cols from trypython.common.commoncls import SampleBase from trypython.common.commonfunc import pr class Sample(SampleBase): def exec(self): MyVal01 = cols.namedtuple('MyVal01', ['name', 'value']) obj1 = MyVal01('hello world', 'value01') pr('obj', obj1) # namedtuple は、__dict__ を持たない try: pr('__dict__', obj1.__dict__) except AttributeError as e: pr('__dict__', e) # namedtuple は、__slots__ に 空タプルが設定される pr('__slots__', obj1.__slots__) # ------------------------------------------------------------ # namedtuple は、通常のtupleと同様に利用できる。 # さらに、以下の3つのメソッドを持つ。 # ・_make # ・_asdict # ・_replace # ------------------------------------------------------------ # ------------------------------------------------------------ # _make メソッド # -------------------- # 既存のsequence, iterable から新しいオブジェクトを構築する。 # csvやデータベースなどの行からオブジェクトを作成するのに便利。 # ------------------------------------------------------------ rows = (['hello', 'value01'], ['world', 'value02']) for item in (MyVal01._make(row) for row in rows): pr('item', item) # ------------------------------------------------------------ # _asdict メソッド # -------------------- # フィールド名と値のOrderedDictを返す。 # 戻り値が OrderedDict なので、フィールドの並び順の通りに取得できる。 # (*) OrderedDictになったのは、python 3.1 から。 # ------------------------------------------------------------ obj_dict = obj1._asdict() pr('obj_dict', obj_dict) # 辞書から namedtuple を構築する場合は **kwargs 形式で渡す obj2 = MyVal01(**obj_dict) pr('obj2', obj2) pr('eq', obj1 == obj2) # ------------------------------------------------------------ # _replace メソッド # -------------------- # 指定したフィールドの値を置き換えた、新しい namedtuple を返す。 # namedtuple は、immutableなので、常に新しいオブジェクトを返す。 # ------------------------------------------------------------ obj3 = obj2._replace(name='world hello', value='value03') pr('obj3', obj3) pr('eq', obj3 == obj2) # ------------------------------------------------------------ # namedtuple 
に、独自のメソッドを持たせる場合は # namedtuple を親クラスにしたクラスを新たに定義する。 # ------------------------------------------------------------ class MyVal02(cols.namedtuple('MyVal02', ['name'])): __slots__ = () @property def upper_name(self): return self.name.upper() obj4 = MyVal02('hello world 2') pr('obj4.name', obj4.name) pr('obj4.upper_name', obj4.upper_name) def go(): obj = Sample() obj.exec() if __name__ == '__main__': go()
Python
0
8302536cafa07a078cfb6629b5e9cc85e1798e1e
Add Appalachian Regional Commission.
inspectors/arc.py
inspectors/arc.py
#!/usr/bin/env python import datetime import logging import os from urllib.parse import urljoin from bs4 import BeautifulSoup from utils import utils, inspector # http://www.arc.gov/oig # Oldest report: 2003 # options: # standard since/year options for a year range to fetch from. # # Notes for IG's web team: # AUDIT_REPORTS_URL = "http://www.arc.gov/about/OfficeofInspectorGeneralAuditandInspectionReports.asp" SEMIANNUAL_REPORTS_URL = "http://www.arc.gov/about/OfficeofinspectorGeneralSemiannualReports.asp" def run(options): year_range = inspector.year_range(options) # Pull the audit reports for url in [AUDIT_REPORTS_URL, SEMIANNUAL_REPORTS_URL]: doc = BeautifulSoup(utils.download(url)) results = doc.select("table p > a") for result in results: report = report_from(result, url, year_range) if report: inspector.save_report(report) def report_from(result, landing_url, year_range): report_url = urljoin(landing_url, result.get('href')) report_filename = report_url.split("/")[-1] report_id, _ = os.path.splitext(report_filename) try: title = result.parent.find("em").text except AttributeError: title = result.parent.contents[0] estimated_date = False try: published_on_text = title.split("–")[-1].strip() published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y') except ValueError: # For reports where we can only find the year, set them to Nov 1st of that year published_on_year = int(result.find_previous("strong").text.replace("Fiscal Year ", "")) published_on = datetime.datetime(published_on_year, 11, 1) estimated_date = True if published_on.year not in year_range: logging.debug("[%s] Skipping, not in requested range." 
% report_url) return report = { 'inspector': 'arc', 'inspector_url': 'http://www.arc.gov/oig', 'agency': 'arc', 'agency_name': 'Appalachian Regional Commission', 'report_id': report_id, 'url': report_url, 'title': title, 'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"), } if estimated_date: report['estimated_date'] = estimated_date return report utils.run(run) if (__name__ == "__main__") else None
Python
0
d834216bcc93eac7b324d95498d9580e3f769dfa
Add Government Printing Office.
inspectors/gpo.py
inspectors/gpo.py
#!/usr/bin/env python import datetime import logging from urllib.parse import urljoin from bs4 import BeautifulSoup from utils import utils, inspector # http://www.gpo.gov/oig/ # Oldest report: 2004 # options: # standard since/year options for a year range to fetch from. # # Notes for IG's web team: # AUDIT_REPORTS_URL = "http://www.gpo.gov/oig/audits.htm" SEMIANNUAL_REPORTS_URL = "http://www.gpo.gov/oig/semi-anual.htm" HEADER_TITLES = [ 'Report #', 'Date', ] def run(options): year_range = inspector.year_range(options) # Pull the reports for url in [AUDIT_REPORTS_URL, SEMIANNUAL_REPORTS_URL]: doc = BeautifulSoup(utils.download(url)) results = doc.select("div.section1 div.ltext > table tr") if not results: results = doc.select("td.three-col-layout-middle div.ltext > table tr") if not results: raise AssertionError("No report links found for %s" % url) for result in results: if (not result.text.strip() or result.find("th") or result.find("strong") or result.contents[1].text in HEADER_TITLES ): # Skip header rows continue report = report_from(result, url, year_range) if report: inspector.save_report(report) def report_from(result, landing_url, year_range): title = result.select("td")[-1].text if "contains sensitive information" in title: unreleased = True report_url = None report_id = "-".join(title.split())[:50] else: unreleased = False link = result.find("a") report_id = link.text report_url = urljoin(landing_url, link.get('href')) estimated_date = False try: published_on = datetime.datetime.strptime(report_id.strip(), '%m.%d.%y') except ValueError: published_on_year_text = result.find_previous("th").text published_on_year = int(published_on_year_text.replace("Fiscal Year ", "")) published_on = datetime.datetime(published_on_year, 11, 1) estimated_date = True if published_on.year not in year_range: logging.debug("[%s] Skipping, not in requested range." 
% report_url) return report = { 'inspector': 'gpo', 'inspector_url': 'http://www.gpo.gov/oig/', 'agency': 'gpo', 'agency_name': 'Government Printing Office', 'file_type': 'pdf', 'report_id': report_id, 'url': report_url, 'title': title, 'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"), } if estimated_date: report['estimated_date'] = estimated_date if unreleased: report['unreleased'] = unreleased report['landing_url'] = landing_url return report utils.run(run) if (__name__ == "__main__") else None
Python
0
c2eeb3eb6909b6763e4ae6d4110f316f4d9b4e65
Add a generator for clocking primitives
timing/tools/gen_clk.py
timing/tools/gen_clk.py
import random # CABGA400 clk_pins = "J2 K2 K3 L5 L7 M2 V1 Y2 R5 Y5 R7 W7 T10 W10 W11 Y12 W14 T13 N19 M19 M17 L20 L16 L13 E13 E12".split(" ") banks = "7 7 7 6 6 6 5 5 5 5 4 4 4 4 3 3 3 3 2 2 2 1 1 1 0 0".split(" ") plls = ["PLL_LLC", "PLL_LRC", "PLL_ULC"] pll_pins = ["CLKOP", "CLKOS", "CLKOS2", "CLKOS3", "CLKOS4", "CLKOS5"] eclkdiv = [ "ECLKDIV_CORE_R55C48A", "ECLKDIV_CORE_R55C48B", "ECLKDIV_CORE_R55C48C", "ECLKDIV_CORE_R55C48D", "ECLKDIV_CORE_R55C49A", "ECLKDIV_CORE_R55C49B", "ECLKDIV_CORE_R55C49C", "ECLKDIV_CORE_R55C49D", "ECLKDIV_CORE_R55C50A", "ECLKDIV_CORE_R55C50B", "ECLKDIV_CORE_R55C50C", "ECLKDIV_CORE_R55C50D", ] central_dcc = [ "DCC_C0", "DCC_C1", "DCC_C2", "DCC_C3", ] central_dcs = [ "DCS0", ] N = 20 print("module top(") for pin, bank in zip(clk_pins, banks): print(' (* LOC="{p}", IO_TYPE="{t}" *) input pin_{p},'.format(p=pin, t=("LVCMOS18H" if bank in ("3", "4", "5") else "LVCMOS33"))) for pll in plls: print(' input pllin_{},'.format(pll)) print(" input d,") print(" output q") print(");") print(" reg [{}:0] r;".format(N)) print(" always @* r[0] = d;") print(" assign q = r[{}];".format(N)) print(" wire [3:0] ctrl = r[5:2];") clock_sources = [] for pin in clk_pins: clock_sources.append(("pin", pin)) for pll in plls: for pll_pin in pll_pins: clock_sources.append(("pll", (pll, pll_pin))) for div in eclkdiv: clock_sources.append(("eclkdiv", div)) for dcc in central_dcc: clock_sources.append(("dcc", dcc)) for dcs in central_dcs: clock_sources.append(("dcs", dcs)) random.shuffle(clock_sources) used_plls = set() j = 0 def get_source(): global j srctype, src = clock_sources.pop() if srctype == "pin": clkwire = "pin_{}".format(src) elif srctype == "pll": used_plls.add(src[0]) clkwire = "{}_{}".format(src[0], src[1]) elif srctype == "eclkdiv": print(' wire eclkdivo_{};'.format(src)) print(' (* LOC="{}" *)'.format(src)) print(' ECLKDIV_CORE #(') print(' .ECLK_DIV("{}")'.format(random.choice(["DISABLE", "2", "3P5", "4", "5"]))) print(' ) div_{} ('.format(src)) print(' 
.ECLKIN(r[{}]),'.format(j + 1)) print(' .SLIP(ctrl[1]),') print(' .DIVRST(ctrl[2]),') print(' .DIVOUT(eclkdivo_{})'.format(src)) print(' );') clkwire = "eclkdivo_{}".format(src) elif srctype == "dcc": print(' wire dcco_{};'.format(src)) print(' (* LOC="{}" *)'.format(src)) print(' DCC cdcc_{d} (.CLKI(r[{r}]), .CE(ctrl[1]), .CLKO(dcco_{d}));'.format(r=j+1, d=src)) clkwire = "dcco_{}".format(src) elif srctype == "dcs": print(' wire dcso;') clka = get_source() clkb = get_source() print(' DCS dcs_{} ('.format(src)) print(' .CLK0({}),'.format(clka)) print(' .CLK1({}),'.format(clkb)) print(' .SEL(ctrl[0]),') print(' .SELFORCE(ctrl[1]),') print(' .DCSOUT(dcso)') print(' );') clkwire = "dcso" if srctype not in ("dcc", "dcs") and random.randint(0, 1) == 1: dccwire = "gclk_{}".format(j) print(' wire {};'.format(dccwire)) print(' DCC #(.DCCEN("1")) dcc_{i} (.CLKI({clki}), .CE(ctrl[{ctrl}]), .CLKO({clko}));'.format( i=j, clki=clkwire, ctrl=random.randint(0, 3), clko=dccwire )) clkwire = dccwire j += 1 return clkwire for i in range(N): clkwire = get_source() print(' always @(posedge {clk}) r[{i} + 1] <= r[{i}];'.format(clk=clkwire, i=i)) for pll in used_plls: print() for sig in pll_pins: print(' wire {}_{};'.format(pll, sig)) print(' (* LOC="{}" *)'.format(pll)) print(' PLL_CORE pll_{} ('.format(pll)) print(' .REFCK(pllin_{}),'.format(pll)) for sig in pll_pins: print(' .{}({}_{}),'.format(sig, pll, sig)) print(' .FBKCK({}_CLKOP)'.format(pll)) print(' );') print('endmodule')
Python
0
d3c9a6bdc1b8cfb56f9ad408f5257b9ac518b2ac
Add preprocessor
scripts/preprocess.py
scripts/preprocess.py
#!/usr/bin/env python import argparse import os def preprocess(path): includes = set() res = [] def preprocess_line(path, line): if line.strip().startswith('#'): line = line.strip() if line.startswith('#include') and len(line.split('"')) >= 3: lx = line.split('"') relpath = ''.join(lx[1:len(lx) - 1]) target_path = os.path.dirname(path) + '/' + relpath if target_path.startswith('/'): target_path = target_path[1:] preprocess_path(os.path.normpath(target_path)) return '\n' elif line.startswith('#pragma'): if ''.join(line.split(' ')[1:]).strip() == 'once': return '' return line def preprocess_path(path): if path not in includes: has_not_started = True includes.add(path) for line in open(path): s = preprocess_line(path, line) if has_not_started and s.strip() is not "": prefix = '//===== {} =====\n\n'.format(os.path.basename(path)) res.append(prefix) has_not_started = False res.append(s.rstrip()) preprocess_path(path) print('\n'.join(res)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('filepath', nargs=1, help='cpp file') args = parser.parse_args() filepath = args.filepath[0] preprocess(filepath)
Python
0.000138
e285c097be60f9db5fae075f21b7450f403640d2
add scaffold for an AvailabilityAssessment class
python/cvmfs/availability.py
python/cvmfs/availability.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created by René Meusel This file is part of the CernVM File System auxiliary tools. """ import cvmfs class WrongRepositoryType(Exception): def __init__(self, repo, expected_type): assert repo.type != expected_type self.repo = repo self.expected_type = expected_type def __str__(self): return self.repo.fqrn + " is of type '" + self.repo.type + "' but '" + self.expected_type + "' was expected" class AvailabilityAssessment: def _check_repo_type(self, repo, expected_type): if repo.has_repository_type() and repo.type != expected_type: raise WrongRepositoryType(repo, expected_type) return True; def __init__(self, stratum0_repository, stratum1_repositories = []): self._check_repo_type(stratum0_repository, 'stratum0') for stratum1 in stratum1_repositories: self._check_repo_type(stratum1, 'stratum1') self.stratum0 = stratum0_repository self.stratum1s = stratum1_repositories def assess(self): pass
Python
0
0e9da5d0099b9c7b527250d6bf8051242e77103a
Add script for showing the results
triangular_lattice/distances_analyze.py
triangular_lattice/distances_analyze.py
#!/usr/bin/env python # -*- coding:utf-8 -*- # # written by Shotaro Fujimoto # 2016-10-12 import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d.axes3d import Axes3D if __name__ == '__main__': # result_data_path = "./results/data/distances/beta=0.00_161012_171430.npz" # result_data_path = "./results/data/distances/beta=5.00_161012_171649.npz" # result_data_path = "./results/data/distances/beta=10.00_161012_172119.npz" # result_data_path = "./results/data/distances/beta=15.00_161012_172209.npz" # result_data_path = "./results/data/distances/beta=20.00_161012_172338.npz" data = np.load(result_data_path) beta = data['beta'] num_of_strings = data['num_of_strings'] L = data['L'] frames = data['frames'] distance_list = data['distance_list'] path_length = data['path_length'] fig = plt.figure() ax = fig.add_subplot(111, projection='3d') hist, xedges, yedges = np.histogram2d(distance_list, path_length, bins=100) xpos, ypos = np.meshgrid(xedges[:-1] + (xedges[1] - xedges[0]) / 2., yedges[:-1] + (yedges[1] - yedges[0]) / 2.) zpos = hist.T ax.plot_wireframe(xpos, ypos, zpos, rstride=1) ax.plot(xpos[0], xpos[0], lw=2) ax.set_aspect('equal') ax.set_xlim(xedges[0], xedges[-1]) ax.set_ylim(yedges[0], yedges[-1]) ax.set_xlabel('Distance') ax.set_ylabel('Path length') ax.set_title('Path length and distances between two points in the cluster' + r'($\beta = %2.2f$)' % beta) plt.show()
Python
0
29e170f9f92f8327c71a9dfc2b9fb9e18947db72
create predictions on pre-trained models
source/generate_predictions.py
source/generate_predictions.py
import numpy as np import pandas as pd from sklearn.externals import joblib from data_preprocessing import join_strings from model import mlb, count_vectorizer_test_x, tfidf_vectorizer_test_x, file_cnt, file_tfidf count_vectorizer_model, tfidf_vectorizer_model = joblib.load(file_cnt), joblib.load(file_tfidf) print("Both the trained models have been imported successfully!") print() print("Making predictions...") pred1 = count_vectorizer_model.predict(count_vectorizer_test_x.toarray()) pred2 = tfidf_vectorizer_model.predict(tfidf_vectorizer_test_x.toarray()) # Combine predictions and map the labels if the values do not equal 0, else assign empty string arr = np.where((pred1 + pred2) != 0, mlb.classes_, "") # Load the array into a DataFrame constructor and join non-empty strings predictions = pd.DataFrame(arr).apply(join_strings, axis=1).to_frame("tags") # Submit predictions print("Submitting predictions...") predictions.to_csv("tags.tsv", index=False) print("done")
Python
0.000001
c6358b282ea28dd113c9053dab0fe2fa66f4d59d
Allow metrics to start with a braces expression
webapp/graphite/render/grammar.py
webapp/graphite/render/grammar.py
from graphite.thirdparty.pyparsing import * ParserElement.enablePackrat() grammar = Forward() expression = Forward() # Literals intNumber = Combine( Optional('-') + Word(nums) )('integer') floatNumber = Combine( Optional('-') + Word(nums) + Literal('.') + Word(nums) )('float') aString = quotedString('string') # Use lookahead to match only numbers in a list (can't remember why this is necessary) afterNumber = FollowedBy(",") ^ FollowedBy(")") ^ FollowedBy(LineEnd()) number = Group( (floatNumber + afterNumber) | (intNumber + afterNumber) )('number') boolean = Group( CaselessKeyword("true") | CaselessKeyword("false") )('boolean') # Function calls arg = Group( boolean | number | aString | expression ) args = delimitedList(arg)('args') func = Word(alphas+'_', alphanums+'_')('func') call = Group( func + Literal('(').suppress() + args + Literal(')').suppress() )('call') # Metric pattern (aka. pathExpression) validMetricChars = alphanums + r'''!#$%&"'*+-.:;<=>?@[\]^_`|~''' pathExpression = Combine( Optional(Word(validMetricChars)) + Combine( ZeroOrMore( Group( Literal('{') + Word(validMetricChars + ',') + Literal('}') + Optional( Word(validMetricChars) ) ) ) ) )('pathExpression') expression << Group(call | pathExpression)('expression') grammar << expression def enableDebug(): for name,obj in globals().items(): try: obj.setName(name) obj.setDebug(True) except: pass
from graphite.thirdparty.pyparsing import * ParserElement.enablePackrat() grammar = Forward() expression = Forward() # Literals intNumber = Combine( Optional('-') + Word(nums) )('integer') floatNumber = Combine( Optional('-') + Word(nums) + Literal('.') + Word(nums) )('float') aString = quotedString('string') # Use lookahead to match only numbers in a list (can't remember why this is necessary) afterNumber = FollowedBy(",") ^ FollowedBy(")") ^ FollowedBy(LineEnd()) number = Group( (floatNumber + afterNumber) | (intNumber + afterNumber) )('number') boolean = Group( CaselessKeyword("true") | CaselessKeyword("false") )('boolean') # Function calls arg = Group( boolean | number | aString | expression ) args = delimitedList(arg)('args') func = Word(alphas+'_', alphanums+'_')('func') call = Group( func + Literal('(').suppress() + args + Literal(')').suppress() )('call') # Metric pattern (aka. pathExpression) validMetricChars = alphanums + r'''!#$%&"'*+-.:;<=>?@[\]^_`|~''' pathExpression = Combine( Word(validMetricChars) + Combine( ZeroOrMore( Group( Literal('{') + Word(validMetricChars + ',') + Literal('}') + Optional( Word(validMetricChars) ) ) ) ) )('pathExpression') expression << Group(call | pathExpression)('expression') grammar << expression def enableDebug(): for name,obj in globals().items(): try: obj.setName(name) obj.setDebug(True) except: pass
Python
0.000005
1fdd1f306d45f6aeee91c7f016f7c37286ee3b3b
clear signing
lang/python/examples/howto/clear-sign-file.py
lang/python/examples/howto/clear-sign-file.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from __future__ import absolute_import, division, unicode_literals # Copyright (C) 2018 Ben McGinnes <ben@gnupg.org> # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License and the GNU # Lesser General Public Licensefor more details. # # You should have received a copy of the GNU General Public License and the GNU # Lesser General Public along with this program; if not, see # <http://www.gnu.org/licenses/>. import gpg import sys """ Clear-signs a file with a specified key. If entering both the key and the filename on the command line, the key must be entered first. """ if len(sys.argv) > 3: logrus = sys.argv[1] filename = " ".join(sys.argv[2:]) elif len(sys.argv) == 3: logrus = sys.argv[1] filename = sys.argv[2] elif len(sys.argv) == 2: logrus = sys.argv[1] filename = input("Enter the path and filename to sign: ") else: logrus = input("Enter the fingerprint or key ID to sign with: ") filename = input("Enter the path and filename to sign: ") with open(filename, "rb") as f: text = f.read() key = list(gpg.Context().keylist(pattern=logrus)) with gpg.Context(armor=True, signers=key) as c: signed_data, result = c.sign(text, mode=gpg.constants.sig.mode.CLEAR) with open("{0}.asc".format(filename), "wb") as f: f.write(signed_data)
Python
0
c199892e07217f164ae694d510b206bfa771090b
remove unused import
src/vmw/vco/components.py
src/vmw/vco/components.py
# Copyright (c) 2001-2010 Twisted Matrix Laboratories. # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from zope.interface import interface, declarations from zope.interface.adapter import AdapterRegistry # The following is taken almots as-is from twisted.python.components _vcoRegistry = AdapterRegistry() def _registered(registry, required, provided): """ Return the adapter factory for the given parameters in the given registry, or None if there is not one. """ return registry.get(required).selfImplied.get(provided, {}).get('') def registerAdapter(adapterFactory, origInterface, *interfaceClasses): """Register an adapter class. An adapter class is expected to implement the given interface, by adapting instances implementing 'origInterface'. An adapter class's __init__ method should accept one parameter, an instance implementing 'origInterface'. 
""" assert interfaceClasses, "You need to pass an Interface" # deal with class->interface adapters: if not isinstance(origInterface, interface.InterfaceClass): origInterface = declarations.implementedBy(origInterface) for interfaceClass in interfaceClasses: factory = _registered(_vcoRegistry, origInterface, interfaceClass) if factory is not None: raise ValueError("an adapter (%s) was already registered." % (factory, )) for interfaceClass in interfaceClasses: _vcoRegistry.register([origInterface], interfaceClass, '', adapterFactory) # add global adapter lookup hook for our newly created registry def _hook(iface, ob, lookup=_vcoRegistry.lookup1): factory = lookup(declarations.providedBy(ob), iface) if factory is None: return None else: return factory(ob) interface.adapter_hooks.append(_hook)
# Copyright (c) 2001-2010 Twisted Matrix Laboratories. # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from zope.interface import interface, implements, declarations from zope.interface.adapter import AdapterRegistry # The following is taken almots as-is from twisted.python.components _vcoRegistry = AdapterRegistry() def _registered(registry, required, provided): """ Return the adapter factory for the given parameters in the given registry, or None if there is not one. """ return registry.get(required).selfImplied.get(provided, {}).get('') def registerAdapter(adapterFactory, origInterface, *interfaceClasses): """Register an adapter class. An adapter class is expected to implement the given interface, by adapting instances implementing 'origInterface'. An adapter class's __init__ method should accept one parameter, an instance implementing 'origInterface'. 
""" assert interfaceClasses, "You need to pass an Interface" # deal with class->interface adapters: if not isinstance(origInterface, interface.InterfaceClass): origInterface = declarations.implementedBy(origInterface) for interfaceClass in interfaceClasses: factory = _registered(_vcoRegistry, origInterface, interfaceClass) if factory is not None: raise ValueError("an adapter (%s) was already registered." % (factory, )) for interfaceClass in interfaceClasses: _vcoRegistry.register([origInterface], interfaceClass, '', adapterFactory) # add global adapter lookup hook for our newly created registry def _hook(iface, ob, lookup=_vcoRegistry.lookup1): factory = lookup(declarations.providedBy(ob), iface) if factory is None: return None else: return factory(ob) interface.adapter_hooks.append(_hook)
Python
0.000001
f76c06acf52094cd13cdf7087fa8d3914c2b992a
Add interactive module
sirius/interactive.py
sirius/interactive.py
"""Interactive sirius module Use this module to define variables and functions to be globally available when using 'from sirius.interactive import *' """ from pyaccel.interactive import * import sirius.SI_V07 as si_model import sirius.BO_V901 as bo_model __all__ = [name for name in dir() if not name.startswith('_')] print('Names defined in sirius.interactive: ' + ', '.join(__all__) + '.\n')
Python
0.000001
f1e6926f964877acc3bfe0d667a199861b431ed7
add test_xadc
software/test_xadc.py
software/test_xadc.py
def main(wb): wb.open() regs = wb.regs # # # print("temperature: %f°C" %(regs.xadc_temperature.read()*503.975/4096 - 273.15)) print("vccint: %fV" %(regs.xadc_vccint.read()/4096*3)) print("vccaux: %fV" %(regs.xadc_vccaux.read()/4096*3)) print("vccbram: %fV" %(regs.xadc_vccbram.read()/4096*3)) # # # wb.close()
Python
0.000001
c2dab85f24e648c66daae847f19b605271ed858b
Add more threader tests
spec/threader_spec.py
spec/threader_spec.py
import queue from functools import partial from doublex import Spy, Mock from expects import expect, be from doublex_expects import have_been_called from pysellus import threader with description('the threader module'): with it('should create as many threads as the sum of len(values) of the supplied dict'): a_stream = Mock() another_stream = Mock() foo = Spy() a_function = Spy() another_function = Spy() streams_to_observers = { a_stream: [a_function], another_stream: [a_function, another_function] } threads = threader.build_threads(streams_to_observers, foo) expected_length = sum( len(fn_list) for fn_list in streams_to_observers.values() ) expect(len(threads)).to(be(expected_length)) with it('should create a properly initialized thread'): stream = Mock() observer = Spy() target = Spy().target_function thread = threader.make_thread(target, stream, observer) thread.start() thread.join() expect(target).to(have_been_called) with it('should call the target function with the correct arguments'): stream = Mock() observer = Spy() que = queue.Queue(maxsize=1) # Return a list with the stream and the observer fn target_function = lambda s, o: [s, o] # We can't return from a function running in another thread # so we put the value on a queue target_wrapper = lambda q, s, o: q.put(target_function(s, o)) # We define a partial so that we don't have to pass the queue # as a parameter to make_thread target_partial = partial(target_wrapper, que) thread = threader.make_thread(target_partial, stream, observer) thread.start() thread.join() result = que.get() # result is [stream, observer] expect(result[0]).to(be(stream)) expect(result[1]).to(be(observer))
from expects import expect, be from doublex import Spy, Mock from pysellus import threader with description('the threader module'): with it('should create as many threads as keys * values in the supplied dict'): a_stream = Mock() another_stream = Mock() foo = Spy() a_function = Spy() another_function = Spy() streams_to_observers = { a_stream: [a_function], another_stream: [a_function, another_function] } threads = threader.build_threads(streams_to_observers, foo) expected_length = sum( len(fn_list) for fn_list in streams_to_observers.values() ) expect(len(threads)).to(be(expected_length))
Python
0
e6a137026ff9b84814199517a452d354e121a476
Create quiz_3.py
laboratorios/quiz_3.py
laboratorios/quiz_3.py
# Given a time interval in seconds, compute how many additional seconds are
# needed for it to become an exact number of minutes. The program runs for
# 5 attempts.
#
# Fixes two defects in the original:
#  * when the input was already a multiple of 60, `60 - segundos % 60`
#    printed 60 instead of 0;
#  * the `if segundos / 60:` guard was meaningless (truthy for any non-zero
#    input) and made an input of 0 print a stale value from the previous
#    iteration.
chance = 0
while chance < 5:
    segundos = int(input("Introduzca sus segundos:"))
    chance += 1
    # (60 - s % 60) % 60 -> 0 when s is already an exact number of minutes,
    # otherwise the seconds remaining until the next full minute.
    segundos_restantes = (60 - segundos % 60) % 60
    print(segundos_restantes)
Python
0.001596
90851f4fdb1eb69bb3d6d953974d9a399d60bd13
add browser_render.py
5.动态内容/5.browser_render.py
5.动态内容/5.browser_render.py
#!/usr/bin/env python # coding:utf-8 # 渲染效果的类实现方式 # 定时器用于跟踪等待时间,并在截止时间到达时取消事件循环。否则,当出现网络问题时,事件循环就会无休止地运行下去 。 import re import csv import time import lxml.html try: from PySide.QtGui import QApplication from PySide.QtCore import QUrl, QEventLoop, QTimer from PySide.QtWebKit import QWebView except ImportError: from PyQt4.QtGui import QApplication from PyQt4.QtCore import QUrl, QEventLoop, QTimer from PyQt4.QtWebKit import QWebView class BrowserRender(QWebView): def __init__(self, display=True): self.app = QApplication([]) QWebView.__init__(self) if display: # show the browser self.show() def open(self, url, timeout=60): """Wait for download to complete and return result""" loop = QEventLoop() timer = QTimer() timer.setSingleShot(True) timer.timeout.connect(loop.quit) self.loadFinished.connect(loop.quit) self.load(QUrl(url)) timer.start(timeout * 1000) loop.exec_() if timer.isActive(): # downloaded successfully timer.stop() return self.html() else: # timed out print 'Request timed out:', url def html(self): """Shortcut to return the current HTML""" return self.page().mainFrame().toHtml() def find(self, pattern): """Find all elements that match the pattern""" return self.page().mainFrame().findAllElements(pattern) def attr(self, pattern, name, value): """Set attribute for matching elements""" for e in self.find(pattern): e.setAttribute(name, value) def text(self, pattern, value): """Set attribute for matching elements""" for e in self.find(pattern): e.setPlainText(value) def click(self, pattern): """Click matching elements""" for e in self.find(pattern): e.evaluateJavaScript("this.click()") def wait_load(self, pattern, timeout=60): """Wait for this pattern to be found in webpage and return matches""" deadline = time.time() + timeout while time.time() < deadline: self.app.processEvents() matches = self.find(pattern) if matches: return matches print 'Wait load timed out' def main(): br = BrowserRender() br.open('http://example.webscraping.com/search') 
br.attr('#search_term', 'value', '.') br.text('#page_size option:checked', '1000') br.click('#search') elements = br.wait_load('#results a') writer = csv.writer(open('countries.csv', 'w')) for country in [e.toPlainText().strip() for e in elements]: writer.writerow([country]) if __name__ == '__main__': main()
Python
0.000002
58ac46511964ca1dd3de25d2b6053eb785e3e281
Add outlier detection util script.
util/detect-outliers.py
util/detect-outliers.py
#!/usr/bin/env python2 # # Detect outlier faces (not of the same person) in a directory # of aligned images. # Brandon Amos # 2016/02/14 # # Copyright 2015-2016 Carnegie Mellon University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time start = time.time() import argparse import cv2 import itertools import os import glob import numpy as np np.set_printoptions(precision=2) from sklearn.covariance import EllipticEnvelope from sklearn.metrics.pairwise import euclidean_distances import openface fileDir = os.path.dirname(os.path.realpath(__file__)) modelDir = os.path.join(fileDir, '..', 'models') openfaceModelDir = os.path.join(modelDir, 'openface') def main(): parser = argparse.ArgumentParser() parser.add_argument('--networkModel', type=str, help="Path to Torch network model.", default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')) parser.add_argument('--imgDim', type=int, help="Default image dimension.", default=96) parser.add_argument('--cuda', action='store_true') parser.add_argument('--threshold', type=int, default=0.9) parser.add_argument('directory') args = parser.parse_args() net = openface.TorchNeuralNet(args.networkModel, args.imgDim, cuda=args.cuda) reps = [] paths = sorted(list(glob.glob(os.path.join(args.directory, '*.png')))) for imgPath in paths: reps.append(net.forwardPath(imgPath)) mean = np.mean(reps, axis=0) dists = euclidean_distances(reps, mean) outliers = [] for path, dist in zip(paths, dists): dist = dist.take(0) if dist > args.threshold: 
outliers.append((path, dist)) print("Found {} outlier(s) from {} images.".format(len(outliers), len(paths))) for path, dist in outliers: print(" + {} ({:0.2f})".format(path, dist)) if __name__ == '__main__': main()
Python
0
dad13d26aaf58ea186891e138ac9a10153363c8a
add vicon data extraction
script_r448_vicon_process.py
script_r448_vicon_process.py
# Driver script: extract Vicon motion-capture events/analog data for rat r448
# and synchronise them with the TDT recording clock.
# NOTE(review): `pickle` is imported but unused here — presumably left over
# from a save/load step; confirm before removing.
import pickle
import signal_processing as sig_proc

# Input directory for this recording session (relative to the script).
dir_name = '../data/r448/r448_131022_rH/'

img_ext = '.png'
save_img = True   # save generated figures to disk
show = False      # do not display figures interactively
save_obj = True   # NOTE(review): defined but not used below — verify intent

sp = sig_proc.Signal_processing(save_img, show, img_ext)

filename='p0_3RW05'
# Load the exported Vicon CSVs: one file of labelled events, one of analog channels.
file_events=sp.load_csv(dir_name+filename+'_EVENTS.csv')
file_analog=sp.load_csv(dir_name+filename+'_ANALOG.csv')
# Build the data dict from events, then merge in the analog channels.
data=sp.vicon_extract(file_events)
data=sp.vicon_extract(file_analog,data)
# Align the Vicon timeline with the TDT acquisition system.
data=sp.synch_vicon_with_TDT(data)

print('\n\n#################')
print('####   END   ####')
Python
0.000001
b46e7e31c5476c48e2a53d5a632354700d554174
Add test_html_fetchers
tests/test_html_fetchers.py
tests/test_html_fetchers.py
import os import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) import unittest from unittest import mock from web_scraper.core import html_fetchers def mocked_requests_get(*args, **kwargs): """this method will be used by the mock to replace requests.get""" class MockResponse: def __init__(self, html, status_code): self.html = html self.status_code = status_code def text(self): return self.html def status_code(self): return self.status_code if args[0] == 'http://example.com/': return MockResponse(200, (200, 'html')) return MockResponse(404, (404, 'Not Found')) class TestHtmlFetchersMethods(unittest.TestCase): @mock.patch('web_scraper.core.html_fetchers.requests.get', side_effect=mocked_requests_get) def test_fetch_html_document_200(self, mock_get): """fetch_html_document should return 200 and html""" response = html_fetchers.fetch_html_document('http://example.com/') # reponse = tuple, MockResponse object status_code = response[0][0] html = response[0][1] self.assertEqual((status_code, html), (200, 'html')) @mock.patch('web_scraper.core.html_fetchers.requests.get', side_effect=mocked_requests_get) def test_fetch_html_document_404(self, mock_get): """fetch_html_document should return 404 and 'Not Found'""" response = html_fetchers.fetch_html_document('http://example.com/nonexistantpath') # reponse = tuple, MockResponse object. status_code = response[0][0] html = response[0][1] self.assertEqual((status_code, html), (404, 'Not Found')) if __name__ == '__main__': unittest.main()
Python
0.000001
b4f8e8d38636a52d3d4b199fdc670ff93eca33f6
Add prototype for filters module.
mltils/filters.py
mltils/filters.py
# pylint: disable=missing-docstring, invalid-name, import-error class VarianceFilter(object): pass class SimilarityFilter(object): pass class CorrelationFilter(object): pass
Python
0
b0f5c33461d08325581cc0ad272c7f2b39b8dc66
Fix typo.
metpy/calc/__init__.py
metpy/calc/__init__.py
import basic from basic import * __all__ = [] __all__.extend(basic.__all__)
import basic from basic import * __all__ == [] __all__.extend(basic.__all__)
Python
0.001604
167712a6640abca106bbcd50daf5dc22ba90083d
Fix log formatting
src/sentry/tasks/email.py
src/sentry/tasks/email.py
""" sentry.tasks.email ~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import, print_function import logging from django.core.mail import get_connection from sentry.tasks.base import instrumented_task logger = logging.getLogger(__name__) def _get_user_from_email(group, email): from sentry.models import Project, User # TODO(dcramer): we should encode the userid in emails so we can avoid this for user in User.objects.filter(email__iexact=email): # Make sure that the user actually has access to this project if group.project not in Project.objects.get_for_user( team=group.team, user=user): logger.warning('User %r does not have access to group %r', user, group) continue return user @instrumented_task( name='sentry.tasks.email.process_inbound_email', queue='email') def process_inbound_email(mailfrom, group_id, payload): """ """ from sentry.models import Event, Group from sentry.web.forms import NewNoteForm try: group = Group.objects.select_related('project', 'team').get(pk=group_id) except Group.DoesNotExist: logger.warning('Group does not exist: %d', group_id) return user = _get_user_from_email(group, mailfrom) if user is None: logger.warning('Inbound email from unknown address: %s', mailfrom) return event = group.get_latest_event() or Event() Event.objects.bind_nodes([event], 'data') event.group = group event.project = group.project form = NewNoteForm({'text': payload}) if form.is_valid(): form.save(event, user) @instrumented_task( name='sentry.tasks.email.send_email', queue='email') def send_email(message): connection = get_connection() connection.send_messages([message])
""" sentry.tasks.email ~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import, print_function import logging from django.core.mail import get_connection from sentry.tasks.base import instrumented_task logger = logging.getLogger(__name__) def _get_user_from_email(group, email): from sentry.models import Project, User # TODO(dcramer): we should encode the userid in emails so we can avoid this for user in User.objects.filter(email__iexact=email): # Make sure that the user actually has access to this project if group.project not in Project.objects.get_for_user( team=group.team, user=user): logger.warning('User %r does not have access to group %r', (user, group)) continue return user @instrumented_task( name='sentry.tasks.email.process_inbound_email', queue='email') def process_inbound_email(mailfrom, group_id, payload): """ """ from sentry.models import Event, Group from sentry.web.forms import NewNoteForm try: group = Group.objects.select_related('project', 'team').get(pk=group_id) except Group.DoesNotExist: logger.warning('Group does not exist: %d', group_id) return user = _get_user_from_email(group, mailfrom) if user is None: logger.warning('Inbound email from unknown address: %s', mailfrom) return event = group.get_latest_event() or Event() Event.objects.bind_nodes([event], 'data') event.group = group event.project = group.project form = NewNoteForm({'text': payload}) if form.is_valid(): form.save(event, user) @instrumented_task( name='sentry.tasks.email.send_email', queue='email') def send_email(message): connection = get_connection() connection.send_messages([message])
Python
0.000005
5e1c58db69adad25307d23c240b905eaf68e1671
Add fade animation
src/fade_animation.py
src/fade_animation.py
import animation, colorsys def colorunpack(color): color = int(color) return ((color >> 16) / 255, ((color >> 8) & 255) / 0xff, (color & 0xff) / 0xff) def colorpack(color): return sum(int(color[i] * 0xff) << (16 - 8*i) for i in range(3)) class FadeAnimation(animation.Animation): """ animation fades relevant section of light strip solid between two colors for the duration of the animation """ def __init__(self, start_time, stop_time, start_pos, stop_pos, start_color, stop_color): """ :param start_time: seconds since the epoch to start animation :param stop_time: seconds since the epoch to stop animation :param start_pos: number from 0 to 1 indicating start on strip :param stop_pos: number from 0 to 1 indicating stop on strip :param start_color: initial 24-bit integer RGB color :param stop_color: final 24-bit integer RGB color """ self.set_start_time(start_time) self.set_stop_time(stop_time) self.set_start_pos(start_pos) self.set_stop_pos(stop_pos) self.__start_hsv = colorsys.rgb_to_hsv(*colorunpack(start_color)) self.__stop_hsv = colorsys.rgb_to_hsv(*colorunpack(stop_color)) def get_color(self, time, pos): """ :param time: current time as seconds since the epoch :param pos: position from 0 to 1 to get color for :return: 24-bit integer RGB color """ lerp = (time - self.get_start_time()) \ / (self.get_stop_time() - self.get_start_time()) lerp = max(0, min(1, lerp)) curr = (self.__start_hsv[0] + (self.__stop_hsv[0]-self.__start_hsv[0])*lerp, self.__start_hsv[1] + (self.__stop_hsv[1]-self.__start_hsv[1])*lerp, self.__start_hsv[2] + (self.__stop_hsv[2]-self.__start_hsv[2])*lerp) return colorpack(colorsys.hsv_to_rgb(*curr))
Python
0.000001
f537abe2ff1826a9decd9dace5597cbc4f7f318b
Add 1.6
1_arrays_hashtables/string_compression.py
1_arrays_hashtables/string_compression.py
import itertools


def compress(string):
    """Run-length encode ``string``.

    Each run of a repeated character is rendered as ``<count><char>``,
    with the count omitted for runs of length 1, e.g.
    ``'aafbbcdaaaaa'`` -> ``'2af2bcd5a'``.

    Because counts of 1 are omitted, the result is never longer than the
    input, so no "return original if not shorter" check is needed (the
    original carried that check as dead commented-out code).

    Fixes an IndexError the original raised for the empty string and
    removes the debug ``print`` calls.
    """
    if not string:
        return string
    pieces = []
    for char, run in itertools.groupby(string):
        count = sum(1 for _ in run)
        if count != 1:
            pieces.append(str(count))
        pieces.append(char)
    return ''.join(pieces)


if __name__ == '__main__':
    print(compress('aafbbcdaaaaa'))
Python
0.999996
3e15f6d64ccbb1f98ff64323a25304db662a45ba
Add nice_number function to format decimals to english
mycroft/util/format.py
mycroft/util/format.py
# -*- coding: iso-8859-15 -*- # Copyright 2017 Mycroft AI, Inc. # # This file is part of Mycroft Core. # # Mycroft Core is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Mycroft Core is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>. import math FRACTION_STRING_EN = { 2: 'half', 3: 'third', 4: 'forth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eigth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelveth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentyith' } def nice_number(number, lang="en-us", speech=True, denominators=None): """Format a float to human readable functions This function formats a float to human understandable functions. Like 4.5 becomes 4 and a half for speech and 4 1/2 for text Args: number (str): the float to format lang (str): the code for the language text is in speech (bool): to return speech representation or text representation denominators (iter of ints): denominators to use, default [1 .. 20] Returns: (str): The formatted string. 
""" result = convert_number(number, denominators) if not result: return str(round(number, 3)) if not speech: if num == 0: return str(whole) else: return '{} {}/{}'.format(whole, num, den) lang_lower = str(lang).lower() if lang_lower.startswith("en"): return nice_number_en(result) # TODO: Normalization for other languages return str(number) def nice_number_en(result): """ English conversion for nice_number """ whole, num, den = result if num == 0: return str(whole) den_str = FRACTION_STRING_EN[den] if whole == 0: return_string = '{} {}'.format(num, den_str) else: return_string = '{} and {} {}'.format(whole, num, den_str) if num > 1: return_string += 's' return return_string def convert_number(number, denominators): """ Convert floats to mixed fractions """ int_number = int(number) if int_number == number: return int_number, 0, 1 frac_number = abs(number - int_number) if not denominators: denominators = range(1, 21) for denominator in denominators: numerator = abs(frac_number) * denominator if (abs(numerator - round(numerator)) < 0.01): break else: return None return int_number, int(round(numerator)), denominator
Python
0.002965
296efcc28e19fc76371496881a546f1ca52dc622
add nagios check for iembot availability
nagios/check_iembot.py
nagios/check_iembot.py
"""Ensure iembot is up.""" import sys import requests def main(): """Go Main Go.""" req = requests.get('http://iembot:9004/room/kdmx.xml') if req.status_code == 200: print("OK - len(kdmx.xml) is %s" % (len(req.content), )) return 0 print("CRITICAL - /room/kdmx.xml returned code %s" % (req.status_code, )) return 2 if __name__ == '__main__': sys.exit(main())
Python
0
1d0c0741f1605f3786a752288161c679ab271ea2
Add a utility file for aggregating decorators
website/addons/osfstorage/decorators.py
website/addons/osfstorage/decorators.py
import functools from webargs import Arg from webargs import core from framework.auth.decorators import must_be_signed from website.models import User from framework.exceptions import HTTPError from website.addons.osfstorage import utils from website.project.decorators import ( must_not_be_registration, must_have_addon, ) class JSONParser(core.Parser): def __init__(self, data): self._data = data def parse(self, args): return super(JSONParser, self).parse(args, None, ('json',)) def parse_json(self, _, name, arg): if self._data: return core.get_value(self._data, name, arg.multiple) else: return core.Missing def path_validator(path): return ( path.startswith('/') and len(path.strip('/').split('/')) < 3 ) file_opt_args = { 'source': Arg({ 'path': Arg(str, required=True, validate=path_validator), 'cookie': Arg(None, required=True, use=User.from_cookie, validate=lambda x: x is not None) }), 'destination': Arg({ 'path': Arg(str, required=True, validate=path_validator), 'cookie': Arg(None, required=True, use=User.from_cookie, validate=lambda x: x is not None) }) } def waterbutler_opt_hook(func): @must_be_signed @utils.handle_odm_errors @must_not_be_registration @must_have_addon('osfstorage', 'node') @functools.wraps(func) def wrapped(payload, *args, **kwargs): kwargs.update(JSONParser(payload).parse(file_opt_args)) return func(*args, **kwargs) return wrapped
Python
0.000001
bb8e7ee023d678e68d1da3018bf6d1d3d36d55bd
Create new package (#6588)
var/spack/repos/builtin/packages/perl-statistics-descriptive/package.py
var/spack/repos/builtin/packages/perl-statistics-descriptive/package.py
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class PerlStatisticsDescriptive(PerlPackage): """Module of basic descriptive statistical functions.""" homepage = "http://search.cpan.org/~shlomif/Statistics-Descriptive-3.0612/lib/Statistics/Descriptive.pm" url = "http://search.cpan.org/CPAN/authors/id/S/SH/SHLOMIF/Statistics-Descriptive-3.0612.tar.gz" version('3.0612', 'e38cfbc1e3962d099b62a14a57a175f1')
Python
0
81bcfe4a31d8e9ea92497288c3d264755949d809
check for some statistics on the dataset.
stats_about_tweet_data.py
stats_about_tweet_data.py
from collections import defaultdict import codecs import matplotlib.pyplot as plt import pylab as P import numpy as np F_IN = "usrs_with_more_than_20_tweets.dat" #F_OUT = "tweets_with_usrs_with_more_than_20_tweets.dat" #f_out = "usrs_with_more_than_20_tweets.dat" USR_TWEETS = defaultdict(int) def plot_hist(data): n, bins, patches = P.hist(data, bins=np.logspace(0.1, 3.5), histtype='step', log=True, label="# of tweets per user") P.setp(patches, 'facecolor', 'g', 'alpha', 0.75) #y = P.normpdf( bins, mu, sigma) #l = P.plot(bins, y, 'k--', linewidth=1.5) # # create a histogram by providing the bin edges (unequally spaced) # #P.figure() # # now we create a cumulative histogram of the data # #P.grid(True) #P.ylim(0, 1.05) #P.legend() P.gca().set_xscale("log") P.show() def filter_users(thrshld=20): filtered_lst = [] for usr, tweets in USR_TWEETS.iteritems(): if USR_TWEETS[usr] > thrshld: filtered_lst.append(usr) return filtered_lst ''' # DONE once is enough def filter_dataset(thrshld=20): user_tweets = tweets_per_user(F_IN) filtered_lst = filter_users(user_tweets) cnt_selected_tweets = 0 output_file = codecs.open(F_OUT, 'w', encoding='utf8') with codecs.open(F_IN,'r', encoding='utf8') as input_file: for line in input_file: line_splt = line.split() usr = line_splt[0] if usr in filtered_lst: cnt_selected_tweets += 1 output_file.write(line) output_file.close() input_file.close() print "Filtered dataset for users with more than: ", thrshld, " tweets." print "New number of tweets: ", cnt_selected_tweets print "New number of users: ", len(filtered_lst) ''' def tweets_per_user(): cnt_all_tweets = 0 global USR_TWEETS with codecs.open(F_IN,'r', encoding='utf8') as input_file: # the code loops through the input, collects tweets for each user into a dict for line in input_file: cnt_all_tweets += 1 line = line.split() user = line[0] USR_TWEETS[user] += 1 print "Read ENG tweets: ", cnt_all_tweets, " from: ", len(USR_TWEETS.keys()), " distinct users." 
max_tweets = max(USR_TWEETS.values()) print "MAX tweets ", max_tweets, " has/ve the user/s ", \ [usr for usr, tweets in USR_TWEETS.iteritems() if USR_TWEETS[usr] == max_tweets] input_file.close() def plot_hist_usr_tweets(): usr_tweets = tweets_per_user() plot_hist(usr_tweets.values()) def filter_dataset_double_usr_filter(thrshld=20): filtered_lst = filter_users(USR_TWEETS) cnt_selected_tweets = 0 output_file = codecs.open(F_OUT, 'w', encoding='utf8') with codecs.open(F_IN,'r', encoding='utf8') as input_file: for line in input_file: line_splt = line.split() usr = line_splt[0] usr2 = line_splt[1] if usr and usr2 in filtered_lst: cnt_selected_tweets += 1 output_file.write(line) output_file.close() input_file.close() print "Filtered dataset for users with more than: ", thrshld, " tweets." print "New number of tweets: ", cnt_selected_tweets print "New number of users: ", len(filtered_lst) #plot_hist_usr_tweets() #filter_dataset() #tweets_per_user() #print len(filter_users()) filter_dataset_double_usr_filter()
Python
0
d7c0525a1b62bbe6b8425c0bb2dda0e1fad680b8
Create enforce.py
enforce.py
enforce.py
"""This plugins allows a user to enforce modes set on channels""" """e.g. enforcing +o on nick""" """Requires admin""" from utils import add_cmd, add_handler import utils from admin import deop name = "enforce" cmds = ["enforce"] def main(irc): if name not in irc.plugins.keys(): irc.plugins[name] = {} @add_cmd def enforce(irc, event, args): """[<channel>] <modes> <nick> Enforces a mode to <nick> in the current channel if no channel argument is given. """ message = event.arguments[0].split() try: if irc.is_channel(message[1]): unrecognised_modes = [] unset_modes = [] set_modes = [] mode_diff = "+" for pos, mode in enumerate(message[2]): if mode in utils.argmodes["set"] or mode in utils.argmodes["unset"] or mode in ["+", "-"]: pass else: # What on earth is that mode? unrecognised_modes.append(mode) for mode in message[2]: if mode == "+": mode_diff = "+" elif mode == "-": mode_diff = "-" else: if mode_diff == "+": if mode in unset_modes: irc.reply(event, "This mode {} is already set and could not be unset for {}".format(mode, message[3])) else: set_modes.append(mode) elif mode_diff == "-": # else but who cares? 
if mode in set_modes: irc.reply(event, "This mode {} is already set and could not be set for {}".format(mode, message[3])) else: unset_modes.append(mode) if unrecognised_modes: irc.reply(event, "I could not recognise these modes: {}".format("".join(unrecognised_modes))) else: if len(message) >= 4: if not "enforce" in irc.channels[message[1]]: irc.channels[message[1]]["enforce"] = {} irc.channels[message[1]]["enforce"][message[3]] = { "set": set_modes or "", "unset": unset_modes or "" } else: irc.reply(event, "You didn't specify a nick to enforce modes to") except IndexError: irc.reply(event, utils.gethelp("enforce")) def on_mode(irc, conn, event): modes = utils.split_modes(event.arguments) irc.notice("BWBellairs", str(modes)) if "enforce" in irc.channels[event.target].keys(): for mode in modes: subject = mode.split()[1] mode_type = mode.split()[0] if subject in irc.channels[event.target]["enforce"].keys(): modes_set = irc.channels[event.target]["enforce"][subject] if mode_type[0:2] == "+o" and mode_type[1] in modes_set["unset"]: irc.notice("BWBellairs", "deop him!!!") add_handler(on_mode, name)
Python
0
12f7dddcbe8c7c2160bf8de8f7a9c3082b950003
Create longest-harmonious-subsequence.py
Python/longest-harmonious-subsequence.py
Python/longest-harmonious-subsequence.py
# Time:  O(n)
# Space: O(n)


class Solution(object):
    def findLHS(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        A harmonious subsequence contains exactly two distinct values that
        differ by 1; its length is the sum of both values' frequencies.
        Count every value once, then try each (value, value + 1) pair.
        """
        counts = collections.Counter(nums)
        best = 0
        for value, count in counts.items():
            if value + 1 in counts:
                best = max(best, count + counts[value + 1])
        return best
Python
0.999951
d7cc3d6590d1d6d46bdf780b93e76ea6aa50334d
Create peak-index-in-a-mountain-array.py
Python/peak-index-in-a-mountain-array.py
Python/peak-index-in-a-mountain-array.py
# Time: O(logn) # Space: O(1) # Let's call an array A a mountain if the following properties hold: # # A.length >= 3 # There exists some 0 < i < A.length - 1 # such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1] # Given an array that is definitely a mountain, # return any i such that # A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1]. # # Example 1: # # Input: [0,1,0] # Output: 1 # Example 2: # # Input: [0,2,1,0] # Output: 1 # Note: # # 3 <= A.length <= 10000 # 0 <= A[i] <= 10^6 # A is a mountain, as defined above. class Solution(object): def peakIndexInMountainArray(self, A): """ :type A: List[int] :rtype: int """ left, right = 0, len(A) while left < right: mid = left + (right-left)//2 if A[mid] > A[mid+1]: right = mid else: left = mid+1 return left
Python
0.032261
64eab4beaf4e00d47423ea027ec6f40129ee2e95
Create execi-3.py
execi-3.py
execi-3.py
n1 = int(input("Digite um valor: ")) if n1 < 0: print (n1 * -1) elif n1 > 10: n2 = int(input("Digite outro valor: ")) print (n1 - n2) else: print (n1/5.0)
Python
0.000001
7dce21cc8fa3b81e150ed6586db8ca80cd537fc7
Add compat module to test package
test/compat.py
test/compat.py
# -*- coding: utf-8 -*- ''' A common module for compatibility related imports and definitions used during testing ''' from __future__ import unicode_literals import unittest from six import assertCountEqual, PY2 try: from unittest.mock import Mock, MagicMock, patch # @NoMove except ImportError: from mock import Mock, MagicMock, patch # @NoMove @UnusedImport class Py2TestCase(unittest.TestCase): def assertCountEqual(self, expected_sequence, actual_sequence): return assertCountEqual(self, expected_sequence, actual_sequence) if PY2: unittest.TestCase = Py2TestCase
Python
0
9a97847419ad569b1f9f3d302507aca8544944e2
test file
test_scheme.py
test_scheme.py
import unittest import scheme_mongo class TestScheme(unittest.TestCase): def runTest(self): mongo_uri = "mongodb://localhost/test.in" wrapper = scheme_mongo.open(mongo_uri) assert wrapper for result in wrapper: print result if __name__ == '__main__': unittest.main()
Python
0.000002
2e8e2450e5a2b800b2587e75b9e25fd0f5c678a6
Add tests for `pyrakoon.sequence`
test/test_sequence.py
test/test_sequence.py
# This file is part of Pyrakoon, a distributed key-value store client. # # Copyright (C) 2010 Incubaid BVBA # # Licensees holding a valid Incubaid license may use this file in # accordance with Incubaid's Arakoon commercial license agreement. For # more information on how to enter into this agreement, please contact # Incubaid (contact details can be found on www.arakoon.org/licensing). # # Alternatively, this file may be redistributed and/or modified under # the terms of the GNU Affero General Public License version 3, as # published by the Free Software Foundation. Under this license, this # file is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. # # See the GNU Affero General Public License for more details. # You should have received a copy of the # GNU Affero General Public License along with this program (file "COPYING"). # If not, see <http://www.gnu.org/licenses/>. 
'''Tests for code in `pyrakoon.sequence`''' import unittest import itertools from pyrakoon import sequence bytes_ = lambda str_: (ord(c) for c in str_) class TestSequenceSerialization(unittest.TestCase): '''Test serialization of sequences and steps''' def test_set_step_serialization(self): '''Test serialization of 'set' steps''' expected = ''.join(chr(i) for i in itertools.chain( (1, 0, 0, 0), (3, 0, 0, 0), bytes_('key'), (5, 0, 0, 0), bytes_('value'), )) received = ''.join(sequence.Set('key', 'value').serialize()) self.assertEquals(expected, received) def test_delete_step_serialization(self): '''Test serialization of 'delete' steps''' expected = ''.join(chr(i) for i in itertools.chain( (2, 0, 0, 0), (3, 0, 0, 0), bytes_('key'), )) received = ''.join(sequence.Delete('key').serialize()) self.assertEquals(expected, received) def test_simple_test_and_set_step_serialization(self): '''Test serialization of 'test_and_set' steps''' expected = ''.join(chr(i) for i in itertools.chain( (3, 0, 0, 0), (3, 0, 0, 0), bytes_('key'), (1,), (8, 0, 0, 0), bytes_('oldvalue'), (1,), (8, 0, 0, 0), bytes_('newvalue'), )) received = ''.join(sequence.TestAndSet( 'key', 'oldvalue', 'newvalue').serialize()) self.assertEquals(expected, received) def test_unset_test_and_set_step_serialization(self): '''Test serialization of 'test_and_step' with empty values''' expected = ''.join(chr(i) for i in itertools.chain( (3, 0, 0, 0), (3, 0, 0, 0), bytes_('key'), (0,), (0,), )) received = ''.join(sequence.TestAndSet('key', None, None).serialize()) self.assertEquals(expected, received) def test_empty_sequence_serialization(self): '''Test serialization of an empty sequence''' expected = ''.join(chr(i) for i in itertools.chain( (5, 0, 0, 0), (0, 0, 0, 0), )) received = ''.join(sequence.Sequence().serialize()) self.assertEquals(expected, received) def test_single_step_sequence_serialization(self): '''Test serialization of a one-step sequence''' expected = ''.join(chr(i) for i in itertools.chain( (5, 0, 0, 
0), (1, 0, 0, 0), (1, 0, 0, 0), (3, 0, 0, 0), bytes_('key'), (5, 0, 0, 0), bytes_('value'), )) received = ''.join( sequence.Sequence(sequence.Set('key', 'value')).serialize()) self.assertEquals(expected, received) def test_sequence_serialization(self): '''Test serialization of a sequence''' expected = ''.join(chr(i) for i in itertools.chain( (5, 0, 0, 0), (3, 0, 0, 0), (1, 0, 0, 0), (3, 0, 0, 0), bytes_('key'), (5, 0, 0, 0), bytes_('value'), (2, 0, 0, 0), (3, 0, 0, 0), bytes_('key'), (3, 0, 0, 0), (3, 0, 0, 0), bytes_('key'), (0,), (1,), (5, 0, 0, 0), bytes_('value'), )) received = ''.join(sequence.Sequence( sequence.Set('key', 'value'), sequence.Delete('key'), sequence.TestAndSet('key', None, 'value') ).serialize()) self.assertEquals(expected, received) def test_nested_sequence_serialization(self): '''Test serialization of a nested sequence''' expected = ''.join(chr(i) for i in itertools.chain( (5, 0, 0, 0), # Sequence (3, 0, 0, 0), # 3 steps (2, 0, 0, 0), # Delete (3, 0, 0, 0), bytes_('key'), (5, 0, 0, 0), # Sequence (2, 0, 0, 0), # 2 steps (2, 0, 0, 0), # Delete (3, 0, 0, 0), bytes_('key'), (1, 0, 0, 0), # Set (3, 0, 0, 0), bytes_('key'), (5, 0, 0, 0), bytes_('value'), (5, 0, 0, 0), # Sequence (0, 0, 0, 0), # 0 steps )) received = ''.join(sequence.Sequence( sequence.Delete('key'), sequence.Sequence( sequence.Delete('key'), sequence.Set('key', 'value')), sequence.Sequence()).serialize()) self.assertEquals(expected, received)
Python
0.000001
fc95c998dc8c3caee3e0a4590b96c9ed7e0321a7
add a test suite for Division
test_htmlgen/block.py
test_htmlgen/block.py
from unittest import TestCase from asserts import assert_equal from htmlgen import Division class DivisionTest(TestCase): def test_render(self): div = Division() div.append("Test") assert_equal([b"<div>", b"Test", b"</div>"], list(iter(div)))
Python
0
e4980879f0f4a0d223cccc99a486fb62cbe5807f
change models.py
physics/models.py
physics/models.py
from django.db import models class Student(models.Model): """Student Info""" stu_id = models.CharField(u'学号', max_length=30, primary_key=True) name = models.CharField(u'姓名', max_length=30) password = models.CharField(u'密码', max_length=30) def __unicode__(self): return '{stu_id} {name}'.format(stu_id=self.stu_id, name=self.name) class Teacher(models.Model): """Teacher Info""" name = models.CharField(u'姓名', max_length=30) def __unicode__(self): return self.name class Questoin(models.Model): """Question Info""" title = models.TextField(u'题目') content = models.TextField(u'选项') answer = models.CharField(u'答案', max_length=1) def __unicode__(self): return self.title class Notification(self): """Notification Info""" title = models.TextField(u'通知标题') content = models.TextField(u'通知内容') time = models.DateField(u'通知时间') def __unicode__(self): return self.title
from django.db import models class Student(models.Model): """Student Info""" stu_id = models.CharField(u'学号', max_length=30, primary_key=True) name = models.CharField(u'姓名', max_length=30) password = models.CharField(u'密码', max_length=30) def __unicode__(self): return '{stu_id} {name}'.format(stu_id=self.stu_id, name=self.name) class Teacher(models.Model): """Teacher Info""" name = models.CharField(u'姓名', max_length=30) def __unicode__(self): return self.name class Questoin(models.Model): """Question Info""" title = models.TextField() content = models.TextField() answer = models.CharField(max_length=1) def __unicode__(self): return self.title class Notification(self): """Notification Info""" title = models.TextField() content = models.TextField() time = models.DateField() def __unicode__(self): return self.title
Python
0.000001
964d1f97df600308b23b6a91b9de8811795509a4
Add a test for the @cachit decorator.
sympy/core/tests/test_cache.py
sympy/core/tests/test_cache.py
from sympy.core.cache import cacheit def test_cacheit_doc(): @cacheit def testfn(): "test docstring" pass assert testfn.__doc__ == "test docstring" assert testfn.__name__ == "testfn"
Python
0
a8ddae9343683ca69067eecbece5ecff6d4e5d1d
Add myStrom switch platform
homeassistant/components/switch/mystrom.py
homeassistant/components/switch/mystrom.py
""" homeassistant.components.switch.mystrom ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for myStrom switches. For more details about this component, please refer to the documentation at https://home-assistant.io/components/switch.mystrom/ """ import logging import requests from homeassistant.components.switch import SwitchDevice from homeassistant.const import STATE_UNKNOWN DEFAULT_NAME = 'myStrom Switch' _LOGGER = logging.getLogger(__name__) # pylint: disable=unused-argument, too-many-function-args def setup_platform(hass, config, add_devices, discovery_info=None): """ Find and return myStrom switches. """ resource = config.get('resource') if resource is None: _LOGGER.error('Missing required variable: resource') return False try: requests.get(resource, timeout=10) except requests.exceptions.MissingSchema: _LOGGER.error("Missing resource or schema in configuration. " "Add http:// to your URL.") return False except requests.exceptions.ConnectionError: _LOGGER.error("No route to device. " "Please check the IP address in the configuration file.") return False add_devices([MyStromSwitch( config.get('name', DEFAULT_NAME), config.get('resource'))]) class MyStromSwitch(SwitchDevice): """ Represents a myStrom switch. """ def __init__(self, name, resource): self._state = STATE_UNKNOWN self._name = name self._resource = resource self.consumption = 0 @property def name(self): """ The name of the switch. """ return self._name @property def is_on(self): """ True if switch is on. """ return self._state @property def current_power_mwh(self): """ Current power consumption in mwh. """ return self.consumption def turn_on(self, **kwargs): """ Turn the switch on. """ request = requests.get('{}/relay'.format(self._resource), params={'state': '1'}, timeout=10) if request.status_code == 200: self._state = True else: _LOGGER.error("Can't turn on %s. Is device offline?", self._resource) def turn_off(self, **kwargs): """ Turn the switch off. 
""" request = requests.get('{}/relay'.format(self._resource), params={'state': '0'}, timeout=10) if request.status_code == 200: self._state = False else: _LOGGER.error("Can't turn off %s. Is device offline?", self._resource) def update(self): """ Gets the latest data from REST API and updates the state. """ try: request = requests.get('{}/report'.format(self._resource), timeout=10) if request.json()['relay'] is True: self._state = True else: self._state = False self.consumption = request.json()['power'] except requests.exceptions.ConnectionError: _LOGGER.error("No route to device '%s'. Is device offline?", self._resource)
Python
0
fbf5ecffb4249e7f881f53f30625a47a6e779592
Create selective_array_reversing.py
selective_array_reversing.py
selective_array_reversing.py
#Kunal Gautam #Codewars : @Kunalpod #Problem name: Selective Array Reversing #Problem level: 6 kyu def sel_reverse(arr,l): li=[] if not l: return arr for i in range(0,len(arr),l): if i+l>len(arr): li+=(list(reversed(arr[i:]))) else: li+=(list(reversed(arr[i:i+l]))) return li
Python
0.000166
afe8e16be43b5e66df0f7bf14832f77009aab151
Create __init__.py
oauth/__init__.py
oauth/__init__.py
#!/usr/bin/python # -*- coding: utf-8 -*- """ Created by bu on 2017-05-10 """ from __future__ import unicode_literals import json as complex_json import requests from utils import verify_sign from utils import get_sign class RequestClient(object): __headers = { 'Content-Type': 'application/json; charset=utf-8', 'Accept': 'application/json', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36' } def __init__(self, access_id, secret_key, headers=dict()): self.access_id = access_id self.secret_key = secret_key self.headers = self.__headers self.headers.update(headers) def set_authorization(self, params): params['access_id'] = self.access_id self.headers['access_id'] = self.access_id self.headers['AUTHORIZATION'] = get_sign(params, self.secret_key) def request(self, method, url, params=dict(), data='', json=dict()): method = method.upper() if method == 'GET': self.set_authorization(params) result = requests.request('GET', url, params=params, headers=self.headers) else: if data: json.update(complex_json.loads(data)) self.set_authorization(json) result = requests.request(method, url, json=json, headers=self.headers) return result class OAuthClient(object): def __init__(self, request): self.request = request self._body = dict() self._authorization = '' @property def body(self): raise NotImplementedError('extract body') @property def authorization(self): raise NotImplementedError('authorization') def verify_request(self, secret_key): return verify_sign(self.body, secret_key, self.authorization) class FlaskOAuthClient(OAuthClient): @property def body(self): if self._body: return self._body if self.request.method == 'GET': self._body = self.request.args.to_dict() elif self.request.is_json: self._body = self.request.json access_id = self.request.headers.get('ACCESS_ID') if access_id: self._body['access_id'] = access_id return self._body @property def authorization(self): if self._authorization: 
return self._authorization self._authorization = self.request.headers['AUTHORIZATION'] return self.authorization
Python
0.000429
05c3430b8a1cd1fc204a40916fe5ec7ab2a48e81
Save xls operations
operations/xls.py
operations/xls.py
# -*- coding: utf-8 -*- """ Load xlxs file (2010) """ from openpyxl import load_workbook class ExReader(object): """ Reading spreadsheets from a file """ def __init__(self, filename=None): super(ExReader, self).__init__() if filename is None: filename = "/Users/paulie/Downloads/test_baroque.xlsx" wb = load_workbook(filename=filename, read_only=True) # print(wb) # print(wb.worksheets) """ [<ReadOnlyWorksheet "Plan">, <ReadOnlyWorksheet "fête">, <ReadOnlyWorksheet "dispositif">, <ReadOnlyWorksheet "perfomance">, <ReadOnlyWorksheet "expérience">, <ReadOnlyWorksheet "Fins">, <ReadOnlyWorksheet "a classer">, <ReadOnlyWorksheet "exclus">] """ ## ALL # for ws in wb.worksheets: # self.get_sheet_data(ws) ## SINGLE # self.get_sheet_data(wb['perfomance']) self.get_sheet_data(wb['dispositif']) def read_block(self, data, emit_error=False): macro = None micro = None convention = None try: macro = data.pop(0).value micro = data.pop(0).value convention = data.pop(0).value except IndexError as e: if emit_error: raise IndexError("Could not read one of the block elements\n%s" % str(e)) return () # Should skip every row which has not a value in the first 3 cells if macro is None and micro is None and convention is None: if emit_error: raise IndexError("All block elements are empty") return () return (macro, micro, convention) def get_sheet_data(self, ws): row_num = 0 languages = [] terms = [] latest_macro = latest_micro = "-undefined-" for row in ws.rows: data = list(row) row_num += 1 last_element = "Unknown" # Get the block block = self.read_block(data) if len(block) == 0: if row_num == 1: raise KeyError("Cannot find headers!") continue # Unpack the block: # the first 3 elements removed from data macro, micro, convention = block if row_num > 1: if macro is not None and macro.strip() != '': latest_macro = macro if micro is not None and micro.strip() != '': latest_micro = micro cell_num = -1 from collections import OrderedDict term = OrderedDict({ 'term': convention, 'macro': 
latest_macro, 'micro': latest_micro, }) # print("TERM", term) for element in data: cell_num += 1 if element.value is None: if row_num == 1: if last_element is None and element.value is not None: raise KeyError("Missing language column name") # Warning: we need to know how many languages are expected! if cell_num > 6 and last_element is None: break # First row (header) tells you which languages # Store languages names from cell 4 on if row_num == 1 and element.value is not None: languages.append(element.value) else: try: language = languages[cell_num] term[language] = element.value except IndexError: pass # Keep track of last element last_element = element.value # Add last row/term if row_num > 1: print("TERM", term) terms.append(list(term.values())) if row_num > 15: from tabulate import tabulate table = tabulate( terms, # headers=term.keys(), tablefmt="fancy_grid") print(table) # print(terms) break if __name__ == '__main__': ExReader()
Python
0
a3bbd175ef5640843cb16b0166b462ffaed25242
standardize logging interface for fs-drift
fsd_log.py
fsd_log.py
import logging # standardize use of logging module in fs-drift def start_log(prefix): log = logging.getLogger(prefix) h = logging.StreamHandler() log_format = prefix + ' %(asctime)s - %(levelname)s - %(message)s' formatter = logging.Formatter(log_format) h.setFormatter(formatter) log.addHandler(h) log.setLevel(logging.DEBUG) return log #with open('/tmp/weights.csv', 'w') as w_f:
Python
0
52e71001b7e775daaaaf42280ebe06c31291b595
Add a simplemeshtest variant where all AJ packets of one node are always dropped
tests/failmeshtest.py
tests/failmeshtest.py
#!/usr/bin/env python from twisted.internet import reactor from mesh import Mesh, MeshNode, packet_type, ATTEMPT_JOIN import sys NUMNODES = 5 NUMPACKETS = 10 DELAY = 0.1 nodes = [] # We're optimists success = True class TestMeshNode(MeshNode): nodes = 1 def __init__ (self, name, mesh): MeshNode.__init__(self, name, mesh) def node_connected(self): MeshNode.node_connected(self) print "Connected" def newNode (self, data): MeshNode.newNode (self, data) print "node0 - Added " + data self.nodes += 1 if self.nodes == NUMNODES - 1: print "Everybody who could joined" for x in xrange(0, NUMPACKETS): reactor.callLater(0.1 * x, (lambda y: self.pushInput(str(y) + "\n")), x) def leftNode (self, data): MeshNode.leftNode (self, data) print data.rstrip() + " left" reactor.stop() class FailMeshNode (MeshNode): def __init__ (self, name, mesh): MeshNode.__init__(self, name, mesh) def sendPacket (self, data): if packet_type(data) != ATTEMPT_JOIN: MeshNode.sendPacket(self, data) class TestMesh(Mesh): expected = {} done = 0 def gotOutput(self, node, sender, data): global success if self.expected.get(node) == None: self.expected[node] = 0 if (self.expected.get(node, int(data)) != int(data)): print "Got " + data.rstrip() + " instead of " + \ str(self.expected[node]) + " from " + node.name success = False reactor.crash() if not sender in node.peers: print "Sender " + sender + " not in node peers" success = False reactor.crash() self.expected[node] = int(data) + 1 if self.expected[node] == 10: self.done += 1 if self.done == NUMNODES - 2: for x in self.nodes: x.stats() self.nodes[-2].disconnect() m = TestMesh() n = TestMeshNode("node0", m) nodes.append(n) m.addMeshNode(n) for x in xrange(1, NUMNODES - 1): nodes.append(m.addNode("node" + str(x))) x = NUMNODES - 1 n = FailMeshNode("node" + str(x), m) nodes.append(n) m.addMeshNode(n) # Connect all nodes to all others. 1024 bytes/s bandwidth, 50ms delay and 0% # packet loss.. 
(bandwidth and delay aren't implemented just yet) m.connect_full(1024, 50, 0.30) def timeout(): global success print "TIMEOUT!" success = False reactor.crash() reactor.callLater(60, timeout) reactor.run() if not success: print "FAILED" sys.exit(-1) print "SUCCESS"
Python
0.000002
57fc053939702f4baf04604a9226873c98526ae5
Add test for Moniker
tests/lsp/test_moniker.py
tests/lsp/test_moniker.py
############################################################################ # Copyright(c) Open Law Library. All rights reserved. # # See ThirdPartyNotices.txt in the project root for additional notices. # # # # Licensed under the Apache License, Version 2.0 (the "License") # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http: // www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # ############################################################################ import unittest from typing import List, Optional from pygls.lsp.methods import TEXT_DOCUMENT_MONIKER from pygls.lsp.types import (Moniker, MonikerKind, MonikerOptions, MonikerParams, Position, TextDocumentIdentifier, UniquenessLevel) from ..conftest import CALL_TIMEOUT, ClientServer class TestMoniker(unittest.TestCase): @classmethod def setUpClass(cls): cls.client_server = ClientServer() cls.client, cls.server = cls.client_server @cls.server.feature( TEXT_DOCUMENT_MONIKER, MonikerOptions(), ) def f(params: MonikerParams) -> Optional[List[Moniker]]: if params.text_document.uri == 'file://return.list': return [ Moniker( scheme='test_scheme', identifier='test_identifier', unique=UniquenessLevel.Global, kind=MonikerKind.Local, ), ] else: return None cls.client_server.start() @classmethod def tearDownClass(cls): cls.client_server.stop() def test_capabilities(self): capabilities = self.server.server_capabilities assert capabilities.moniker_provider def test_moniker_return_list(self): response = self.client.lsp.send_request( TEXT_DOCUMENT_MONIKER, MonikerParams( text_document=TextDocumentIdentifier(uri='file://return.list'), 
position=Position(line=0, character=0), ) ).result(timeout=CALL_TIMEOUT) assert response assert response[0]['scheme'] == 'test_scheme' assert response[0]['identifier'] == 'test_identifier' assert response[0]['unique'] == 'global' assert response[0]['kind'] == 'local' def test_references_return_none(self): response = self.client.lsp.send_request( TEXT_DOCUMENT_MONIKER, MonikerParams( text_document=TextDocumentIdentifier(uri='file://return.none'), position=Position(line=0, character=0), ) ).result(timeout=CALL_TIMEOUT) assert response is None if __name__ == '__main__': unittest.main()
Python
0.000001
20c4df8c61ee1f625ebd77c8613fc470a3e87438
add another lazy function
lazy_function/another_lazy_class.py
lazy_function/another_lazy_class.py
#!/usr/bin/env python # -*- coding: utf-8 -*- class lazy_property(object): def __init__(self, func, name=None, doc=None): self._func = func self._name = name or func.func_name self.__doc__ = doc or func.__doc__ def __get__(self, obj, objtype=None): if obj is None: return self value = self._func(obj) setattr(obj, self._name, value) return value class BaseRequest(object): def form(self): return 123 form = lazy_property(form) bb = BaseRequest() print bb.form print bb.form bb = BaseRequest() print bb.form print bb.form
Python
0.000217
1d8fccf6943adf40c77d5d2df002330719dcfcd1
test for S3Sync
tests/test_s3_sync.py
tests/test_s3_sync.py
import os import unittest from pathlib import Path import mock from taskcat._s3_sync import S3Sync class TestS3Sync(unittest.TestCase): def test_init(self): m_s3_client = mock.Mock() m_s3_client.list_objects_v2.return_value = { "Contents": [{"Key": "test_prefix/test_object", "ETag": "test_etag"}] } m_s3_client.delete_objects.return_value = {} m_s3_client.upload_file.return_value = None prefix = "test_prefix" base_path = "./" if os.getcwd().endswith("/tests") else "./tests/" base_path = Path(base_path + "data/").resolve() S3Sync( m_s3_client, "test_bucket", prefix, str(base_path / "lambda_build_with_submodules"), ) m_s3_client.list_objects_v2.assert_called_once() m_s3_client.delete_objects.assert_called_once() m_s3_client.upload_file.assert_called()
Python
0
0f1cf524c2b90d77e17d516a30d62632ebb5ed2f
Add pipeline for untar'ing GCS blobs.
datathon/datathon_etl_pipelines/generic_imagining/untar_gcs.py
datathon/datathon_etl_pipelines/generic_imagining/untar_gcs.py
r"""Untar .tar and .tar.gz GCS files.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions from apache_beam.options.pipeline_options import SetupOptions from datathon_etl_pipelines.dofns.read_tar_file import ReadTarFile from datathon_etl_pipelines.utils import get_setup_file import tensorflow as tf def write_file(element): path, contents = element with tf.io.gfile.GFile(path, 'wb') as fp: fp.write(contents) def main(): """Build and execute the Apache Beam pipeline using the commandline arguments.""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( '--input_tars', required=True, nargs='+', help="""One or more wildcard patterns that give the full paths to the input tar files on GCS.""") parser.add_argument( '--output_dir', required=True, help="""The output directory to write the untar'd files to.""") args, pipeline_args = parser.parse_known_args() beam_options = PipelineOptions(pipeline_args) # serialize and provide global imports, functions, etc. to workers. beam_options.view_as(SetupOptions).save_main_session = True beam_options.view_as(SetupOptions).setup_file = get_setup_file() if args.output_dir.endswith('/'): out_dir = args.output_dir[:-1] else: out_dir = args.output_dir def get_full_output_path(relative_path): if relative_path.startswith('/'): return out_dir + relative_path else: return '{}/{}'.format(out_dir, relative_path) with beam.Pipeline(options=beam_options) as p: _ = \ (p | beam.Create(tf.io.gfile.glob(args.input_tars)) | 'Untar' >> beam.ParDo(ReadTarFile(), get_full_output_path) | 'Write' >> beam.Map(write_file)) if __name__ == '__main__': main()
Python
0
d68d4e8c1adfa1cdc9577d133c48717b504092e5
Test extension
tests/testcallable.py
tests/testcallable.py
# Copyright (C) 2007-2011 Michael Foord & the mock team # E-mail: fuzzyman AT voidspace DOT org DOT uk # http://www.voidspace.org.uk/python/mock/ from tests.support import is_instance, unittest2, X, SomeClass from mock import ( Mock, MagicMock, NonCallableMagicMock, NonCallableMock, patch, create_autospec ) """ Note that NonCallableMock and NonCallableMagicMock still have the unused (and unusable) attributes: return_value, side_effect, call_count, call_args and call_args_list. These could be removed or raise errors on getting / setting. They also have the assert_called_with and assert_called_once_with methods. Removing these would be pointless as fetching them would create a mock (attribute) that could be called without error. """ class TestCallable(unittest2.TestCase): def test_non_callable(self): for mock in NonCallableMagicMock(), NonCallableMock(): self.assertRaises(TypeError, mock) self.assertFalse(hasattr(mock, '__call__')) def test_attributes(self): one = NonCallableMock() self.assertTrue(issubclass(type(one.one), Mock)) two = NonCallableMagicMock() self.assertTrue(issubclass(type(two.two), MagicMock)) def test_subclasses(self): class MockSub(Mock): pass one = MockSub() self.assertTrue(issubclass(type(one.one), MockSub)) class MagicSub(MagicMock): pass two = MagicSub() self.assertTrue(issubclass(type(two.two), MagicSub)) def test_patch_spec(self): patcher = patch('%s.X' % __name__, spec=True) mock = patcher.start() self.addCleanup(patcher.stop) instance = mock() mock.assert_called_once_with() self.assertTrue(is_instance(instance, NonCallableMagicMock)) self.assertRaises(TypeError, instance) def test_patch_spec_instance(self): patcher = patch('%s.X' % __name__, spec=X()) mock = patcher.start() self.addCleanup(patcher.stop) self.assertTrue(is_instance(mock, NonCallableMagicMock)) self.assertRaises(TypeError, mock) def test_patch_spec_callable_class(self): class CallableX(X): def __call__(self): pass class Sub(CallableX): pass class Multi(SomeClass, Sub): pass 
class OldStyle: def __call__(self): pass class OldStyleSub(OldStyle): pass for Klass in CallableX, Sub, Multi, OldStyle, OldStyleSub: patcher = patch('%s.X' % __name__, spec=Klass) mock = patcher.start() self.addCleanup(patcher.stop) instance = mock() mock.assert_called_once_with() self.assertTrue(is_instance(instance, MagicMock)) instance() instance.assert_called_once_with() def test_create_autopsec(self): mock = create_autospec(X) instance = mock() self.assertRaises(TypeError, instance) mock = create_autospec(X()) self.assertRaises(TypeError, mock)
# Copyright (C) 2007-2011 Michael Foord & the mock team # E-mail: fuzzyman AT voidspace DOT org DOT uk # http://www.voidspace.org.uk/python/mock/ from tests.support import is_instance, unittest2, X from mock import ( Mock, MagicMock, NonCallableMagicMock, NonCallableMock, patch, create_autospec ) """ Note that NonCallableMock and NonCallableMagicMock still have the unused (and unusable) attributes: return_value, side_effect, call_count, call_args and call_args_list. These could be removed or raise errors on getting / setting. They also have the assert_called_with and assert_called_once_with methods. Removing these would be pointless as fetching them would create a mock (attribute) that could be called without error. """ class TestCallable(unittest2.TestCase): def test_non_callable(self): for mock in NonCallableMagicMock(), NonCallableMock(): self.assertRaises(TypeError, mock) self.assertFalse(hasattr(mock, '__call__')) def test_attributes(self): one = NonCallableMock() self.assertTrue(issubclass(type(one.one), Mock)) two = NonCallableMagicMock() self.assertTrue(issubclass(type(two.two), MagicMock)) def test_subclasses(self): class MockSub(Mock): pass one = MockSub() self.assertTrue(issubclass(type(one.one), MockSub)) class MagicSub(MagicMock): pass two = MagicSub() self.assertTrue(issubclass(type(two.two), MagicSub)) def test_patch_spec(self): patcher = patch('%s.X' % __name__, spec=True) mock = patcher.start() self.addCleanup(patcher.stop) instance = mock() mock.assert_called_once_with() self.assertTrue(is_instance(instance, NonCallableMagicMock)) self.assertRaises(TypeError, instance) def test_patch_spec_instance(self): patcher = patch('%s.X' % __name__, spec=X()) mock = patcher.start() self.addCleanup(patcher.stop) self.assertTrue(is_instance(mock, NonCallableMagicMock)) self.assertRaises(TypeError, mock) def test_patch_spec_callable_class(self): class CallableX(X): def __call__(self): pass patcher = patch('%s.X' % __name__, spec=CallableX) mock = 
patcher.start() self.addCleanup(patcher.stop) instance = mock() mock.assert_called_once_with() self.assertTrue(is_instance(instance, MagicMock)) instance() instance.assert_called_once_with() def test_create_autopsec(self): mock = create_autospec(X) instance = mock() self.assertRaises(TypeError, instance) mock = create_autospec(X()) self.assertRaises(TypeError, mock)
Python
0.000001
4302389b1e4e5ba753b2f76427408910c05f683c
replace our single use of assertEquals with assertEqual
tests/thirdparty_tests.py
tests/thirdparty_tests.py
# -*- coding: utf-8 -*- # # Copyright (C) 2008 John Paulett (john -at- paulett.org) # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. import unittest import jsonpickle RSS_DOC = """<?xml version="1.0" encoding="utf-8"?> <feed xmlns="http://www.w3.org/2005/Atom" xml:base="http://example.org/" xml:lang="en"> <title type="text">Sample Feed</title> <subtitle type="html">For documentation &lt;em&gt;only&lt;/em&gt;</subtitle> <link rel="alternate" type="html" href="/"/> <link rel="self" type="application/atom+xml" href="http://www.example.org/atom10.xml"/> <rights type="html">&lt;p>Copyright 2005, Mark Pilgrim&lt;/p>&lt;</rights> <generator uri="http://example.org/generator/" version="4.0">Sample Toolkit</generator> <id>tag:feedparser.org,2005-11-09:/docs/examples/atom10.xml</id> <updated>2005-11-09T11:56:34Z</updated> <entry> <title>First entry title</title> <link rel="alternate" href="/entry/3"/> <link rel="related" type="text/html" href="http://search.example.com/"/> <link rel="via" type="text/html" href="http://toby.example.com/examples/atom10"/> <link rel="enclosure" type="video/mpeg4" href="http://www.example.com/movie.mp4" length="42301"/> <id>tag:feedparser.org,2005-11-09:/docs/examples/atom10.xml:3</id> <published>2005-11-09T00:23:47Z</published> <updated>2005-11-09T11:56:34Z</updated> <author> <name>Mark Pilgrim</name> <uri>http://diveintomark.org/</uri> <email>mark@example.org</email> </author> <contributor> <name>Joe</name> <uri>http://example.org/joe/</uri> <email>joe@example.org</email> </contributor> <contributor> <name>Sam</name> <uri>http://example.org/sam/</uri> <email>sam@example.org</email> </contributor> <summary type="text">Watch out for nasty tricks</summary> <content type="xhtml" xml:base="http://example.org/entry/3" xml:lang="en-US"> <div xmlns="http://www.w3.org/1999/xhtml">Watch out for <span style="background: 
url(javascript:window.location='http://example.org/')"> nasty tricks</span></div> </content> </entry> </feed>""" class FeedParserTest(unittest.TestCase): def setUp(self): try: import feedparser except ImportError as e: if hasattr(self, 'skipTest'): doit = self.skipTest else: doit = self.fail doit('feedparser module not available, please install') self.doc = feedparser.parse(RSS_DOC) def test(self): pickled = jsonpickle.encode(self.doc) unpickled = jsonpickle.decode(pickled) self.assertEqual(self.doc['feed']['title'], unpickled['feed']['title']) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(FeedParserTest, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
# -*- coding: utf-8 -*- # # Copyright (C) 2008 John Paulett (john -at- paulett.org) # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. import unittest import jsonpickle RSS_DOC = """<?xml version="1.0" encoding="utf-8"?> <feed xmlns="http://www.w3.org/2005/Atom" xml:base="http://example.org/" xml:lang="en"> <title type="text">Sample Feed</title> <subtitle type="html">For documentation &lt;em&gt;only&lt;/em&gt;</subtitle> <link rel="alternate" type="html" href="/"/> <link rel="self" type="application/atom+xml" href="http://www.example.org/atom10.xml"/> <rights type="html">&lt;p>Copyright 2005, Mark Pilgrim&lt;/p>&lt;</rights> <generator uri="http://example.org/generator/" version="4.0">Sample Toolkit</generator> <id>tag:feedparser.org,2005-11-09:/docs/examples/atom10.xml</id> <updated>2005-11-09T11:56:34Z</updated> <entry> <title>First entry title</title> <link rel="alternate" href="/entry/3"/> <link rel="related" type="text/html" href="http://search.example.com/"/> <link rel="via" type="text/html" href="http://toby.example.com/examples/atom10"/> <link rel="enclosure" type="video/mpeg4" href="http://www.example.com/movie.mp4" length="42301"/> <id>tag:feedparser.org,2005-11-09:/docs/examples/atom10.xml:3</id> <published>2005-11-09T00:23:47Z</published> <updated>2005-11-09T11:56:34Z</updated> <author> <name>Mark Pilgrim</name> <uri>http://diveintomark.org/</uri> <email>mark@example.org</email> </author> <contributor> <name>Joe</name> <uri>http://example.org/joe/</uri> <email>joe@example.org</email> </contributor> <contributor> <name>Sam</name> <uri>http://example.org/sam/</uri> <email>sam@example.org</email> </contributor> <summary type="text">Watch out for nasty tricks</summary> <content type="xhtml" xml:base="http://example.org/entry/3" xml:lang="en-US"> <div xmlns="http://www.w3.org/1999/xhtml">Watch out for <span style="background: 
url(javascript:window.location='http://example.org/')"> nasty tricks</span></div> </content> </entry> </feed>""" class FeedParserTest(unittest.TestCase): def setUp(self): try: import feedparser except ImportError as e: if hasattr(self, 'skipTest'): doit = self.skipTest else: doit = self.fail doit('feedparser module not available, please install') self.doc = feedparser.parse(RSS_DOC) def test(self): pickled = jsonpickle.encode(self.doc) unpickled = jsonpickle.decode(pickled) self.assertEquals(self.doc['feed']['title'], unpickled['feed']['title']) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(FeedParserTest, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
Python
0
0bf6f0b6021b2ca3801b0d68c0ee63e39ddc36df
Make a ValueBuffer class
proj/avg_pdti8/util.py
proj/avg_pdti8/util.py
#!/bin/env python # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nmigen import Mux, Signal, signed from nmigen_cfu import InstructionBase, SimpleElaboratable, TestBase, Cfu, CfuTestBase from nmigen.sim import Delay, Settle import unittest class ValueBuffer(SimpleElaboratable): """Buffers a signal. Parameters: inp: A Signal The signal to buffer Interface: capture: Signal() Input. When high, captures input while transparently placing on output. When low, output is equal to last captured input. output: Signal(like inp) Output. The last captured input. """ def __init__(self, inp): self.capture = Signal() self.input = inp self.output = Signal.like(inp) def elab(self, m): captured = Signal.like(self.input) with m.If(self.capture): m.d.sync += captured.eq(self.input) m.d.comb += self.output.eq(Mux(self.capture, self.input, captured)) class ValueBufferTest(TestBase): def create_dut(self): self.in_signal = Signal(4) return ValueBuffer(self.in_signal) def test(self): DATA = [ ((0, 0), 0), ((1, 5), 5), ((0, 3), 5), ((0, 2), 5), ((0, 2), 5), ((1, 2), 2), ((0, 2), 2), ((0, 2), 2), ] def process(): for n, ((capture, in_sig), expected_output) in enumerate(DATA): yield self.in_signal.eq(in_sig) yield self.dut.capture.eq(capture) yield Settle() self.assertEqual((yield self.dut.output), expected_output, f"cycle={n}") yield self.run_sim(process, True) if __name__ == '__main__': unittest.main()
Python
0