column        type           range
------        ----           -----
commit        stringlengths  40-40
subject       stringlengths  1-3.25k
old_file      stringlengths  4-311
new_file      stringlengths  4-311
old_contents  stringlengths  0-26.3k
lang          stringclasses  3 values
proba         float64        0-1
diff          stringlengths  0-7.82k
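Each `diff` value in the records below is stored percent-encoded (`%0A` for newline, `%25` for a literal `%`, `%22` for `"`, `%5B`/`%5D` for brackets, and so on). A minimal sketch of how one might restore a readable patch from a record, using only the Python standard library; the dict-shaped record and the `decode_diff` helper name are illustrative assumptions, not part of the dataset:

from urllib.parse import unquote

def decode_diff(record):
    # `record` is assumed to be a mapping keyed by the columns above:
    # commit, subject, old_file, new_file, old_contents, lang, proba, diff.
    # unquote() reverses the percent-encoding used in the `diff` field.
    return unquote(record['diff'])

# Usage on a prefix of the first record in this dump:
record = {'diff': '@@ -0,0 +1,414 @@ +from numpy import *%0A%0Adef inner(v,w):%0A    return sum(v.conj() * w)%0A'}
print(decode_diff(record))
# @@ -0,0 +1,414 @@ +from numpy import *
#
# def inner(v,w):
#     return sum(v.conj() * w)

The same decoding applies to every `diff` field below; the encoded values are left as stored so the records stay faithful to the dataset.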
3ca7eaca8026088dba1719a5dd2e3da1a6ffe404
add householder qr algorithm - not working yet.
qr/hqr.py
qr/hqr.py
Python
0
@@ -0,0 +1,414 @@ +from numpy import *%0A%0Adef inner(v,w):%0A return sum(v.conj() * w)%0A%0Adef qr(a):%0A (m,n) = shape(a)%0A v = zeros((m,n))%0A for k in range(n):%0A print(%22k=%25d%22 %25 k)%0A x = a%5Bk:m,k%5D%0A x%5B0%5D += sign(x%5B0%5D)*linalg.norm(x)%0A vk = x / linalg.norm(x)%0A v%5Bk:m,k%5D = vk%0A print(vk)%0A a%5Bk:m,k:n%5D -= 2*vk%5B:,newaxis%5D.dot(vk.conj().dot(a%5Bk:m,k:n%5D)%5Bnewaxis,:%5D)%0A print(a)%0A return v%0A
c111bc4dd1c040b2ddf1a83c4d93692a77eb269f
Create __init__.py
fade/database/versions/__init__.py
fade/database/versions/__init__.py
Python
0.000429
@@ -0,0 +1 @@ +%0A
3067c29b47974d75e8f9a6f01596e9be10411b81
Add admin.py file
admin.py
admin.py
Python
0.000001
@@ -0,0 +1,1965 @@ +# Copyright (c) 2010, Scott Ferguson%0A# All rights reserved.%0A# %0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions are met:%0A# * Redistributions of source code must retain the above copyright%0A# notice, this list of conditions and the following disclaimer.%0A# * Redistributions in binary form must reproduce the above copyright%0A# notice, this list of conditions and the following disclaimer in the%0A# documentation and/or other materials provided with the distribution.%0A# * Neither the name of the software nor the%0A# names of its contributors may be used to endorse or promote products%0A# derived from this software without specific prior written permission.%0A# %0A# THIS SOFTWARE IS PROVIDED BY SCOTT FERGUSON ''AS IS'' AND ANY%0A# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED%0A# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE%0A# DISCLAIMED. IN NO EVENT SHALL SCOTT FERGUSON BE LIABLE FOR ANY%0A# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES%0A# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;%0A# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND%0A# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT%0A# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS%0A# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.%0A%0Afrom django.contrib import admin%0A%0Afrom models import AndroidDevice %0A%0Adef registration_id(object):%0A return '%25s...' %25 object.registration_id%5B:24%5D%0Aregistration_id.short_description = %22Registration ID%22%0A%0Aclass AndroidDeviceAdmin(admin.ModelAdmin):%0A list_display = (%0A 'device_id',%0A registration_id,%0A 'collapse_key',%0A 'last_messaged',%0A 'failed_push' %0A )%0A%0Aadmin.site.register(AndroidDevice, AndroidDeviceAdmin)%0A
d942340fb5cfe8aa9aade11b3117b9848097c8a1
Write an abstraction for storing locality state in ES
alerts/geomodel/journal.py
alerts/geomodel/journal.py
Python
0.000054
@@ -0,0 +1,1043 @@ +'''To make GeoModel code more testable, we abstract interaction with%0AElasticSearch away via a %22journal interface%22. This is just a function that,%0Acalled with an ES index and a list of %60Entry%60, stores the contained locality%0Astate data in ElasticSearch.%0A'''%0A%0Afrom typing import Callable, List, NamedTuple%0A%0Afrom mozdef_util.elasticsearch_client import ElasticsearchClient as ESClient%0A%0Afrom alerts.geomodel.locality import State%0A%0A%0A# TODO: Switch to dataclasses when we upgrade to Python 3.7+%0A%0Aclass Entry(NamedTuple):%0A '''%0A '''%0A%0A identifier: str%0A state: State%0A%0AJournalInterface = Callable%5B%5BList%5BEntry%5D, str%5D%5D%0A%0A%0Adef wrap(client: ESClient) -%3E JournalInterface:%0A '''Wrap an %60ElasticsearchClient%60 in a closure of type %60JournalInterface%60.%0A '''%0A%0A def wrapper(entries: List%5BEntry%5D, esindex: str):%0A for entry in entries:%0A document = dict(entry.state._asdict())%0A%0A client.save_object(%0A index=esindex,%0A body=document,%0A doc_id=entry.identifer)%0A%0A return wrapper%0A
b1f964e9725a18014de17d454bb733b7ad43cd38
Write Pytac script to write all readback pvs to file
pytac/write_to_file_readback_pvs.py
pytac/write_to_file_readback_pvs.py
Python
0
@@ -0,0 +1,867 @@ +import pytac.load_csv%0Aimport pytac.epics%0A%0A%0Adef write_data_to_file(file_name, data):%0A fin = open(file_name, 'w')%0A for row in data:%0A fin.write('%7B0%7D%5Cn'.format(row))%0A fin.close()%0A%0A%0Adef get_readback_pvs(mode):%0A lattice = pytac.load_csv.load(mode, pytac.epics.EpicsControlSystem())%0A elements = lattice.get_elements()%0A readback_pvs = list()%0A%0A # Get the readback pvs of all elements%0A for element in elements:%0A fields = element.get_fields()%0A for field in fields:%0A readback_pvs.append(element.get_pv_name(field, 'readback'))%0A return readback_pvs%0A%0A%0Adef main():%0A readback_pvs = get_readback_pvs('VMX')%0A%0A # Sort the result. It is required for comparison with the Matlab result.%0A readback_pvs = sorted(readback_pvs)%0A write_data_to_file('readback_pvs_py.txt', readback_pvs)%0A%0Aif __name__=='__main__':%0A main()%0A
a88aed479937b09b560c8820d3d5c1003a94b9f1
add google hangout parser
parse_hangouts.py
parse_hangouts.py
Python
0.000004
@@ -0,0 +1,1107 @@ +#!/usr/bin/env python%0A# usage: %60python parse_hangouts.py path/to/takeout/%60%0A%0Aimport json%0Aimport sys%0A%0A%0A# holy wow this format is a mess without any docs%0Adef parse_hangouts(path):%0A data = json.loads(open(path, 'r').read())%0A ids = %7B%7D%0A for conversation in data%5B'conversation_state'%5D:%0A for x in conversation%5B'conversation_state'%5D%5B'conversation'%5D%5B'participant_data'%5D:%0A if 'fallback_name' in x:%0A ids%5Bx%5B'id'%5D%5B'gaia_id'%5D%5D = x%5B'fallback_name'%5D%0A for message in conversation%5B'conversation_state'%5D%5B'event'%5D:%0A sender = ids.get(message%5B'sender_id'%5D%5B'gaia_id'%5D, %22notfound%22)%0A if sender in ('Mike Perrone', 'Michael Perrone')%5C%0A and 'chat_message' in message%5C%0A and 'segment' in message%5B'chat_message'%5D%5B'message_content'%5D:%0A for segment in message%5B'chat_message'%5D%5B'message_content'%5D%5B'segment'%5D:%0A if 'text' in segment:%0A print segment%5B'text'%5D%0A%0A%0Aparse_hangouts(sys.argv%5B-1%5D + %22Hangouts/Hangouts.json%22)%0A
514a04b5ffa7c9e3ede068c860933e9a404e6063
add missing file.
biokit/stats/criteria.py
biokit/stats/criteria.py
Python
0.000001
@@ -0,0 +1,217 @@ +import math%0A%0A%0Adef AIC(L, k):%0A return 2*k - 2 * math.log(L)%0A%0A%0Adef AICc(L, k, n):%0A return AIC(L, k) + 2*k*(k+1.)/(n-k-1.)%0A%0A%0Adef BIC(L, k, n):%0A return -2 * math.log(L) + k * (math.log(n) - math.log(2*math.pi))%0A%0A
ee8b3fd94bac16390b367dc5030489738ff67958
add example to get UETable from data base
tools/getUETable_cfg.py
tools/getUETable_cfg.py
Python
0
@@ -0,0 +1,1579 @@ +import FWCore.ParameterSet.Config as cms%0Aprocess = cms.Process(%22jectxt%22)%0Aprocess.load('Configuration.StandardSequences.Services_cff')%0Aprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')%0A# define your favorite global tag%0Aprocess.GlobalTag.globaltag = '74X_dataRun2_HLT_ppAt5TeV_v0'#Prompt_v4'#auto:run2_data'%0A%0Aprocess.GlobalTag.toGet.extend(%5B%0A cms.PSet(record = cms.string(%22JetCorrectionsRecord%22),%0A tag = cms.string(%22UETableCompatibilityFormat_Calo_v02_offline%22),%0A label = cms.untracked.string(%22UETable_Calo%22)%0A )%0A%5D)%0A%0Aprocess.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))%0Aprocess.source = cms.Source(%22EmptySource%22)%0Aprocess.readAK4PF = cms.EDAnalyzer('JetCorrectorDBReader', %0A # below is the communication to the database %0A payloadName = cms.untracked.string('UETable_Calo'),%0A # this is used ONLY for the name of the printed txt files. You can use any name that you like, %0A # but it is recommended to use the GT name that you retrieved the files from.%0A globalTag = cms.untracked.string('74X_dataRun2_HLT_ppAt5TeV_v0'),%0A printScreen = cms.untracked.bool(False),%0A createTextFile = cms.untracked.bool(True)%0A )%0A%0Aprocess.readAK4PFoff = process.readAK4PF.clone(payloadName = 'UETable_Calo')%0Aprocess.p = cms.Path(process.readAK4PFoff)%0A
5e1440874bc4e3f5ab2de23f72ad7f950ccce12e
add missing migration for the `fake` backend
squad/ci/migrations/0019_add_fake_backend.py
squad/ci/migrations/0019_add_fake_backend.py
Python
0.000001
@@ -0,0 +1,536 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.10 on 2018-02-26 21:03%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('ci', '0018_testjob_dates'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='backend',%0A name='implementation_type',%0A field=models.CharField(choices=%5B('fake', 'fake'), ('lava', 'lava'), ('null', 'null')%5D, default='null', max_length=64),%0A ),%0A %5D%0A
0083a6fadad8bb0f202bab2af183a10f09e19459
Add simple demo of piglow - lighting arms
piglow/demo_piglow.py
piglow/demo_piglow.py
Python
0
@@ -0,0 +1,288 @@ +from piglow import PiGlow%0Aimport time%0A%0Adef brighten_arm( arm ):%0A%0A for i in range( 1, 10 ):%0A piglow.arm( arm, i )%0A time.sleep( 0.11 )%0A%0A time.sleep( 0.5 )%0A piglow.arm( arm, 0 )%0A%0Apiglow = PiGlow()%0A%0Apiglow.all(0)%0A%0Abrighten_arm( 1 )%0Abrighten_arm( 2 )%0Abrighten_arm( 3 )%0A%0A
e9e67243f9a5af7c38294c35fbbbb09f5ea224ee
Add missing pandas import
libs/utils/analysis/cpus_analysis.py
libs/utils/analysis/cpus_analysis.py
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

""" CPUs Analysis Module """

import matplotlib.pyplot as plt
import pylab as pl

from trappy.utils import listify

from analysis_module import AnalysisModule


class CpusAnalysis(AnalysisModule):
    """
    Support for CPUs Signals Analysis

    :param trace: input Trace object
    :type trace: :mod:`libs.utils.Trace`
    """

    def __init__(self, trace):
        super(CpusAnalysis, self).__init__(trace)


###############################################################################
# DataFrame Getter Methods
###############################################################################

    def _dfg_context_switches(self):
        """
        Compute number of context switches on each CPU.

        :returns: :mod:`pandas.DataFrame`
        """
        if not self._trace.hasEvents('sched_switch'):
            self._log.warning('Events [sched_switch] not found, context switch '
                              'computation not possible!')
            return None

        sched_df = self._dfg_trace_event('sched_switch')
        cpus = range(self._platform['cpus_count'])
        ctx_sw_df = pd.DataFrame(
            [len(sched_df[sched_df['__cpu'] == cpu]) for cpu in cpus],
            index=cpus,
            columns=['context_switch_cnt']
        )
        ctx_sw_df.index.name = 'cpu'

        return ctx_sw_df


###############################################################################
# Plotting Methods
###############################################################################

    def plotCPU(self, cpus=None):
        """
        Plot CPU-related signals for both big and LITTLE clusters.

        :param cpus: list of CPUs to be plotted
        :type cpus: list(int)
        """
        if not self._trace.hasEvents('sched_load_avg_cpu'):
            self._log.warning('Events [sched_load_avg_cpu] not found, '
                              'plot DISABLED!')
            return

        # Filter on specified cpus
        if cpus is None:
            cpus = sorted(self._platform['clusters']['little'] +
                          self._platform['clusters']['big'])
        cpus = listify(cpus)

        # Plot: big CPUs
        bcpus = set(cpus) & set(self._platform['clusters']['big'])
        if bcpus:
            self._plotCPU(bcpus, "big")

        # Plot: LITTLE CPUs
        lcpus = set(cpus) & set(self._platform['clusters']['little'])
        if lcpus:
            self._plotCPU(lcpus, "LITTLE")


###############################################################################
# Utility Methods
###############################################################################

    def _plotCPU(self, cpus, label=''):
        """
        Internal method that generates plots for all input CPUs.

        :param cpus: list of CPUs to be plotted
        :type cpus: list(int)
        """
        if label != '':
            label1 = '{} '.format(label)
            label2 = '_{}s'.format(label.lower())

        # Plot required CPUs
        _, pltaxes = plt.subplots(len(cpus), 1, figsize=(16, 3*(len(cpus))))

        idx = 0
        for cpu in cpus:

            # Reference axes to be used
            axes = pltaxes
            if len(cpus) > 1:
                axes = pltaxes[idx]

            # Add CPU utilization
            axes.set_title('{0:s}CPU [{1:d}]'.format(label1, cpu))
            df = self._dfg_trace_event('sched_load_avg_cpu')
            df = df[df.cpu == cpu]
            if len(df):
                df[['util_avg']].plot(ax=axes, drawstyle='steps-post',
                                      alpha=0.4)

            # if self._trace.hasEvents('sched_boost_cpu'):
            #     df = self._dfg_trace_event('sched_boost_cpu')
            #     df = df[df.cpu == cpu]
            #     if len(df):
            #         df[['usage', 'boosted_usage']].plot(
            #             ax=axes,
            #             style=['m-', 'r-'],
            #             drawstyle='steps-post');

            # Add Capacities data if avilable
            if self._trace.hasEvents('cpu_capacity'):
                df = self._dfg_trace_event('cpu_capacity')
                df = df[df.cpu == cpu]
                if len(df):
                    # data = df[['capacity', 'tip_capacity', 'max_capacity']]
                    # data.plot(ax=axes, style=['m', 'y', 'r'],
                    data = df[['capacity', 'tip_capacity']]
                    data.plot(ax=axes, style=['m', '--y'],
                              drawstyle='steps-post')

            # Add overutilized signal to the plot
            self._trace.analysis.status.plotOverutilized(axes)

            axes.set_ylim(0, 1100)
            axes.set_xlim(self._trace.x_min, self._trace.x_max)

            if idx == 0:
                axes.annotate("{}CPUs Signals".format(label1),
                              xy=(0, axes.get_ylim()[1]),
                              xytext=(-50, 25),
                              textcoords='offset points', fontsize=16)

            # Disable x-axis timestamp for top-most cpus
            if len(cpus) > 1 and idx < len(cpus)-1:
                axes.set_xticklabels([])
                axes.set_xlabel('')

            axes.grid(True)
            idx += 1

        # Save generated plots into datadir
        figname = '{}/{}cpus{}.png'.format(self._trace.plots_dir,
                                           self._trace.plots_prefix, label2)
        pl.savefig(figname, bbox_inches='tight')

    def plotContextSwitch(self):
        """
        Plot histogram of context switches on each CPU.
        """
        if not self._trace.hasEvents('sched_switch'):
            self._log.warning('Events [sched_switch] not found, plot DISABLED!')
            return

        ctx_sw_df = self._dfg_context_switches()
        ax = ctx_sw_df.plot.bar(title="Per-CPU Task Context Switches",
                                legend=False,
                                figsize=(16, 8))
        ax.grid()

# vim :set tabstop=4 shiftwidth=4 expandtab
Python
0.000001
@@ -710,16 +710,36 @@ ab as pl +%0Aimport pandas as pd %0A%0Afrom t
b3f8be5b6ab7e4e713004447a3cfbda743d80394
Add management command to update corpus logic hashes
rules/management/commands/CorpusLogicUpdate.py
rules/management/commands/CorpusLogicUpdate.py
Python
0
@@ -0,0 +1,940 @@ +import logging%0Afrom django.core.management.base import BaseCommand, CommandError%0A%0Afrom plyara import YaraParser%0Afrom rules.models import YaraRule%0A%0A# Configure Logging%0Alogging.basicConfig(level=logging.INFO)%0A%0A%0Aclass Command(BaseCommand):%0A%0A help = 'Recalculate the logic hashes of the entire rule corpus'%0A%0A def handle(self, *args, **options):%0A corpus = YaraRule.objects.all()%0A rule_count = corpus.count()%0A message = 'Updating logic hashes for %7B%7D rules'.format(rule_count)%0A logging.info(message)%0A rule_index = 0%0A%0A for rule in corpus.iterator():%0A rule_index += 1%0A logic_data = %7B'strings': rule.strings, 'condition_terms': rule.condition%7D%0A logic_hash = YaraParser.parserInterpreter.generateLogicHash(logic_data)%0A rule.logic_hash = logic_hash%0A rule.save()%0A logging.info('Rule Logic Update: %7B%7D of %7B%7D'.format(rule_index, rule_count))%0A
aefb6fbf38f8756458e487328139caf41afb6cee
Create MD5-DICT.py
MD5-DICT.py
MD5-DICT.py
Python
0
@@ -0,0 +1,572 @@ +from hashlib import md5%0A# by TheZakMan%0A# Exemplo de md5: 21232f297a57a5a743894a0e4a801fc3 (admin)%0A# dict: /usr/share/wordlists/rockyou.txt%0A%0Aprint %22%5BMd5 Dict-Cracker%5D%22%0Aprint %22%7C wordlist.txt %7C%5Cn%22%0Acrackme = raw_input(%22MD5:%22)%0A%0A#f = open('wordlist.txt', 'r')%0Af = open('/usr/share/wordlists/rockyou.txt', 'r')%0A%0Awords = %5Bline%5B0:-1%5D for line in f.readlines()%5D%0A%0A%0A#words = 'test', 'alex', 'steve', 'admin'%0A#print words%0A%0A%0Afor word in words:%0A%09if md5(word).hexdigest() == crackme:%0A%09%09print %22%5CnCracked:%22, word%0A%09%09break%0A%0Aif md5(word).hexdigest() != crackme:%0A print %22%5Cn%5B*%5D Not Found!%22%0A
14e32e60181083c8d0271fc974f3f1161ea81c74
Add first pass on create_tab script
tools/tab/create_tab.py
tools/tab/create_tab.py
Python
0
@@ -0,0 +1,754 @@ +#!/usr/bin/env python%0A%0Aimport datetime%0Aimport io%0Aimport os%0Aimport sys%0Aimport tarfile%0A%0ATAB_VERSION = 1%0A%0Aoutput_filename = sys.argv%5B1%5D%0Aname = sys.argv%5B2%5D%0Ainputs = sys.argv%5B3:%5D%0A%0Ametadata = %5B%5D%0A%0A%0Ametadata.append('tab-version = %7B%7D'.format(TAB_VERSION))%0Ametadata.append('name = %22%7B%7D%22'.format(name))%0Ametadata.append('only-for-boards = %22%22')%0Ametadata.append('build-date = %7B%7D'.format(datetime.datetime.now().isoformat()%5B:19%5D+'Z'))%0A%0A%0Awith tarfile.open(output_filename, 'w') as tar:%0A%09for name in inputs:%0A%09%09arcname = os.path.basename(name)%0A%09%09tar.add(name, arcname=arcname)%0A%0A%09# Add metadata%0A%09data = '%5Cn'.join(metadata).encode('utf-8')%0A%09file = io.BytesIO(data)%0A%09info = tarfile.TarInfo(name='metadata.toml')%0A%09info.size = len(data)%0A%09tar.addfile(tarinfo=info, fileobj=file)%0A
916250bc9509986f1dfce3b09ddbc7a49aa79d42
Add admin for payments
payments/admin.py
payments/admin.py
Python
0
@@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*-%0Afrom django.contrib import admin%0A%0Afrom .models import Payment, Refund%0A%0A%0Aadmin.site.register(Payment)%0Aadmin.site.register(Refund)%0A
25f0615f4fb35779ce8688c5d29f92288ac2c30d
Add filesystem checks class
util/filesystem.py
util/filesystem.py
Python
0.000001
@@ -0,0 +1,1213 @@ +import os%0A%0A%0Aclass FSCheck:%0A def __init__(self, filepath, name: None):%0A if name is None:%0A name = filepath%0A%0A self.filepath = filepath%0A self.name = name%0A%0A def exists(self, error: True):%0A if not os.path.exists(self.filepath):%0A if error:%0A print('%25s executable doesn%5C't exist. Install it first' %25 self.name)%0A exit(32)%0A else:%0A return False%0A%0A return True%0A%0A def access(self, type_, error: True):%0A if type_ in %5B'read', 'r'%5D:%0A access = os.R_OK%0A access_human = 'read'%0A elif type_ in %5B'write', 'w'%5D:%0A access = os.W_OK%0A access_human = 'write'%0A elif type_ in %5B'exec', 'x', 'execute'%5D:%0A access = os.X_OK%0A access_human = 'exec'%0A else:%0A access = None%0A access_human = None%0A%0A if access is None:%0A raise AttributeError('Unknown access type')%0A%0A if not os.access(self.filepath, access):%0A if error:%0A print('No %25s access to %25s' %25 (access_human, self.name))%0A exit(31)%0A else:%0A return False%0A%0A return True%0A
cd7d80da95a3702a31fe0b8a62f4eadfeaa65b85
simplify user profile source
authentic2/attribute_aggregator/user_profile.py
authentic2/attribute_aggregator/user_profile.py
'''
    VERIDIC - Towards a centralized access control system

    Copyright (C) 2011 Mikael Ates

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
'''

import logging

from django.contrib.auth.models import SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist

from authentic2.attribute_aggregator.core import get_profile_field_name_from_definition, \
    get_definition_from_profile_field_name


logger = logging.getLogger('attribute_aggregator.user_profile')

SOURCE_NAME = 'USER_PROFILE'


def get_attributes(user, definitions=None, source=None, **kwargs):
    '''
        Return attributes dictionnary

        Dictionnary format:
            attributes = dict()
            data_from_source = list()
            a1 = dict()
                a1['oid'] = definition_name
            Or
                a1['definition'] = definition_name
                    definition may be the definition name like 'gn'
                    or an alias like 'givenName'
            Or
                a1['name'] = attribute_name_in_ns
                a1['namespace'] = ns_name
            a1['values'] = list_of_values
            data_from_source.append(a1)
            ...
            data_from_source.append(a2)
            attributes[source_name] = data_from_source

        First attempt on 'definition' key.
        Else, definition is searched by 'name' and 'namespece' keys.
    '''
    from models import AttributeSource
    try:
        AttributeSource.objects.get(name=SOURCE_NAME)
    except:
        logger.debug('get_attributes: \
            Profile source not configured')
        return None

    if source and source.name != SOURCE_NAME:
        logger.debug('get_attributes: \
            The required source %s is not user profile' % source)
        return None

    attributes = dict()
    data = []
    try:
        user_profile = user
        field_names = set()
        field_names.update(user_profile._meta.get_all_field_names())
        field_names.update(user._meta.get_all_field_names())
        fields = []
        if definitions:
            for definition in definitions:
                logger.debug('get_attributes: looking for %s' % definition)
                field_name = get_profile_field_name_from_definition(definition)
                if not field_name:
                    '''
                        Profile model may be extended without modifying the
                        mapping file if the attribute name is the same as the
                        definition
                    '''
                    logger.debug('get_attributes: \
                        field name will be the definition')
                    field_name = definition
                if field_name in field_names:
                    fields.append((field_name, definition))
                else:
                    logger.debug('get_attributes: Field not found in profile')
        else:
            fields = [(field_name,
                       get_definition_from_profile_field_name(field_name)) \
                      for field_name \
                      in field_names \
                      if get_definition_from_profile_field_name(field_name)]
        for field_name, definition in fields:
            field = user_profile._meta.get_field_by_name(field_name)[0]
            logger.debug('get_attributes: found field %s aka %s' \
                % (field_name, field.verbose_name))
            value = getattr(user_profile, field_name, None) or getattr(user, field_name, None)
            if value:
                if callable(value):
                    value = value()
                logger.debug('get_attributes: found value %s' % value)
                attr = {}
                attr['definition'] = definition
                attr['values'] = [value]
                data.append(attr)
            else:
                logger.debug('get_attributes: no value found')
    except (SiteProfileNotAvailable, ObjectDoesNotExist):
        logger.debug('get_attributes: No user profile')
        return None

    attributes[SOURCE_NAME] = data
    return attributes
Python
0.000028
@@ -2440,123 +2440,26 @@ -user_profile = user%0A field_names = set()%0A field_names.update(user_profile._meta.get_all_field_names() +field_names = set( )%0A @@ -3758,24 +3758,16 @@ d = user -_profile ._meta.g @@ -3940,51 +3940,8 @@ ue = - getattr(user_profile, field_name, None) or get
edabec29ebb99e938fd3523951597e336ddd3adc
Add text vectorizers benchmarks (#9086)
benchmarks/bench_text_vectorizers.py
benchmarks/bench_text_vectorizers.py
Python
0
@@ -0,0 +1,2112 @@ +%22%22%22%0A%0ATo run this benchmark, you will need,%0A%0A * scikit-learn%0A * pandas%0A * memory_profiler%0A * psutil (optional, but recommended)%0A%0A%22%22%22%0A%0Afrom __future__ import print_function%0A%0Aimport timeit%0Aimport itertools%0A%0Aimport numpy as np%0Aimport pandas as pd%0Afrom memory_profiler import memory_usage%0A%0Afrom sklearn.datasets import fetch_20newsgroups%0Afrom sklearn.feature_extraction.text import (CountVectorizer, TfidfVectorizer,%0A HashingVectorizer)%0A%0An_repeat = 3%0A%0A%0Adef run_vectorizer(Vectorizer, X, **params):%0A def f():%0A vect = Vectorizer(**params)%0A vect.fit_transform(X)%0A return f%0A%0A%0Atext = fetch_20newsgroups(subset='train').data%0A%0Aprint(%22=%22*80 + '%5Cn#' + %22 Text vectorizers benchmark%22 + '%5Cn' + '='*80 + '%5Cn')%0Aprint(%22Using a subset of the 20 newsrgoups dataset (%7B%7D documents).%22%0A .format(len(text)))%0Aprint(%22This benchmarks runs in ~20 min ...%22)%0A%0Ares = %5B%5D%0A%0Afor Vectorizer, (analyzer, ngram_range) in itertools.product(%0A %5BCountVectorizer, TfidfVectorizer, HashingVectorizer%5D,%0A %5B('word', (1, 1)),%0A ('word', (1, 2)),%0A ('word', (1, 4)),%0A ('char', (4, 4)),%0A ('char_wb', (4, 4))%0A %5D):%0A%0A bench = %7B'vectorizer': Vectorizer.__name__%7D%0A params = %7B'analyzer': analyzer, 'ngram_range': ngram_range%7D%0A bench.update(params)%0A dt = timeit.repeat(run_vectorizer(Vectorizer, text, **params),%0A number=1,%0A repeat=n_repeat)%0A bench%5B'time'%5D = %22%7B:.2f%7D (+-%7B:.2f%7D)%22.format(np.mean(dt), np.std(dt))%0A%0A mem_usage = memory_usage(run_vectorizer(Vectorizer, text, **params))%0A%0A bench%5B'memory'%5D = %22%7B:.1f%7D%22.format(np.max(mem_usage))%0A%0A res.append(bench)%0A%0A%0Adf = pd.DataFrame(res).set_index(%5B'analyzer', 'ngram_range', 'vectorizer'%5D)%0A%0Aprint('%5Cn========== Run time performance (sec) ===========%5Cn')%0Aprint('Computing the mean and the standard deviation '%0A 'of the run time over %7B%7D runs...%5Cn'.format(n_repeat))%0Aprint(df%5B'time'%5D.unstack(level=-1))%0A%0Aprint('%5Cn=============== Memory usage (MB) ===============%5Cn')%0Aprint(df%5B'memory'%5D.unstack(level=-1))%0A
9bee248bce5edbf073f66e5d7a621f22bbba314f
Fix a failing test
zmq/devices/__init__.py
zmq/devices/__init__.py
"""0MQ Device classes for running in background threads or processes.""" # # Copyright (c) 2010 Brian E. Granger # # This file is part of pyzmq. # # pyzmq is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pyzmq is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from zmq.core.device import device from zmq.devices import basedevice, monitoredqueue from zmq.devices.basedevice import * from zmq.devices.monitoredqueue import * from zmq.devices.monitoredqueuedevice import * __all__ = ['device'] for submod in (basedevice, monitoredqueue, monitoredqueuedevice): __all__.extend(submod.__all__)
Python
0.999524
@@ -1075,16 +1075,38 @@ redqueue +, monitoredqueuedevice %0A%0Afrom z
8c168933c85f828ec85d6c069143e3c4174657b7
Create 10.CubeProperties.py
TechnologiesFundamentals/ProgrammingFundamentals/MethodsAndDebugging-Excercises/10.CubeProperties.py
TechnologiesFundamentals/ProgrammingFundamentals/MethodsAndDebugging-Excercises/10.CubeProperties.py
Python
0
@@ -0,0 +1,442 @@ +import math%0A%0AcubeSide = float(input())%0Aparameter = input()%0A%0Aif parameter == %22face%22:%0A face = math.sqrt(math.pow(cubeSide, 2) * 2)%0A print(%22%25.2f%22 %25 face)%0A%0Aelif parameter == %22space%22:%0A space = math.sqrt(math.pow(cubeSide, 2) * 3)%0A print(%22%25.2f%22 %25 space)%0A%0Aelif parameter == %22volume%22:%0A volume = math.pow(cubeSide, 3)%0A print(%22%25.2f%22 %25 volume)%0A%0Aelif parameter == %22area%22:%0A area = math.pow(cubeSide, 2) * 6%0A print(%22%25.2f%22 %25 area) %0A
9df4f11d878ee8d13dcbcee49745bdcc8ab3e507
Remove logging from test config
.travis/localsettings.py
.travis/localsettings.py
import os

####### Configuration for CommCareHQ Running on Travis-CI #####

####### Database config. This assumes Postgres #######
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'commcarehq',
        'USER': 'postgres',
        'PASSWORD': '',
        'HOST': 'localhost',
        'PORT': '5432'
    }
}

SQL_REPORTING_DATABASE_URL = "sqlite:////tmp/commcare_reporting_test.db"

####### Couch Config ######
COUCH_HTTPS = False
COUCH_SERVER_ROOT = '127.0.0.1:5984'
COUCH_USERNAME = ''
COUCH_PASSWORD = ''
COUCH_DATABASE_NAME = 'commcarehq'

######## Email setup ########
# email settings: these ones are the custom hq ones
EMAIL_LOGIN = "notifications@dimagi.com"
EMAIL_PASSWORD = "******"
EMAIL_SMTP_HOST = "smtp.gmail.com"
EMAIL_SMTP_PORT = 587
EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'

####### Bitly ########
BITLY_LOGIN = 'dimagi'
BITLY_APIKEY = '*******'

####### Jar signing config ########
_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
JAR_SIGN = dict(
    jad_tool = os.path.join(_ROOT_DIR, "corehq", "apps", "app_manager", "JadTool.jar"),
    key_store = os.path.join(_ROOT_DIR, "InsecureTestingKeyStore"),
    key_alias = "javarosakey",
    store_pass = "onetwothreefourfive",
    key_pass = "onetwothreefourfive",
)

# prod settings
SOIL_DEFAULT_CACHE = "redis"
SOIL_BACKEND = "soil.CachedDownload"

CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'localhost:11211',
    },
    'redis': {
        'BACKEND': 'redis_cache.cache.RedisCache',
        'LOCATION': 'localhost:6379',
        'OPTIONS': {},
    }
}

ELASTICSEARCH_HOST = 'localhost'
ELASTICSEARCH_PORT = 9200

AUDIT_ADMIN_VIEWS=False
Python
0.000001
@@ -1770,8 +1770,334 @@ S=False%0A +%0A# No logging%0ALOGGING = %7B%0A 'version': 1,%0A 'handlers': %7B%0A 'null': %7B%0A 'level': 'DEBUG',%0A 'class': 'django.utils.log.NullHandler',%0A %7D,%0A %7D,%0A 'loggers': %7B%0A '': %7B%0A 'level': 'CRITICAL',%0A 'handler': 'null',%0A 'propagate': False,%0A %7D%0A %7D%0A%7D%0A
16f29bfc832a64accd6ef67c2140f70ea07f2f05
Add PyUnit for deep feature extraction of a LeNet model with mxnet.
h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater_feature_extraction.py
h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater_feature_extraction.py
Python
0
@@ -0,0 +1,1093 @@ +from __future__ import print_function%0Aimport sys, os%0Asys.path.insert(1, os.path.join(%22..%22,%22..%22,%22..%22))%0Aimport h2o%0Afrom tests import pyunit_utils%0Afrom h2o.estimators.deepwater import H2ODeepWaterEstimator%0A%0Adef deepwater_lenet():%0A if not H2ODeepWaterEstimator.available(): return%0A%0A frame = h2o.import_file(pyunit_utils.locate(%22bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv%22))%0A print(frame.head(5))%0A model = H2ODeepWaterEstimator(epochs=100, learning_rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)%0A model.train(x=%5B0%5D,y=1, training_frame=frame)%0A%0A extracted = model.deepfeatures(frame, %22pooling1_output%22)%0A #print(extracted.describe())%0A print(extracted.ncols)%0A assert extracted.ncols == 800, %22extracted frame doesn't have 800 columns%22%0A%0A extracted = model.deepfeatures(frame, %22activation2_output%22)%0A #print(extracted.describe())%0A print(extracted.ncols)%0A assert extracted.ncols == 500, %22extracted frame doesn't have 500 columns%22%0A%0A h2o.remove_all()%0A%0Aif __name__ == %22__main__%22:%0A pyunit_utils.standalone_test(deepwater_lenet)%0Aelse:%0A deepwater_lenet()%0A
9a1635dcdb21548fcb7b1f718624c991602588e6
Initialize P01_isPhoneNumber
books/AutomateTheBoringStuffWithPython/Chapter07/P01_isPhoneNumber.py
books/AutomateTheBoringStuffWithPython/Chapter07/P01_isPhoneNumber.py
Python
0.00737
@@ -0,0 +1,704 @@ +# This program returns True if a string is a phone number and False if not%0A# However, it's not very efficient%0A%0A%0Adef isPhoneNumber(text):%0A if len(text) != 12:%0A return False%0A for i in range(0, 3):%0A if not text%5Bi%5D.isdecimal():%0A return False%0A if text%5B3%5D != '-':%0A return False%0A for i in range(4, 7):%0A if not text%5Bi%5D.isdecimal():%0A return False%0A if text%5B7%5D != '-':%0A return False%0A for i in range(8, 12):%0A if not text%5Bi%5D.isdecimal():%0A return False%0A return True%0A%0A%0Aprint('415-555-4242 is a phone number:')%0Aprint(isPhoneNumber('415-555-4242'))%0Aprint('Moshi moshi is a phone number:')%0Aprint(isPhoneNumber('Moshi moshi'))%0A
715e43bc81227d86f6a8a102aab488d47db801f4
Make a jobspec
tools/run_tests/run_microbenchmark.py
tools/run_tests/run_microbenchmark.py
#!/usr/bin/env python2.7
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import multiprocessing
import os
import subprocess
import sys

import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server

flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')

os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
  os.makedirs('reports')

port_server_port = 32766
start_port_server.start_port_server(port_server_port)

def fnize(s):
  out = ''
  for c in s:
    if c in '<>, /':
      if len(out) and out[-1] == '_': continue
      out += '_'
    else:
      out += c
  return out

# index html
index_html = """
<html>
<head>
<title>Microbenchmark Results</title>
</head>
<body>
"""

def heading(name):
  global index_html
  index_html += "<h1>%s</h1>\n" % name

def link(txt, tgt):
  global index_html
  index_html += "<p><a href=\"%s\">%s</a></p>\n" % (tgt, txt)

benchmarks = []
profile_analysis = []
cleanup = []

for bm_name in sys.argv[1:]:
  # generate latency profiles
  heading('Latency Profiles: %s' % bm_name)
  subprocess.check_call(
      ['make', bm_name,
       'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
  for line in subprocess.check_output(['bins/basicprof/%s' % bm_name,
                                       '--benchmark_list_tests']).splitlines():
    link(line, '%s.txt' % fnize(line))
    benchmarks.append(
        jobset.JobSpec(['bins/basicprof/%s' % bm_name,
                        '--benchmark_filter=^%s$' % line],
                       environ={'LATENCY_TRACE': '%s.trace' % fnize(line)}))
    profile_analysis.append(
        jobset.JobSpec([sys.executable,
                        'tools/profiling/latency_profile/profile_analyzer.py',
                        '--source', '%s.trace' % fnize(line),
                        '--fmt', 'simple',
                        '--out', 'reports/%s.txt' % fnize(line)],
                       timeout_seconds=None))
    cleanup.append('rm', '%s.trace' % fnize(line))
    if len(benchmarks) >= 2 * multiprocessing.cpu_count():
      jobset.run(benchmarks, maxjobs=multiprocessing.cpu_count()/2,
                 add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
      jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
      jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
      benchmarks = []
      profile_analysis = []
      cleanup = []
  if len(benchmarks):
    jobset.run(benchmarks, maxjobs=multiprocessing.cpu_count()/2,
               add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
    jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
    jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())

  # generate flamegraphs
  heading('Flamegraphs: %s' % bm_name)
  subprocess.check_call(
      ['make', bm_name,
       'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
  for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
                                       '--benchmark_list_tests']).splitlines():
    subprocess.check_call(['sudo', 'perf', 'record', '-g', '-c', '1000',
                           'bins/mutrace/%s' % bm_name,
                           '--benchmark_filter=^%s$' % line,
                           '--benchmark_min_time=20'])
    with open('/tmp/bm.perf', 'w') as f:
      f.write(subprocess.check_output(['sudo', 'perf', 'script']))
    with open('/tmp/bm.folded', 'w') as f:
      f.write(subprocess.check_output([
          '%s/stackcollapse-perf.pl' % flamegraph_dir, '/tmp/bm.perf']))
    link(line, '%s.svg' % fnize(line))
    with open('reports/%s.svg' % fnize(line), 'w') as f:
      f.write(subprocess.check_output([
          '%s/flamegraph.pl' % flamegraph_dir, '/tmp/bm.folded']))

index_html += "</body>\n</html>\n"
with open('reports/index.html', 'w') as f:
  w.write(index_html)
Python
0.998946
@@ -3416,16 +3416,32 @@ .append( +jobset.JobSpec(%5B 'rm', '%25 @@ -3458,24 +3458,26 @@ fnize(line) +%5D) )%0A if len
c564c7361769c116a07ebb127c9453ec09b75699
Return true when iCloud finishes setting up
homeassistant/components/device_tracker/icloud.py
homeassistant/components/device_tracker/icloud.py
""" homeassistant.components.device_tracker.icloud ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Device tracker platform that supports scanning iCloud devices. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/device_tracker.icloud/ """ import logging import re from homeassistant.const import CONF_USERNAME, CONF_PASSWORD from homeassistant.helpers.event import track_utc_time_change _LOGGER = logging.getLogger(__name__) REQUIREMENTS = ['pyicloud==0.7.2'] CONF_INTERVAL = 'interval' DEFAULT_INTERVAL = 8 def setup_scanner(hass, config, see): """ Set up the iCloud Scanner. """ from pyicloud import PyiCloudService from pyicloud.exceptions import PyiCloudFailedLoginException from pyicloud.exceptions import PyiCloudNoDevicesException # Get the username and password from the configuration username = config.get(CONF_USERNAME) password = config.get(CONF_PASSWORD) if username is None or password is None: _LOGGER.error('Must specify a username and password') return try: _LOGGER.info('Logging into iCloud Account') # Attempt the login to iCloud api = PyiCloudService(username, password, verify=True) except PyiCloudFailedLoginException as error: _LOGGER.exception('Error logging into iCloud Service: %s', error) return def keep_alive(now): """ Keeps authenticating iCloud connection. """ api.authenticate() _LOGGER.info("Authenticate against iCloud") track_utc_time_change(hass, keep_alive, second=0) def update_icloud(now): """ Authenticate against iCloud and scan for devices. """ try: # The session timeouts if we are not using it so we # have to re-authenticate. This will send an email. api.authenticate() # Loop through every device registered with the iCloud account for device in api.devices: status = device.status() location = device.location() # If the device has a location add it. If not do nothing if location: see( dev_id=re.sub(r"(\s|\W|')", '', status['name']), host_name=status['name'], gps=(location['latitude'], location['longitude']), battery=status['batteryLevel']*100, gps_accuracy=location['horizontalAccuracy'] ) else: # No location found for the device so continue continue except PyiCloudNoDevicesException: _LOGGER.info('No iCloud Devices found!') track_utc_time_change( hass, update_icloud, minute=range(0, 60, config.get(CONF_INTERVAL, DEFAULT_INTERVAL)), second=0 )
Python
0
@@ -1075,24 +1075,30 @@ return + False %0A%0A try:%0A @@ -1447,16 +1447,22 @@ return + False %0A%0A de @@ -3052,12 +3052,29 @@ ond=0%0A )%0A +%0A return True%0A
af6272941a66967c3a64d735223fefc917056562
add example
examples/python/rgb-lcd.py
examples/python/rgb-lcd.py
Python
0
@@ -0,0 +1,1231 @@ +# Author: Brendan Le Foll %3Cbrendan.le.foll@intel.com%3E%0A# Copyright (c) 2014 Intel Corporation.%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining%0A# a copy of this software and associated documentation files (the%0A# %22Software%22), to deal in the Software without restriction, including%0A# without limitation the rights to use, copy, modify, merge, publish,%0A# distribute, sublicense, and/or sell copies of the Software, and to%0A# permit persons to whom the Software is furnished to do so, subject to%0A# the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be%0A# included in all copies or substantial portions of the Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND,%0A# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF%0A# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND%0A# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE%0A# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION%0A# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION%0A# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.%0A%0Aimport pyupm_i2clcd as lcd%0Ax = lcd.Jhd1313m1(0, 0x3E, 0x62)%0Ax.write('hello')%0A
771eede117c29af75c1d8d21f0da538bd280b5c1
Create search.py
search.py
search.py
Python
0
@@ -0,0 +1,2516 @@ +# Requires Parallel to be installed%0A# Use the below command to start with all available cores used%0A# seq %60nproc%60 %7C parallel -u python script.py%0A%0A__authors__ = %5B'Chick3nputer', 'Supersam654'%5D%0A%0Afrom itertools import islice, product%0Aimport string%0Aimport hashlib%0Afrom random import shuffle%0Afrom sys import argv%0A%0Achars = string.ascii_uppercase + string.digits + string.ascii_lowercase%0A%0Adef generate_strings(size):%0A alphabet = list(chars * size)%0A while True:%0A shuffle(alphabet)%0A for i in range(0, len(alphabet), size):%0A yield ''.join(alphabet%5Bi: i + size%5D)%0A%0Adef tsum(hexhash):%0A return sum(int(hexhash%5Bi: i + 2%5D, 16) for i in range(0, len(hexhash), 2))%0A%0Adef edit_distance(h1, h2):%0A xor = int(h1, 16) %5E int(h2, 16)%0A return bin(xor)%5B2:%5D.count('1')%0A%0Adef work():%0A # Start both not at 0 and 128 to avoid a lot of startup noise.%0A max_ones = 109%0A min_ones = 19%0A rand_length = 32 - len(%22Chick3nman-%22)%0A i = 0%0A for combo in generate_strings(rand_length):%0A i += 1%0A if i %25 100000000 == 0:%0A print %22Processed %25d hashes.%22 %25 i%0A clear = %22Chick3nman-%22 + combo%0A hashhex = hashlib.md5(clear).hexdigest()%0A%0A ones_count = bin(int(hashhex, 16))%5B2:%5D.count('1')%0A if ones_count %3E max_ones:%0A plain = hashhex + ':' + clear%0A max_ones = ones_count%0A print %22New BITMAX Hash Found %25s = %25s%22 %25 (plain, max_ones)%0A elif ones_count %3C min_ones:%0A plain = hashhex + ':' + clear%0A min_ones = ones_count%0A print %22New BITMIN Hash Found %25s = %25s%22 %25 (plain, min_ones)%0A%0A if hashhex.startswith('ffffffffffffff'):%0A print %22New MAX Hash Found %25s:%25s%22 %25 (hashhex, clear)%0A elif hashhex.startswith('00000000000000'):%0A print %22New MIN Hash Found %25s:%25s%22 %25 (hashhex, clear)%0A%0A tsumhex = tsum(hashhex)%0A if tsumhex %3C 190:%0A print %22New TMIN Hash Found %25s:%25s%22 %25 (hashhex, clear)%0A elif tsumhex %3E 3909:%0A print %22New TMAX Hash Found %25s:%25s%22 %25 (hashhex, clear)%0A%0A base_distance = edit_distance(hashhex, '0123456789abcdeffedcba9876543210')%0A if base_distance %3C 20:%0A print %22New BASE Hash Found %25s:%25s%22 %25 (hashhex, clear)%0A%0A # Can't prefix with Chick3nman and do this one.%0A # fp_distance = edit_distance(clear, hashhex)%0A # if fp_distance %3C 28:%0A # print %22New FP Hash Found %25s:%25s%22 %25s:%25s%22 %25 (hashhex, clear)%0A%0Aif __name__ == '__main__':%0A print %22Starting worker %25s%22 %25 argv%5B1%5D%0A work()%0A
5ee78767ebaa5c1bbceb7ce2c82fa6687169b0c2
Add exercice The Paranoid Android
codingame/medium/paranoid_android.py
codingame/medium/paranoid_android.py
Python
0.000567
@@ -0,0 +1,2709 @@ +class Elevator(object):%0A def __init__(self, floor, pos):%0A super(Elevator, self).__init__()%0A self.floor = floor%0A self.pos = pos%0A self.direction = None%0A%0A def __str__(self):%0A return 'Elevator on floor %25i (pos %25i) with dir %25s' %25 (self.floor, self.pos, self.direction)%0A%0A%0Aclass Game(object):%0A def __init__(self, nbFloors, width, exitFloor, exitPos, nbElevators):%0A super(Game, self).__init__()%0A self.nbFloors = nbFloors%0A self.width = width%0A self.exitFloor = exitFloor%0A self.exitPos = exitPos%0A self.nbElevators = nbElevators%0A self.elevators = %5B0%5D * nbFloors%0A%0A def addElevators(self):%0A for _ in xrange(self.nbElevators):%0A # elevatorFloor: floor on which this elevator is found%0A # elevatorPos: position of the elevator on its floor%0A elevatorFloor, elevatorPos = %5Bint(j) for j in raw_input().split()%5D%0A self.elevators%5BelevatorFloor%5D = Elevator(elevatorFloor, elevatorPos)%0A # Don't forget to add the elevator leading to the exit%0A self.elevators%5Bself.exitFloor%5D = Elevator(self.exitFloor, self.exitPos)%0A%0A def setElevatorsDirections(self):%0A for i in range(self.nbFloors - 1):%0A if (self.elevators%5Bi%5D.pos %3E self.elevators%5Bi+1%5D.pos):%0A self.elevators%5Bi+1%5D.direction = 'LEFT'%0A else:%0A self.elevators%5Bi+1%5D.direction = 'RIGHT'%0A%0A%0A# nbFloors: number of floors%0A# width: width of the area%0A# nbRounds: maximum number of rounds%0A# exitFloor: floor on which the exit is found%0A# exitPos: position of the exit on its floor%0A# nbTotalClones: number of generated clones%0A# nbAdditionalElevators: ignore (always zero)%0A# nbElevators: number of elevators%0AnbFloors, width, nbRounds, exitFloor, exitPos, nbTotalClones, nbAdditionalElevators, nbElevators = %5Bint(i) for i in raw_input().split()%5D%0A%0Agame = Game(nbFloors, width, exitFloor, exitPos, nbElevators)%0Agame.addElevators()%0Agame.setElevatorsDirections()%0A%0AfirstRound = True%0A# Game loop%0Awhile True:%0A # cloneFloor: floor of the leading clone%0A # clonePos: position of the leading clone on its floor%0A # direction: direction of the leading clone: LEFT or RIGHT%0A cloneFloor, clonePos, direction = raw_input().split()%0A cloneFloor = int(cloneFloor)%0A clonePos = int(clonePos)%0A%0A if firstRound:%0A firstRound = False%0A if (clonePos %3C game.elevators%5B0%5D.pos):%0A game.elevators%5B0%5D.direction = 'RIGHT'%0A else:%0A game.elevators%5B0%5D.direction = 'LEFT'%0A%0A if cloneFloor == -1:%0A print 'WAIT'%0A else:%0A if direction == game.elevators%5BcloneFloor%5D.direction:%0A print 'WAIT'%0A else:%0A print 'BLOCK'%0A
f7132b86ca5f4dafeb88ca65b3d7fe71c6886cc5
Add packageinfo command
cerbero/commands/info.py
cerbero/commands/info.py
Python
0.000001
@@ -0,0 +1,2240 @@ +# cerbero - a multi-platform build system for Open Source software%0A# Copyright (C) 2012 Andoni Morales Alastruey %3Cylatuya@gmail.com%3E%0A#%0A# This library is free software; you can redistribute it and/or%0A# modify it under the terms of the GNU Library General Public%0A# License as published by the Free Software Foundation; either%0A# version 2 of the License, or (at your option) any later version.%0A#%0A# This library is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU%0A# Library General Public License for more details.%0A#%0A# You should have received a copy of the GNU Library General Public%0A# License along with this library; if not, write to the%0A# Free Software Foundation, Inc., 59 Temple Place - Suite 330,%0A# Boston, MA 02111-1307, USA.%0A%0Afrom cerbero.commands import Command, register_command%0Afrom cerbero.utils import _, N_, ArgparseArgument%0Afrom cerbero.utils import messages as m%0Afrom cerbero.packages.packagesstore import PackagesStore%0A%0A%0AINFO_TPL='''%0AName: %25(name)s%0AVersion: %25(version)s%0AHomepage: %25(url)s%0ADependencies: %25(deps)s%0ALicences: %25(licenses)s%0ADescription: %25(desc)s%0A'''%0A%0Aclass PackageInfo(Command):%0A doc = N_('Print information about this package')%0A name = 'packageinfo'%0A%0A def __init__(self):%0A Command.__init__(self,%0A %5BArgparseArgument('package', nargs=1,%0A help=_('name of the package')),%0A ArgparseArgument('-l', '--list-files', action='store_true', default=False,%0A help=_('List all files installed by this package')),%0A %5D)%0A%0A def run(self, config, args):%0A store = PackagesStore(config)%0A p_name = args.package%5B0%5D%0A if args.list_files:%0A m.message('%5Cn'.join(store.get_package_files_list(p_name)))%0A else:%0A p = store.get_package(p_name)%0A d = %7B'name': p.name, 'version': p.version, 'url': p.url,%0A 'licenses': ' '.join(p.licenses), 'desc': p.shortdesc,%0A 'deps': ', '.join(store.get_package_deps(p_name))%7D%0A m.message(INFO_TPL %25 d)%0A %0Aregister_command(PackageInfo)%0A
cb79c9bf74cb18f3ee86c7c3d5415ce1b088dde2
Add missing markdown file.
allmychanges/markdown.py
allmychanges/markdown.py
Python
0
@@ -0,0 +1,185 @@ +import CommonMark%0A%0A%0Adef render_markdown(text):%0A parser = CommonMark.DocParser()%0A renderer = CommonMark.HTMLRenderer()%0A ast = parser.parse(text)%0A return renderer.render(ast)%0A
114ea6c10658d2c199c68637d04bdd968fcc4452
Test case for task.info.json files
voyager_tasks/test/test_info_files.py
voyager_tasks/test/test_info_files.py
Python
0
@@ -0,0 +1,1805 @@ +import os%0Aimport sys%0Aimport glob%0Aimport json%0Aimport unittest%0Asys.path.append(os.path.dirname(os.path.dirname(__file__)))%0Aimport voyager_tasks%0A%0A%0Aclass TestInfoFiles(unittest.TestCase):%0A %22%22%22Test case for checking info files exist%0A for each task and have a valid structure.%0A %22%22%22%0A @classmethod%0A def setUpClass(self):%0A self.tasks = set(voyager_tasks.__tasks__)%0A self.info_dir = os.path.abspath(os.path.join(os.path.dirname(os.getcwd()), '..', 'info'))%0A self.json_files = set(%5Bos.path.basename(f).split('.')%5B0%5D for f in glob.glob(os.path.join(self.info_dir, '*.info.json'))%5D)%0A self.names = %5B%5D%0A self.runner = set()%0A self.display = set()%0A files_to_test = self.json_files.intersection(self.tasks)%0A for name in files_to_test:%0A test_file = os.path.join(self.info_dir, '%7B0%7D.info.json'.format(name))%0A with open(test_file) as f:%0A d = json.load(f)%0A self.names.append(d%5B'name'%5D)%0A self.runner.add(d%5B'runner'%5D)%0A self.display.add(d%5B'display'%5D.keys()%5B0%5D)%0A%0A def test_json_exists(self):%0A %22%22%22Ensure an info.json file exists for each task%22%22%22%0A self.assertEqual(self.tasks.issubset(self.json_files), True)%0A%0A def test_json_names(self):%0A %22%22%22Verify each info.json has a valid name field and value%22%22%22%0A self.assertEqual(sorted(list(self.tasks)), sorted(self.names))%0A%0A def test_json_runner(self):%0A self.assertEqual(len(list(self.runner)) == 1 and list(self.runner)%5B0%5D == 'python', True)%0A%0A def test_json_display(self):%0A %22%22%22Default display should be set to 'en' for all info.json files%22%22%22%0A self.assertEqual(len(list(self.display)) == 1 and list(self.display)%5B0%5D == 'en', True)%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
24f536a72b0467ff3ee1615f515ecff9fbf36bb3
Add pair sum
ch07_04.py
ch07_04.py
Python
0.999136
@@ -0,0 +1,317 @@ +number_of_data = int(input().strip())%0A%0Ama = %5B0%5D * number_of_data%0Anumbers = %5Bint(c) for c in input().strip().split()%5D%0A%0Afor i in range(1, number_of_data - 1):%0A ma%5Bi%5D = (sum(numbers%5Bi-1:i+2%5D)/3)%0A%0Ama%5B0%5D = sum(numbers%5B0:2%5D)/2%0Ama%5B-1%5D = sum(numbers%5Bnumber_of_data - 2:%5D)/2%0A%0Ama = %5Bstr(d) for d in ma%5D%0Aprint(%22%5Cn%22.join(ma))%0A
afa0efbdfc6bc4d19eaba919bc82c907fce37fa7
add base for API endpoint
datasets/api.py
datasets/api.py
Python
0.000001
@@ -0,0 +1,195 @@ +import json%0A%0Afrom flask import request, Response, url_for%0Afrom jsonschema import validate, ValidationError%0A%0Aimport models%0Aimport decorators%0Afrom datasets import app%0Afrom database import session%0A%0A
ce9ac96a6f1e57ebbce162b7e097675c23f1f2f4
Implement simple gaussian process regression.
projects/jakub/gaussian_processes/gaussian_process_regression.py
projects/jakub/gaussian_processes/gaussian_process_regression.py
Python
0
@@ -0,0 +1,1864 @@ +import csv%0Aimport sys%0A%0Aimport matplotlib.pyplot as plt%0Aimport numpy as np%0Aimport sklearn%0Aimport sklearn.gaussian_process.kernels%0A%0A%0Akernel = (sklearn.gaussian_process.kernels.ConstantKernel()%0A + sklearn.gaussian_process.kernels.Matern(length_scale=2, nu=3/2)%0A + sklearn.gaussian_process.kernels.WhiteKernel(noise_level=1))%0A%0ALABEL_COL = 4%0AINPUT_COLS = 7, 9, 11, 13, 15%0AINPUT_DIM = len(INPUT_COLS)%0AINPUT_ROW_VALID = lambda row: row%5B2%5D == %22Galaxy%22%0AINPUT_SAMPLES_NUM = 1000%0ATESTING_SAMPLES_NUM = 1000%0APLOT_SAMPLES = 1000%0A%0A%0Adef take_samples(reader, num):%0A X = np.empty((num, INPUT_DIM))%0A y = np.empty((num,))%0A%0A i = 0%0A for row in reader:%0A if INPUT_ROW_VALID(row):%0A y%5Bi%5D = float(row%5BLABEL_COL%5D)%0A for j, col in enumerate(INPUT_COLS):%0A X%5Bi, j%5D = float(row%5Bcol%5D)%0A%0A i += 1%0A%0A if i == num:%0A break%0A else:%0A raise Exception(%22Not enough samples in file.%22)%0A%0A return X, y%0A%0A%0Adef main(path):%0A with open(path) as f:%0A reader = csv.reader(f)%0A next(reader) # Skip headers%0A%0A X, y = take_samples(reader, INPUT_SAMPLES_NUM)%0A test_X, test_y = take_samples(reader, TESTING_SAMPLES_NUM)%0A%0A gp = sklearn.gaussian_process.GaussianProcessRegressor(kernel=kernel)%0A gp.fit(X, y)%0A%0A if False:%0A X_pred = np.empty((PRED_DATA, INPUT_DIM))%0A X_pred%5B:, :4%5D = np.mean(X%5B:, :4%5D, axis=0)%0A X_pred%5B:, 4%5D = np.linspace(np.min(X%5B:, 4%5D), np.max(X%5B:, 4%5D), num=PRED_DATA)%0A%0A y_pred, sigmas = gp.predict(X_pred, return_std=True)%0A plt.plot(X%5B:, 4%5D, y, %22ro%22, markersize=0.5)%0A plt.errorbar(X_pred%5B:, 4%5D, y_pred, yerr=sigmas, capsize=0)%0A%0A plt.show()%0A%0A print(%22Score: %7B%7D%22.format(gp.score(test_X, test_y)))%0A%0A%0Aif __name__ == '__main__':%0A if len(sys.argv) != 2:%0A raise ValueError()%0A main(sys.argv%5B1%5D)%0A
c4aca4fe1bf02286f218ca855a41e380987818f7
Add test example
fcap/tests/test_example.py
fcap/tests/test_example.py
Python
0.000002
@@ -0,0 +1,155 @@ +import unittest%0A%0A%0Aclass BasicTestSuite(unittest.TestCase):%0A %22%22%22Basic test cases.%22%22%22%0A%0A def test_absolute_truth_and_meaning(self):%0A assert True%0A
fa27978c50364c903e2c343560f66db6ddc76bdb
add setup.py
play_tox/setup.py
play_tox/setup.py
Python
0.000001
@@ -0,0 +1,45 @@ +from setuptools import setup%0Asetup(name=%22x%22)%0A
6c4edaefe30905f62b885b931a1c5ca6d65cd220
Add tests for project model
server/tests/models/test_project.py
server/tests/models/test_project.py
Python
0
@@ -0,0 +1,2113 @@ +from server.models import Project%0Afrom server.tests.helpers import fixtures, FlaskTestCase%0A%0A%0Aclass TestProject(FlaskTestCase):%0A @fixtures('single_project.json')%0A def test_get_single_owner(self):%0A %22%22%22Test getting single project owner%0A %22%22%22%0A with self.flaskapp.test_request_context():%0A project = Project.query.filter_by(id=1).first()%0A owners = project.get_owners()%0A assert len(owners) is 1%0A%0A @fixtures('many_owners.json')%0A def test_get_many_owners(self):%0A %22%22%22Test getting multiple project owners%0A %22%22%22%0A with self.flaskapp.test_request_context():%0A project = Project.query.filter_by(id=1).first()%0A owners = project.get_owners()%0A assert len(owners) is 3%0A%0A @fixtures('single_contributer.json')%0A def test_get_single_contributer(self):%0A %22%22%22Test getting single contributer%0A %22%22%22%0A with self.flaskapp.test_request_context():%0A project = Project.query.filter_by(id=1).first()%0A contributers = project.get_contributers()%0A assert len(contributers) is 1%0A%0A%0A @fixtures('many_contributers.json')%0A def test_get_many_contributers(self):%0A %22%22%22Test getting many contributers%0A %22%22%22%0A with self.flaskapp.test_request_context():%0A project = Project.query.filter_by(id=1).first()%0A contributers = project.get_contributers()%0A assert len(contributers) is 3%0A%0A @fixtures('single_designer.json')%0A def test_get_single_designer(self):%0A %22%22%22Test getting single designer%0A %22%22%22%0A with self.flaskapp.test_request_context():%0A project = Project.query.filter_by(id=1).first()%0A designers = project.get_designers()%0A assert len(designers) is 1%0A%0A @fixtures('many_designers.json')%0A def test_get_many_designers(self):%0A %22%22%22Test getting many designers%0A %22%22%22%0A with self.flaskapp.test_request_context():%0A project = Project.query.filter_by(id=1).first()%0A designers = project.get_designers()%0A assert len(designers) is 2
ee62d6a972e5af72fc9a5e2e36d1a7822a1703af
Add sample on veh handle setup in remote process
samples/remote_veh_segv.py
samples/remote_veh_segv.py
Python
0
@@ -0,0 +1,1057 @@ +import windows%0Aimport windows.test%0A%0Afrom windows.generated_def.winstructs import *%0A%0A#c = windows.test.pop_calc_64()%0A%0A%0Ac = windows.test.pop_calc_64(dwCreationFlags=CREATE_SUSPENDED)%0A%0A%0Apython_code = %22%22%22%0Aimport windows%0Aimport ctypes%0Aimport windows%0Afrom windows.vectored_exception import VectoredException%0Aimport windows.generated_def.windef as windef%0Afrom windows.generated_def.winstructs import *%0A%0Awindows.utils.create_console()%0A%0A@VectoredException%0Adef handler(exc):%0A print(%22POUET%22)%0A if exc%5B0%5D.ExceptionRecord%5B0%5D.ExceptionCode == EXCEPTION_ACCESS_VIOLATION:%0A target_addr = ctypes.cast(exc%5B0%5D.ExceptionRecord%5B0%5D.ExceptionInformation%5B1%5D, ctypes.c_void_p).value%0A print(%22Instr at %7B0%7D accessed to addr %7B1%7D%22.format(hex(exc%5B0%5D.ExceptionRecord%5B0%5D.ExceptionAddress), hex(target_addr)))%0A #return windef.EXCEPTION_CONTINUE_EXECUTION%0A return windef.EXCEPTION_CONTINUE_SEARCH%0A%0A%0Awindows.winproxy.AddVectoredExceptionHandler(0, handler)%0Aprint(%22YOLO%3C3%22)%0Aprint(ctypes.c_uint.from_address(0x42424242).value)%0A%22%22%22%0A%0A%0Ax = c.execute_python(python_code)
0e5ba1d9ae7ca7d5439d886abe732f0fcebed49b
Create classes.py
classes.py
classes.py
Python
0.000001
@@ -0,0 +1,31 @@ +class String(object):%0A    pass%0A
e29bdc567c3d1f04f9e9ec17792052b0f66f918e
reorder users migration
apostello/migrations/0010_auto_20160421_1411.py
apostello/migrations/0010_auto_20160421_1411.py
Python
0.000002
@@ -0,0 +1,435 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.5 on 2016-04-21 13:11%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('apostello', '0009_userprofile_message_cost_limit'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterModelOptions(%0A name='userprofile',%0A options=%7B'ordering': %5B'user__email'%5D%7D,%0A ),%0A %5D%0A
c97509b510fb13fffdfbc490146b3940c62b3db9
too soon. sorry
src/pyechonest/decorators.py
src/pyechonest/decorators.py
Python
0.998673
@@ -0,0 +1,852 @@ +# from http://wiki.python.org/moin/PythonDecoratorLibrary#Memoize%0Aclass memoized(object):%0A %22%22%22Decorator that caches a function's return value each time it is called.%0A If called later with the same arguments, the cached value is returned, and%0A not re-evaluated.%0A %22%22%22%0A def __init__(self, func):%0A self.func = func%0A self.cache = %7B%7D%0A %0A def __call__(self, *args):%0A try:%0A return self.cache%5Bargs%5D%0A except KeyError:%0A self.cache%5Bargs%5D = value = self.func(*args)%0A return value%0A except TypeError:%0A # uncachable -- for instance, passing a list as an argument.%0A # Better to not cache than to blow up entirely.%0A return self.func(*args)%0A %0A def __repr__(self):%0A %22%22%22Return the function's docstring.%22%22%22%0A return self.func.__doc__%0A
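A quick usage sketch for the memoized decorator above (fib is an illustrative example, not part of the commit):

@memoized
def fib(n):
    # Naive recursion; the cache makes repeated calls linear, not exponential.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(30)  # each distinct n is computed once
fib(30)  # answered straight from self.cache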
a519b7c91a8ea84549efcdf145aed56cf89b9d59
Create users.py
app/api_1_0/users.py
app/api_1_0/users.py
Python
0.000001
@@ -0,0 +1,1814 @@ +from flask import jsonify, request, current_app, url_for%0Afrom . import api%0Afrom ..models import User, Post%0A%0A%0A@api.route('/users/%3Cint:id%3E')%0Adef get_user(id):%0A user = User.query.get_or_404(id)%0A return jsonify(user.to_json())%0A%0A%0A@api.route('/users/%3Cint:id%3E/posts/')%0Adef get_user_posts(id):%0A user = User.query.get_or_404(id)%0A page = request.args.get('page', 1, type=int)%0A pagination = user.posts.order_by(Post.timestamp.desc()).paginate(%0A page, per_page=current_app.config%5B'FLASKY_POSTS_PER_PAGE'%5D,%0A error_out=False)%0A posts = pagination.items%0A prev = None%0A if pagination.has_prev:%0A prev = url_for('api.get_user_posts', id=id, page=page-1, _external=True)%0A next = None%0A if pagination.has_next:%0A next = url_for('api.get_user_posts', id=id, page=page+1, _external=True)%0A return jsonify(%7B%0A 'posts': %5Bpost.to_json() for post in posts%5D,%0A 'prev': prev,%0A 'next': next,%0A 'count': pagination.total%0A %7D)%0A%0A%0A@api.route('/users/%3Cint:id%3E/timeline/')%0Adef get_user_followed_posts(id):%0A user = User.query.get_or_404(id)%0A page = request.args.get('page', 1, type=int)%0A pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(%0A page, per_page=current_app.config%5B'FLASKY_POSTS_PER_PAGE'%5D,%0A error_out=False)%0A posts = pagination.items%0A prev = None%0A if pagination.has_prev:%0A prev = url_for('api.get_user_followed_posts', id=id, page=page-1,%0A _external=True)%0A next = None%0A if pagination.has_next:%0A next = url_for('api.get_user_followed_posts', id=id, page=page+1,%0A _external=True)%0A return jsonify(%7B%0A 'posts': %5Bpost.to_json() for post in posts%5D,%0A 'prev': prev,%0A 'next': next,%0A 'count': pagination.total%0A %7D)%0A
52a8a1cd093f8bdbaf0abfc85eff2d3682e24b12
Add Python script for questions linting
scripts/check-questions.py
scripts/check-questions.py
Python
0.000001
@@ -0,0 +1,2182 @@ +#!/usr/bin/env python3%0A%0A%0Aimport os%0Aimport sys%0Aimport json%0Aimport collections%0Aimport unicodedata%0A%0A%0ATEXT_FIELD = %22t%22%0AOPTIONS_FIELD = %22o%22%0AKIND_FIELD = %22k%22%0ACORRECT_FIELD = %22c%22%0AMANDATORY_FIELDS = %7BTEXT_FIELD, OPTIONS_FIELD, CORRECT_FIELD%7D%0A%0A%0Adef norm(s):%0A return unicodedata.normalize(%22NFD%22, s)%0A%0A%0Adef error(message, *, n):%0A raise ValueError(%22 %22.join((message, %22(%7B%7D)%22.format(n))))%0A%0A%0Adef check(questions):%0A text_occurences = collections.defaultdict(list)%0A%0A for n, question in enumerate(questions, start=1):%0A # Contains mandatory fields.%0A missing = MANDATORY_FIELDS - set(question.keys())%0A if missing:%0A error(%22missing %7B%7D%22.format(%22, %22.join(missing)), n=n)%0A%0A text_occurences%5Bnorm(question%5BTEXT_FIELD%5D)%5D.append(n)%0A%0A # Kind, if present, is %22tr%22.%0A if KIND_FIELD in question and question%5BKIND_FIELD%5D != %22tr%22:%0A error(%22%7B%7D != tr%22.format(KIND_FIELD), n=n)%0A%0A # There are at least four options & they are unique.%0A options = tuple(map(norm, question%5BOPTIONS_FIELD%5D))%0A options_count = len(options)%0A if len(set(options)) != options_count or options_count %3C 4:%0A error(%22%3E= 4 unique options are required%22, n=n)%0A%0A # There is at least one correct index.%0A correct = question%5BCORRECT_FIELD%5D%0A if len(correct) %3C 1:%0A error(%22%3E= 1 correct index is required%22, n=n)%0A for index in correct:%0A try:%0A options%5Bindex%5D%0A except IndexError:%0A error(%22index %7B%7D is not adressable%22.format(index), n=n)%0A%0A # Text is not repeated.%0A for text, ns in text_occurences.items():%0A if len(ns) %3E 1:%0A error(%0A %22t %7B%7D is repeated at %7B%7D%22.format(%0A text, %22, %22.join(map(str, ns%5B1:%5D))), n=ns%5B0%5D)%0A%0A%0Adef main():%0A questions_path = os.path.normpath(%0A os.path.join(%0A os.path.dirname(__file__), %22..%22, %22src%22, %22questions.json%22))%0A with open(questions_path, %22r%22, encoding=%22utf-8%22) as file:%0A questions = json.load(file)%0A try:%0A check(questions)%0A except ValueError as e:%0A print(e, file=sys.stderr)%0A exit(1)%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
3a662b5820ea90c0cd63116a610ede25558c5562
Add tests directory to sourceterm package and start test module.
sourceterm/tests/test_srcequations.py
sourceterm/tests/test_srcequations.py
Python
0
@@ -0,0 +1,230 @@ +'''%0ACreated on 25 Aug 2010%0A%0A@author: ith%0A'''%0Aimport unittest%0A%0A%0Aclass Test(unittest.TestCase):%0A%0A%0A def testName(self):%0A pass%0A%0A%0Aif __name__ == %22__main__%22:%0A #import sys;sys.argv = %5B'', 'Test.testName'%5D%0A unittest.main()
150a13e1854a93f9330349486717588900c05c6d
Fix imdb min_score schema to allow numbers
flexget/plugins/filter/imdb.py
flexget/plugins/filter/imdb.py
from __future__ import unicode_literals, division, absolute_import import logging from flexget.plugin import register_plugin, get_plugin_by_name, PluginError, priority from flexget.utils.log import log_once log = logging.getLogger('imdb') class FilterImdb(object): """ This plugin allows filtering based on IMDB score, votes and genres etc. Note: All parameters are optional. Some are mutually exclusive. Configuration:: min_score: <num> min_votes: <num> min_year: <num> max_year: <num> # reject if genre contains any of these reject_genres: - genre1 - genre2 # reject if language contain any of these reject_languages: - language1 # accept only these primary languages accept_languages: - language1 # accept movies with any of these actors accept_actors: - nm0004695 - nm0004754 # reject movie if it has any of these actors reject_actors: - nm0001191 - nm0002071 # accept all movies by these directors accept_directors: - nm0000318 # reject movies by these directors reject_directors: - nm0093051 # reject movies/TV shows with any of these ratings reject_mpaa_ratings: - PG_13 - R - X # accept movies/TV shows with only these ratings accept_mpaa_ratings: - PG - G - TV_Y """ schema = { 'type': 'object', 'properties': { 'min_year': {'type': 'integer'}, 'max_year': {'type': 'integer'}, 'min_votes': {'type': 'integer'}, 'min_score': {'type': 'integer'}, 'reject_genres': {'type': 'array', 'items': {'type': 'string'}}, 'reject_languages': {'type': 'array', 'items': {'type': 'string'}}, 'accept_languages': {'type': 'array', 'items': {'type': 'string'}}, 'reject_actors': {'type': 'array', 'items': {'type': 'string'}}, 'accept_actors': {'type': 'array', 'items': {'type': 'string'}}, 'reject_directors': {'type': 'array', 'items': {'type': 'string'}}, 'accept_directors': {'type': 'array', 'items': {'type': 'string'}}, 'reject_mpaa_ratings': {'type': 'array', 'items': {'type': 'string'}}, 'accept_mpaa_ratings': {'type': 'array', 'items': {'type': 'string'}} }, 'additionalProperties': False } # Run later to avoid unnecessary lookups @priority(120) def on_task_filter(self, task, config): lookup = get_plugin_by_name('imdb_lookup').instance.lookup # since the plugin does not reject anything, no sense going trough accepted for entry in task.undecided: force_accept = False try: lookup(entry) except PluginError as e: # logs skip message once trough log_once (info) and then only when ran from cmd line (w/o --cron) msg = 'Skipping %s because of an error: %s' % (entry['title'], e.value) if not log_once(msg, logger=log): log.verbose(msg) continue #for key, value in entry.iteritems(): # log.debug('%s = %s (type: %s)' % (key, value, type(value))) # Check defined conditions, TODO: rewrite into functions? 
reasons = [] if 'min_score' in config: if entry.get('imdb_score', 0) < config['min_score']: reasons.append('min_score (%s < %s)' % (entry.get('imdb_score'), config['min_score'])) if 'min_votes' in config: if entry.get('imdb_votes', 0) < config['min_votes']: reasons.append('min_votes (%s < %s)' % (entry.get('imdb_votes'), config['min_votes'])) if 'min_year' in config: if entry.get('imdb_year', 0) < config['min_year']: reasons.append('min_year (%s < %s)' % (entry.get('imdb_year'), config['min_year'])) if 'max_year' in config: if entry.get('imdb_year', 0) > config['max_year']: reasons.append('max_year (%s > %s)' % (entry.get('imdb_year'), config['max_year'])) if 'reject_genres' in config: rejected = config['reject_genres'] for genre in entry.get('imdb_genres', []): if genre in rejected: reasons.append('reject_genres') break if 'reject_languages' in config: rejected = config['reject_languages'] for language in entry.get('imdb_languages', []): if language in rejected: reasons.append('reject_languages') break if 'accept_languages' in config: accepted = config['accept_languages'] if entry.get('imdb_languages') and entry['imdb_languages'][0] not in accepted: # Reject if the first (primary) language is not among acceptable languages reasons.append('accept_languages') if 'reject_actors' in config: rejected = config['reject_actors'] for actor_id, actor_name in entry.get('imdb_actors', {}).iteritems(): if actor_id in rejected or actor_name in rejected: reasons.append('reject_actors %s' % actor_name or actor_id) break # Accept if actors contains an accepted actor, but don't reject otherwise if 'accept_actors' in config: accepted = config['accept_actors'] for actor_id, actor_name in entry.get('imdb_actors', {}).iteritems(): if actor_id in accepted or actor_name in accepted: log.debug('Accepting because of accept_actors %s' % actor_name or actor_id) force_accept = True break if 'reject_directors' in config: rejected = config['reject_directors'] for director_id, director_name in entry.get('imdb_directors', {}).iteritems(): if director_id in rejected or director_name in rejected: reasons.append('reject_directors %s' % director_name or director_id) break # Accept if the director is in the accept list, but do not reject if the director is unknown if 'accept_directors' in config: accepted = config['accept_directors'] for director_id, director_name in entry.get('imdb_directors', {}).iteritems(): if director_id in accepted or director_name in accepted: log.debug('Accepting because of accept_directors %s' % director_name or director_id) force_accept = True break if 'reject_mpaa_ratings' in config: rejected = config['reject_mpaa_ratings'] if entry.get('imdb_mpaa_rating') in rejected: reasons.append('reject_mpaa_ratings %s' % entry['imdb_mpaa_rating']) if 'accept_mpaa_ratings' in config: accepted = config['accept_mpaa_ratings'] if entry.get('imdb_mpaa_rating') not in accepted: reasons.append('accept_mpaa_ratings %s' % entry.get('imdb_mpaa_rating')) if reasons and not force_accept: msg = 'Didn\'t accept `%s` because of rule(s) %s' % \ (entry.get('imdb_name', None) or entry['title'], ', '.join(reasons)) if task.manager.options.debug: log.debug(msg) else: if task.manager.options.quiet: log_once(msg, log) else: log.info(msg) else: log.debug('Accepting %s' % (entry['title'])) entry.accept() register_plugin(FilterImdb, 'imdb', api_ver=2)
Python
0.000015
@@ -1693,37 +1693,36 @@ ore': %7B'type': ' -integ +numb er'%7D,%0A
3d11921f67c1928bb79869c3af1f8836360219fd
Add SIF assembler for Boolean network generation
indra/assemblers/sif_assembler.py
indra/assemblers/sif_assembler.py
Python
0
@@ -0,0 +1,2819 @@ +import networkx as nx%0Afrom indra.statements import *%0A%0Aclass SifAssembler(object):%0A def __init__(self, stmts=None):%0A if stmts is None:%0A self.stmts = %5B%5D%0A else:%0A self.stmts = stmts%0A self.graph = nx.DiGraph()%0A self.nodes = %7B%7D%0A%0A def make_model(self):%0A for st in self.stmts:%0A if isinstance(st, Activation):%0A s = self.add_node(st.subj)%0A t = self.add_node(st.obj)%0A if st.is_activation:%0A self.add_edge(s, t, %7B'polarity': 'positive'%7D)%0A else:%0A self.add_edge(s, t, %7B'polarity': 'negative'%7D)%0A%0A def print_boolean_net(self, out_file=None):%0A init_str = ''%0A for node_key in self.graph.nodes():%0A node_name = self.graph.node%5Bnode_key%5D%5B'name'%5D%0A init_str += '%25s = False%5Cn' %25 node_name%0A rule_str = ''%0A for node_key in self.graph.nodes():%0A node_name = self.graph.node%5Bnode_key%5D%5B'name'%5D%0A in_edges = self.graph.in_edges(node_key)%0A if not in_edges:%0A continue%0A parents = %5Be%5B0%5D for e in in_edges%5D%0A polarities = %5Bself.graph.edge%5Be%5B0%5D%5D%5Bnode_key%5D%5B'polarity'%5D%0A for e in in_edges%5D%0A pos_parents = %5Bpar for par, pol in zip(parents, polarities) if%0A pol == 'positive'%5D%0A neg_parents = %5Bpar for par, pol in zip(parents, polarities) if%0A pol == 'negative'%5D%0A%0A rhs_pos_parts = %5B%5D%0A for par in pos_parents:%0A rhs_pos_parts.append(self.graph.node%5Bpar%5D%5B'name'%5D)%0A rhs_pos_str = ' or '.join(rhs_pos_parts)%0A%0A rhs_neg_parts = %5B%5D%0A for par in neg_parents:%0A rhs_neg_parts.append(self.graph.node%5Bpar%5D%5B'name'%5D)%0A rhs_neg_str = ' or '.join(rhs_neg_parts)%0A%0A if rhs_pos_str:%0A if rhs_neg_str:%0A rhs_str = '(' + rhs_pos_str + %5C%0A ') and not (' + rhs_neg_str + ')'%0A else:%0A rhs_str = rhs_pos_str%0A else:%0A rhs_str = 'not (' + rhs_neg_str + ')'%0A%0A node_eq = '%25s* = %25s%5Cn' %25 (node_name, rhs_str)%0A rule_str += node_eq%0A full_str = init_str + '%5Cn' + rule_str%0A if out_file is not None:%0A with open(out_file, 'wt') as fh:%0A fh.write(full_str)%0A return full_str%0A%0A def add_node(self, agent):%0A node_key = agent.matches_key()%0A self.graph.add_node(node_key, name=agent.name)%0A return node_key%0A%0A def add_edge(self, s, t, edge_attributes=None):%0A if edge_attributes is None:%0A self.graph.add_edge(s, t)%0A else:%0A self.graph.add_edge(s, t, edge_attributes)%0A
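print_boolean_net above emits one update rule per node, of the form target* = (positive parents) and not (negative parents). A hand-built sketch of the graph shape it walks, using the same 'name' node attribute and 'polarity' edge attribute (written with modern networkx keyword arguments; the INDRA statement layer is omitted):

import networkx as nx

g = nx.DiGraph()
g.add_node('a', name='A')
g.add_node('b', name='B')
g.add_node('c', name='C')
g.add_edge('a', 'c', polarity='positive')  # A activates C
g.add_edge('b', 'c', polarity='negative')  # B inhibits C
# Following the assembler's rule construction, node C would print as:
#   C* = (A) and not (B)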
732bee4960cb2448d829f5b30c114958a3ebbab2
Generate QR
genqr.py
genqr.py
Python
0.999999
@@ -0,0 +1,1496 @@ +#!/usr/bin/env python%0A'''Generate QR code using Google Charts API'''%0A%0Aimport sys%0A# Python 3/2 compatibility%0Aif sys.version_info%5B:2%5D %3C (3, 0):%0A from urllib import urlopen, urlencode%0A import httplib%0A stdout = sys.stdout%0Aelse:%0A from urllib.request import urlopen%0A from urllib.parse import urlencode%0A import http.client as httplib%0A stdout = sys.stdout.buffer%0A%0A%0Adef gen_qr(data, size):%0A charts_url = 'https://chart.googleapis.com/chart'%0A params = %5B%0A ('cht', 'qr'),%0A ('chs', size),%0A ('chl', data),%0A %5D%0A query = urlencode(params)%0A url = '%25s?%25s' %25 (charts_url, query)%0A fo = urlopen(url)%0A if fo.code != httplib.OK:%0A raise ValueError('bad reply from Google %25d' %25 fo.code)%0A return fo.read()%0A%0A%0Aif __name__ == '__main__':%0A%0A from argparse import ArgumentParser%0A parser = ArgumentParser(%0A description='Generate QR using Google Charts (PNG)')%0A parser.add_argument('data', help='data to encode')%0A parser.add_argument('--out', '-o', help='output file name (stdin)',%0A default='-')%0A parser.add_argument('--size', '-s', help='image size (200x200)',%0A default='200x200')%0A args = parser.parse_args()%0A%0A img_data = gen_qr(args.data, args.size)%0A try:%0A out = stdout if args.out == '-' else open(args.out, 'wb')%0A out.write(img_data)%0A except IOError as err:%0A raise SystemExit(%0A 'error: cannot open %7B%7D for writing - %7B%7D'.format(args.out, err))%0A
f552979125531fade029bc8baa51e2d0bb9dd320
Simplify retrieving of config home in core init method
powerline/core.py
powerline/core.py
# -*- coding: utf-8 -*- import importlib import json import os import sys from colorscheme import Colorscheme from theme import Theme class Powerline(object): def __init__(self, ext): try: config_home = os.environ['XDG_CONFIG_HOME'] except KeyError: config_home = os.path.expanduser('~/.config') config_path = os.path.join(config_home, 'powerline') plugin_path = os.path.realpath(os.path.dirname(__file__)) self.search_paths = [config_path, plugin_path] sys.path[:0] = self.search_paths # Load main config file config = self._load_json_config('config') self.config = config['common'] self.config_ext = config['ext'][ext] # Load and initialize colorscheme colorscheme_config = self._load_json_config(os.path.join('colorschemes', self.config_ext['colorscheme'])) colorscheme = Colorscheme(colorscheme_config) # Load and initialize extension theme theme_config = self._load_json_config(os.path.join('themes', ext, self.config_ext['theme'])) self.theme = Theme(ext, colorscheme, theme_config, self.config) # Load and initialize extension renderer renderer_module_name = 'powerline.ext.{0}.renderer'.format(ext) renderer_class_name = '{0}Renderer'.format(ext.capitalize()) renderer_class = getattr(importlib.import_module(renderer_module_name), renderer_class_name) self.renderer = renderer_class(self.theme) def _load_json_config(self, config_file): config_file += '.json' for path in self.search_paths: config_file_path = os.path.join(path, config_file) if os.path.isfile(config_file_path): with open(config_file_path, 'rb') as config_file_fp: return json.load(config_file_fp) raise IOError('Config file not found in search path: {0}'.format(config_file))
Python
0.000001
@@ -186,16 +186,8 @@ t):%0A -%09%09try:%0A%09 %09%09co @@ -208,17 +208,21 @@ .environ -%5B +.get( 'XDG_CON @@ -234,45 +234,9 @@ OME' -%5D%0A%09%09except KeyError:%0A%09%09%09config_home = +, os. @@ -263,16 +263,17 @@ config') +) %0A%0A%09%09conf
5999d8b572d8f28fc4fee0826660a40ec108d15b
Create trimfile.py
bin/preprocess/trimfile.py
bin/preprocess/trimfile.py
Python
0.000001
@@ -0,0 +1,53 @@ +#!/usr/bin/python%0A%0A%0A%0A%0A%0A%0A%0Adef trimOne():%0A%0A%0A return 0%0A
d268c5870623b1c5f6da202264cb1b399f037ec8
Create rename.py
rename.py
rename.py
Python
0.000003
@@ -0,0 +1,683 @@ +import sys%0Aimport os%0A%0Aif len ( sys.argv ) == 4:%0A%09args = sys.argv %5B 1: %5D%0Aelse:%0A%09print 'Usage: python rename.py %5Bpath%5D'%0A%09sys.exit ( 0 )%0A%0Apath = '.'%0Afilenames = os.listdir ( str ( path ) )%0A%0A# Check some things%0Atext = open ( args %5B 0 %5D, 'r' ).read ( )%0Aoriginal_names = text.split ( '%5Cn' )%0A%0Atext = open ( args %5B 1 %5D, 'r' ).read ( )%0Anew_names = text.split ( '%5Cn' )%0A%0Aall_names = %5B %5D%0Aall_names.extend ( original_names )%0Aall_names.extend ( new_names )%0Aif len ( all_names ) != len ( set ( all_names ) ):%0A%09print 'Something is incorrect. Maybe duplicated names.'%0A%09sys.exit ( 0 )%0A%0Afor pair in zip ( original_names, new_names ):%0A%09if pair %5B 0 %5D in filenames:%0A%09%09os.rename ( pair %5B 0 %5D, pair %5B 1 %5D )%0A
8e61c18d23812a70d65ec42d7c36c5f1b7ed829d
add script for 50bp window gff summary.
scripts/summarize_gff50.py
scripts/summarize_gff50.py
Python
0
@@ -0,0 +1,1157 @@ +import sys%0Aimport os.path as op%0Asys.path.insert(0, %22/home/brentp/src/methylcode/code/%22)%0A%0Afrom methyl import MethylGroup%0Aprefix = sys.argv%5B1%5D # something like: out1234n/thaliana_v9%0Aacontext = sys.argv%5B2%5D # CHH or CHG or CG%0Awindow = 50%0A%0Amg = MethylGroup(prefix)%0A%0A%0Afh = open(mg.dir + mg.prefix + %22.test.%25ibp.%25s.gff%22 %25 (window, acontext), %22w%22)%0Aprint %3E%3Esys.stderr, %22writing to %25s%22 %25 (fh.name, )%0Aprint %3E%3Efh, %22##gff-version 3%22%0Asys.argv%5B1%5D = op.abspath(sys.argv%5B1%5D)%0Aprint %3E%3Efh, %22#%25s%22 %25 %22 %22.join(sys.argv)%0A%0Afor chr, m in mg.iteritems():%0A %0A cs, ts, mask = m.as_context(acontext)%0A bp_max = len(ts)%0A for start in range(0, bp_max + 1, window):%0A end = min(start + window, bp_max)%0A t_count = ts%5Bstart:end%5D.sum()%0A c_count = cs%5Bstart:end%5D.sum()%0A%0A n = mask%5Bstart:end%5D.sum()%0A%0A if c_count + t_count == 0:%0A plot = methyl = 0.0%0A else:%0A plot = methyl = c_count / float(c_count + t_count)%0A strand = %22.%22 %0A plot = %22%25.3g%22 %25 plot%0A%0A attrs=%22c=%25i;t=%25i;n=%25i%22 %25 (c_count, t_count, n)%0A print %3E%3Efh, %22%5Ct%22.join(map(str, %5Bchr, sys.argv%5B0%5D, %22dmc%22, start + 1, end, plot, strand, %22.%22, attrs%5D))%0A
a3d79e11fea266a2996b588a9cbbe6272722ea8b
Fix a bug caught by counter unittesting
sparts/counters.py
sparts/counters.py
# Copyright (c) 2014, Facebook, Inc. All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. # """Module for implementing time-series counters.""" from __future__ import absolute_import from collections import deque from functools import partial from sparts.sparts import _Nameable, _Bindable, ProvidesCounters import time class SampleType: """Pass an array of these in the `types` paremeter to `sample()`""" COUNT = 'count' SUM = 'sum' AVG = 'avg' AVERAGE = 'avg' MAX = 'max' MIN = 'min' class _BaseCounter(_Nameable, _Bindable, ProvidesCounters): """Base type for counter-like things""" suffix = 'UNDEF' def __init__(self, name=None): super(_BaseCounter, self).__init__(name) self._initialize() def _bind(self, obj): return self.__class__(name=self.name) def _initialize(self): raise NotImplementedError() def _genCounterCallbacks(self): """Return this counter's (name, value)""" yield self.name, self def getvalue(self): raise NotImplementedError() def add(self, value): raise NotImplementedError() def __call__(self): return self.getvalue() def __int__(self): return int(self.getvalue()) def __float__(self): return float(self.getvalue()) def __str__(self): v = self.getvalue() if v is None: return '__None__' return str(v) class ValueCounter(_BaseCounter): """Base type for counter-like things that have a `._value`""" DEFAULT_VALUE = 0.0 def _initialize(self, value=None): self._value = value or self.DEFAULT_VALUE def getvalue(self): return self._value class Sum(ValueCounter): """A running total""" suffix = SampleType.SUM def add(self, value): self._value += value def increment(self): self.add(1.0) def incrementBy(self, value): self.add(value) def reset(self, value=0): self._value = value counter = Sum class Count(ValueCounter): """A running count""" suffix = SampleType.COUNT DEFAULT_VALUE = 0 def add(self, value): self._value += 1 class Average(_BaseCounter): """A running average""" suffix = SampleType.AVERAGE def _initialize(self): self._total = 0.0 self._count = 0 def add(self, value): # TODO: Re-use sibling total/count counters if present # not sure how to do this sensibly self._total += value self._count += 1 def getvalue(self): if self._count == 0: return None return self._total / self._count class Max(ValueCounter): """A running maximum""" suffix = SampleType.MAX DEFAULT_VALUE = None def add(self, value): if self._value is None: self._value = value elif value > self._value: self._value = value class Min(ValueCounter): """A running minimum""" suffix = SampleType.MIN DEFAULT_VALUE = None def add(self, value): if self._value is None: self._value = value elif value < self._value: self._value = value def getvalue(self): return self._value # TODO: Percentiles!! # Lookup for mapping SampleTypes to their respective classes _SampleMethod = { SampleType.COUNT: Count, SampleType.SUM: Sum, SampleType.AVERAGE: Average, SampleType.MAX: Max, SampleType.MIN: Min, } class Samples(_Nameable, _Bindable, ProvidesCounters): """`samples` are used to generate series of counters dynamically This is so you can say, keep track of the average duration of some event for the last minute, hour, day, etc, and export these as 4 separate counters. 
""" def __init__(self, types=None, windows=None, name=None): super(Samples, self).__init__(name) self.types = types or [SampleType.AVERAGE] # minutely, hourly self.windows = sorted(windows or [60, 3600]) self.max_window = max(self.windows) self.samples = deque() self.dirty = True self._prev_counters = {} self._prev_time = None def _bind(self, obj): return self.__class__(types=self.types, windows=self.windows, name=self.name) def _genCounterCallbacks(self): """Yield all the child counters.""" for subcounter in self.iterkeys(): yield subcounter, partial(self.getCounter, subcounter) def _now(self): """Defined to allow unittest overriding""" return time.time() def add(self, value): now = self._now() self.samples.append((now, value)) # When adding samples, trim old ones. while now - self.max_window > self.samples[0][0]: self.samples.popleft() self.dirty = True # TODO: Handle "infinite" windows def getCounters(self): if self.dirty is False and self._prev_time == int(self._now()): return self._prev_counters ops = [] for type in self.types: ops.append(_SampleMethod[type]()) now = self._now() genwindows = iter(self.windows) window = genwindows.next() result = {} done = False def _saveCounterValues(window): """Re-usable helper function for setting results and continuing""" prefix = '' if self.name is not None: prefix = self.name + '.' for op in ops: result[prefix + op.suffix + '.' + str(window)] = \ op.getvalue() # Move to the next window try: return genwindows.next(), False except StopIteration: # We exhausted all our windows return None, True for ts, value in reversed(self.samples): # We exceeded the current window if now - window > ts: # Save counter values window, done = _saveCounterValues(window) if done: break for op in ops: op.add(value) # We exhausted the samples before the windows while not done: window, done = _saveCounterValues(window) self._prev_counters = result self._prev_time = int(now) self.dirty = False return result def getCounter(self, name, default=None): return self.getCounters().get(name, default) def iterkeys(self): for type in self.types: for window in self.windows: yield self.name + '.' + type + '.' + str(window) # TODO: Infinite Windows samples = Samples
Python
0
@@ -6160,18 +6160,34 @@ -if +while not done and now - w @@ -6290,28 +6290,25 @@ ues(window)%0A - +%0A @@ -6312,32 +6312,82 @@ if done:%0A + # TODO: %22prune%22 any remaining samples%0A
3dc9204c80f2f7be5f82200c059a6a62f02bf6c1
Update blogroll and social links.
www/pelicanconf.py
www/pelicanconf.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals AUTHOR = u'IPython development team and Enthought, Inc.' SITENAME = u'DistArray' SITEURL = '' PATH = 'content' TIMEZONE = 'America/Chicago' DEFAULT_LANG = u'en' # Feed generation is usually not desired when developing FEED_ALL_ATOM = None CATEGORY_FEED_ATOM = None TRANSLATION_FEED_ATOM = None # Blogroll LINKS = (('Pelican', 'http://getpelican.com/'), ('Python.org', 'http://python.org/'), ('Jinja2', 'http://jinja.pocoo.org/'), ('You can modify those links in your config file', '#'),) # Social widget SOCIAL = (('You can add links in your config file', '#'), ('Another social link', '#'),) DEFAULT_PAGINATION = False # Uncomment following line if you want document-relative URLs when developing #RELATIVE_URLS = True
Python
0
@@ -407,15 +407,13 @@ ((' -Pelican +NumPy ', ' @@ -423,22 +423,21 @@ p:// -getpelican.com +www.numpy.org /'), @@ -452,18 +452,13 @@ (' -Python.org +SciPy ', ' @@ -468,19 +468,21 @@ p:// -python +www.scipy .org -/ '),%0A @@ -496,14 +496,15 @@ (' -Jinja2 +IPython ', ' @@ -514,19 +514,15 @@ p:// -jinja.pocoo +ipython .org @@ -541,175 +541,137 @@ (' -You can modify those links in your config file', '#'),)%0A%0A# Social widget%0ASOCIAL = (('You can add links in your config file', '#'),%0A ('Another social link', '# +Enthought', 'http://www.enthought.com/'),%0A )%0A%0A# Social widget%0ASOCIAL = (('github', 'https://github.com/enthought/distarray '),)
04b0ed7ae4966259aa1967dc7e5ebb093be583c8
Fix glance filesystem store race condition
glance/store/filesystem.py
glance/store/filesystem.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 OpenStack, LLC # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A simple filesystem-backed store """ import errno import hashlib import os import urlparse from glance.common import exception from glance.common import utils from glance.openstack.common import cfg import glance.openstack.common.log as logging import glance.store import glance.store.base import glance.store.location LOG = logging.getLogger(__name__) datadir_opt = cfg.StrOpt('filesystem_store_datadir') CONF = cfg.CONF CONF.register_opt(datadir_opt) class StoreLocation(glance.store.location.StoreLocation): """Class describing a Filesystem URI""" def process_specs(self): self.scheme = self.specs.get('scheme', 'file') self.path = self.specs.get('path') def get_uri(self): return "file://%s" % self.path def parse_uri(self, uri): """ Parse URLs. This method fixes an issue where credentials specified in the URL are interpreted differently in Python 2.6.1+ than prior versions of Python. """ pieces = urlparse.urlparse(uri) assert pieces.scheme in ('file', 'filesystem') self.scheme = pieces.scheme path = (pieces.netloc + pieces.path).strip() if path == '': reason = _("No path specified in URI: %s") % uri LOG.error(reason) raise exception.BadStoreUri('No path specified') self.path = path class ChunkedFile(object): """ We send this back to the Glance API server as something that can iterate over a large file """ CHUNKSIZE = 65536 def __init__(self, filepath): self.filepath = filepath self.fp = open(self.filepath, 'rb') def __iter__(self): """Return an iterator over the image file""" try: while True: chunk = self.fp.read(ChunkedFile.CHUNKSIZE) if chunk: yield chunk else: break finally: self.close() def close(self): """Close the internal file pointer""" if self.fp: self.fp.close() self.fp = None class Store(glance.store.base.Store): def get_schemes(self): return ('file', 'filesystem') def configure_add(self): """ Configure the Store to use the stored configuration options Any store that needs special configuration should implement this method. If the store was not able to successfully configure itself, it should raise `exception.BadStoreConfiguration` """ self.datadir = CONF.filesystem_store_datadir if self.datadir is None: reason = (_("Could not find %s in configuration options.") % 'filesystem_store_datadir') LOG.error(reason) raise exception.BadStoreConfiguration(store_name="filesystem", reason=reason) if not os.path.exists(self.datadir): msg = _("Directory to write image files does not exist " "(%s). 
Creating.") % self.datadir LOG.info(msg) try: os.makedirs(self.datadir) except IOError: reason = _("Unable to create datadir: %s") % self.datadir LOG.error(reason) raise exception.BadStoreConfiguration(store_name="filesystem", reason=reason) def get(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file, and returns a tuple of generator (for reading the image file) and image_size :param location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :raises `glance.exception.NotFound` if image does not exist """ loc = location.store_location filepath = loc.path if not os.path.exists(filepath): raise exception.NotFound(_("Image file %s not found") % filepath) else: msg = _("Found image at %s. Returning in ChunkedFile.") % filepath LOG.debug(msg) try: image_size = str(os.path.getsize(filepath)) except os.error: image_size = None return (ChunkedFile(filepath), image_size) def delete(self, location): """ Takes a `glance.store.location.Location` object that indicates where to find the image file to delete :location `glance.store.location.Location` object, supplied from glance.store.location.get_location_from_uri() :raises NotFound if image does not exist :raises Forbidden if cannot delete because of permissions """ loc = location.store_location fn = loc.path if os.path.exists(fn): try: LOG.debug(_("Deleting image at %(fn)s") % locals()) os.unlink(fn) except OSError: raise exception.Forbidden(_("You cannot delete file %s") % fn) else: raise exception.NotFound(_("Image file %s does not exist") % fn) def add(self, image_id, image_file, image_size): """ Stores an image file with supplied identifier to the backend storage system and returns an `glance.store.ImageAddResult` object containing information about the stored image. :param image_id: The opaque image identifier :param image_file: The image data to write, as a file-like object :param image_size: The size of the image data to write, in bytes :retval `glance.store.ImageAddResult` object :raises `glance.common.exception.Duplicate` if the image already existed :note By default, the backend writes the image data to a file `/<DATADIR>/<ID>`, where <DATADIR> is the value of the filesystem_store_datadir configuration option and <ID> is the supplied image ID. """ filepath = os.path.join(self.datadir, str(image_id)) if os.path.exists(filepath): raise exception.Duplicate(_("Image file %s already exists!") % filepath) checksum = hashlib.md5() bytes_written = 0 try: with open(filepath, 'wb') as f: for buf in utils.chunkreadable(image_file, ChunkedFile.CHUNKSIZE): bytes_written += len(buf) checksum.update(buf) f.write(buf) except IOError as e: if e.errno in [errno.EFBIG, errno.ENOSPC]: try: os.unlink(filepath) except Exception: msg = _('Unable to remove partial image data for image %s') LOG.error(msg % image_id) raise exception.StorageFull() elif e.errno == errno.EACCES: raise exception.StorageWriteDenied() else: raise checksum_hex = checksum.hexdigest() LOG.debug(_("Wrote %(bytes_written)d bytes to %(filepath)s with " "checksum %(checksum_hex)s") % locals()) return ('file://%s' % filepath, bytes_written, checksum_hex)
Python
0.000002
@@ -3880,16 +3880,325 @@ ept +( IOError -: +, OSError):%0A if os.path.exists(self.datadir):%0A # NOTE(markwash): If the path now exists, some other%0A # process must have beat us in the race condition. But it%0A # doesn't hurt, so we can safely ignore the error.%0A return %0A
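The hunk above applies the standard create-directory race fix: if makedirs fails but the path now exists, another process won the race and the error is benign. The generic pattern, sketched outside of Glance:

import os

def ensure_dir(path):
    try:
        os.makedirs(path)
    except OSError:
        # Another process may have created the directory between our check
        # and this call; only propagate if it really is still missing.
        if not os.path.isdir(path):
            raise
    # On Python 3.2+ the same idea is simply:
    # os.makedirs(path, exist_ok=True)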
e1e24a4fd232342dcca4151a0fa5aa3990ceb3b9
initial hello world
hello.py
hello.py
Python
0.999715
@@ -0,0 +1,14 @@ +print %22hello%22%0A
d80388591e3a55969688957b7c1bbd9bcda40296
Create social_feedback_counter.py compatible with hatebu, fb_like & tweet
social_feedback_counter.py
social_feedback_counter.py
Python
0
@@ -0,0 +1,1108 @@ +# coding:utf-8%0A%0Aimport urllib%0Aimport json%0A%0A%0Aclass SocialFeadbackCounter(object):%0A def __init__(self, url):%0A self.url = url%0A%0A def hatebu(self):%0A api_url = 'http://b.hatena.ne.jp/entry/json/' + self.url%0A hb_json = json.loads(urllib.urlopen(api_url).read(), encoding='utf-8')%0A%0A if hb_json is not None:%0A return int(hb_json%5B'count'%5D)%0A else: # In case 0, response is null(None)%0A return 0%0A%0A def fb_like(self):%0A api_url = 'http://graph.facebook.com/' + self.url%0A fb_json = json.loads(urllib.urlopen(api_url).read(), encoding='utf-8')%0A try:%0A return fb_json%5B'shares'%5D%0A except KeyError: # In case 0, 'share' key is not exsist in response json%0A return 0%0A%0A def tweet(self):%0A api_url = 'http://urls.api.twitter.com/1/urls/count.json?url=' + self.url%0A tw_json = json.loads(urllib.urlopen(api_url).read(), encoding='utf-8')%0A return tw_json%5B'count'%5D%0A%0A%0Aif __name__ == '__main__':%0A u = SocialFeadbackCounter('https://www.google.co.jp/')%0A print u.hatebu(), u.fb_like(), u.tweet()%0A
1866bb1ad5f5c4338c2173327d620e92c2ba5043
Create basic PodSixNet server
server.py
server.py
Python
0
@@ -0,0 +1,1003 @@ +from PodSixNet.Channel import Channel%0Afrom PodSixNet.Server import Server%0A%0Afrom time import sleep%0A%0A#Create the channel to deal with our incoming requests from the client%0A#A new channel is created every time a client connects%0Aclass ClientChannel(Channel):%0A%0A #Create a function that will respond to every request from the client%0A def Network(self, data):%0A%0A #Print the contents of the packet%0A print(data)%0A%0A#Create a new server for our game%0Aclass GameServer(Server):%0A%0A #Set the channel to deal with incoming requests%0A channelClass = ClientChannel%0A%0A #Function to deal with new connections%0A def Connected(self, channel, addr):%0A print(%22New connection: %7B%7D%22.format(channel))%0A%0A#Start the server, but only if the file wasn't imported%0Aif __name__ == %22__main__%22:%0A%0A print(%22Server starting on LOCALHOST...%5Cn%22)%0A%0A #Create a server%0A s = GameServer()%0A%0A #Pump the server at regular intervals (check for new requests)%0A while True:%0A s.Pump()%0A sleep(0.0001)%0A
0cc0b16a6f29d31c3c2b3e2ad4eb313b010f7806
test addBuilds() method
errata_tool/tests/test_add_builds.py
errata_tool/tests/test_add_builds.py
Python
0.000001
@@ -0,0 +1,753 @@ +import requests%0A%0A%0Aclass TestAddBuilds(object):%0A%0A def test_add_builds_url(self, monkeypatch, mock_post, advisory):%0A monkeypatch.setattr(requests, 'post', mock_post)%0A advisory.addBuilds(%5B'ceph-10.2.3-17.el7cp'%5D, release='RHEL-7-CEPH-2')%0A assert mock_post.response.url == 'https://errata.devel.redhat.com/api/v1/erratum/26175/add_builds' # NOQA: E501%0A%0A def test_builds_data(self, monkeypatch, mock_post, advisory):%0A monkeypatch.setattr(requests, 'post', mock_post)%0A advisory.addBuilds(%5B'ceph-10.2.3-17.el7cp'%5D, release='RHEL-7-CEPH-2')%0A expected = %7B%0A %22product_version%22: %22RHEL-7-CEPH-2%22,%0A %22build%22: %22ceph-10.2.3-17.el7cp%22,%0A %7D%0A assert mock_post.kwargs%5B'json'%5D == %5Bexpected%5D%0A
fbbd6526612bbb450c5c4c1ecffd21e32f4c98c6
Add simple server
server.py
server.py
Python
0.000001
@@ -0,0 +1,216 @@ +import SimpleHTTPServer%0Aimport SocketServer%0A%0APORT = 8000%0A%0AHandler = SimpleHTTPServer.SimpleHTTPRequestHandler%0A%0Ahttpd = SocketServer.TCPServer(('', PORT), Handler)%0A%0Aprint %22serving at port%22, PORT%0Ahttpd.serve_forever()%0A
9e6bae8aa92ed0332efd689b6f43063b0569ef0a
add 16.py
16.py
16.py
Python
0.998462
@@ -0,0 +1,1450 @@ +%22%22%22Python challenge #16:%0Ahttp://www.pythonchallenge.com/pc/return/mozart.html%22%22%22%0Aimport urllib2%0Afrom PIL import Image%0Aurl = 'http://www.pythonchallenge.com/pc/return/mozart.gif'%0Aun = 'huge'%0Apw = 'file'%0Apink = (255, 0, 255)%0A%0A%0Adef main():%0A setup_auth_handler()%0A img = urllib2.urlopen(url)%0A im = Image.open(img)%0A rgb_im = im.convert('RGB')%0A seq = list(rgb_im.getdata())%0A output = %5B%5D%0A for count, x in enumerate(seq):%0A if x == pink:%0A if seq%5Bcount-1%5D != pink:%0A output.append(count)%0A print len(output)%0A for count, x in enumerate(output):%0A if count %3E 0:%0A print x-output%5Bcount-1%5D%0A%0A print len(output)%0A # matrix = rgb_im.load()%0A # for y in xrange(int(rgb_im.size%5B1%5D)):%0A # import pdb; pdb.set_trace()%0A # for x in xrange(int(rgb_im.size%5B0%5D)):%0A # print matrix%5Bx, y%5D%0A # with open('temp%7B%7D.jpg'.format(str(i)), %22wb%22) as file_handle:%0A # file_handle.write(file_content%5Bi::5%5D)%0A%0A(249, 249, 249)%0A(255, 0, 255)%0A(255, 0, 255)%0A(255, 0, 255)%0A(255, 0, 255)%0A(255, 0, 255)%0A(252, 252, 252)%0A%0Adef setup_auth_handler():%0A %22%22%22Method for setting up authentication.%22%22%22%0A password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()%0A password_mgr.add_password(None, url, un, pw)%0A handler = urllib2.HTTPBasicAuthHandler(password_mgr)%0A opener = urllib2.build_opener(handler)%0A urllib2.install_opener(opener)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
af320490aaa59d69faed9357d9690d945272bec5
add empty file to test Slack integration
A2.py
A2.py
Python
0
@@ -0,0 +1,25 @@ +#!/usr/bin/env python3%0A%0A%0A
31a74f1b9b50036a9b1de603f3437516eaef7807
Create pb.py
pb.py
pb.py
Python
0.000715
@@ -0,0 +1,156 @@ +print %22ParseBasic interpreter v0.1%22%0Aprint %22Copyright 2015 NETponents%22%0Aprint %22Licensed under MIT license%22%0Aprint %22Commercial use of this build is prohibited%22%0A
c4015ed868b65ce5c7ed660c84e252a950294642
Add basic functionality to query the (horrible) website.
r1.py
r1.py
Python
0
@@ -0,0 +1,1625 @@ +from datetime import date%0Aimport bs4%0Aimport itertools as it%0Aimport re%0Aimport requests%0A%0A%0Adef grouper(iterable, n, fillvalue=None):%0A args = %5Biter(iterable)%5D * n%0A return it.izip_longest(fillvalue=fillvalue, *args)%0A%0A%0Adef extract_name(bsitem):%0A return bsitem.find('span').text%0A%0A%0Adef extract_price(bsitem):%0A reg = re.compile(r'CHF (%5B%5Cd%5C.%5D+)')%0A return float(reg.findall(bsitem.text)%5B0%5D)%0A%0A%0Adef extract_table(response):%0A items = bs4.BeautifulSoup(response.text).find(%0A 'table',%0A class_='menuRestaurant').findAll('table',%0A class_='HauteurMenu')%0A return %5B(extract_name(i), extract_price(i)) for i in items%5B1::2%5D%5D%0A%0A%0Adef create_payload(page):%0A return %7B'fa_afficheSemaine_menurestaurant': 'Page %7B%7D'.format(page),%0A 'fn_changeType': 2,%0A 'fn_jourSemaine': '%7B%7D'.format(date.today()),%0A 'fn_limite': 2 * page - 1,%0A 'fn_refresh': 1,%0A 'fn_numpage': page%7D%0A%0A%0Adef split_days(items):%0A xs = %5Bgrouper(i, n) for i, n in zip(items, (3, 2, 2))%5D%0A return %5Blist(it.chain(*i)) for i in zip(*xs)%5D%0A%0A%0Adef get_menu():%0A URL1 = 'http://extranet.novae-restauration.ch/index.php?frame=1&x=d894ddae3c17b40b4fe7e16519f950f0&y=c7b3f79848b99a8e562a1df1d6285365&z=33'%0A URL2 = 'http://extranet.novae-restauration.ch/novae/traiteur/restauration/restaurant-cern.html?frame=1'%0A s = requests.Session()%0A return split_days(%5Bextract_table(s.get(URL1)), extract_table(%0A s.post(URL2,%0A data=create_payload(2))), extract_table(%0A s.post(URL2,%0A data=create_payload(3)))%5D)%0A
b49571316595b97ba602ac737f9c47ce682ac7b0
Solving 53
53.py
53.py
Python
0.999999
@@ -0,0 +1,141 @@ +class Solution:%0A # @param s : A string%0A # @return : A string%0A def reverseWords(self, s):%0A # write your code here%0A        pass
7dede788648d5569587214722a2a128f419a7b8a
Create v1.py
v1.py
v1.py
Python
0
@@ -0,0 +1,1289 @@ +print %22This is Pizza Pi R Squared. What's it do? It lets you determine whether buying a small pizza or a large pizza is a better value in terms of cost per bite of pizza.%22%0A %0Adiameter_one = int(raw_input(%22What's the first pizza's diameter (in inches)?%22))%0Acost_one = int(raw_input(%22How much does the first pizza cost? (in dollars and cents)%22))%0Aarea_one = (diameter_one / 2) **2 * 3.14%0Atotal_one = cost_one / area_one%0A%0Aprint %22With this pizza, you're paying %25s cents per square inch.%22 %25 (total_one)%0A%0Adiameter_two = int(raw_input(%22What's the second pizza's diameter (in inches)?%22))%0Acost_two = int(raw_input(%22How much does the second pizza cost? (in dollars and cents)%22))%0Aarea_two = (diameter_two / 2) **2 * 3.14%0Atotal_two = cost_two / area_two%0A%0Aprint %22With this pizza, you're paying %25s cents per square inch.%22 %25 (total_two)%0A%0Aif total_one %3C total_two:%0A%09print %22The first pizza is a better value. Buy that one!%22%0Aelif total_two %3C total_one:%0A%09print %22The second pizza is the better deal - more pizza for the buck. Get that one!%22%0Aelse:%0A%09print %22Same deal - get whichever you'd like!%22%0A%09%0A%0A#I'm a noob python programmer and this is my first script, other than those from %22Learn Python the Hard Way.%22 %0A#Suggestions/improvements/ideas appreciated!%0A#Feel free to drop me a note at gteninbaum@suffolk.edu.%0A
a998bd2686ab924035325d7288131a7141a457bb
Apply orphaned migration
project/apps/api/migrations/0010_remove_chart_song.py
project/apps/api/migrations/0010_remove_chart_song.py
Python
0
@@ -0,0 +1,345 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('api', '0009_auto_20150722_1041'),%0A %5D%0A%0A operations = %5B%0A migrations.RemoveField(%0A model_name='chart',%0A name='song',%0A ),%0A %5D%0A
fe668b882d3c27f8f7bf7f8cf6d338bf3216310e
add testing script in temp location
aki.py
aki.py
Python
0
@@ -0,0 +1,1534 @@ +#!/usr/bin/env python%0A# XXX this is a helpful script, but probably belongs in scriptworker/test/data%0Afrom __future__ import print_function%0A%0Aimport aiohttp%0Aimport asyncio%0Afrom copy import deepcopy%0Aimport json%0Aimport logging%0Aimport pprint%0Aimport sys%0Afrom scriptworker.constants import DEFAULT_CONFIG%0Afrom scriptworker.context import Context%0Afrom scriptworker.cot.verify import ChainOfTrust, build_chain_of_trust%0A%0Atask_id = %22S5pv1_I5SJWwGcjAFW1q6g%22%0Aif len(sys.argv) %3E 1:%0A task_id = sys.argv%5B1%5D%0Aloop = asyncio.get_event_loop()%0Acontext = Context()%0Awith open(%22/Users/asasaki/.scriptworker%22, %22r%22) as fh:%0A context.credentials = json.load(fh)%5B'credentials'%5D%0Acontext.queue = context.create_queue(context.credentials)%0Acontext.task = loop.run_until_complete(context.queue.task(task_id))%0A%0Acontext.config = dict(deepcopy(DEFAULT_CONFIG))%0Acontext.config.update(%7B%0A 'artifact_dir': '/tmp/artifacts',%0A 'base_gpg_home_dir': '/tmp/gpg',%0A%7D)%0A%0Alog = logging.getLogger('scriptworker')%0Alog.setLevel(logging.DEBUG)%0Alogging.basicConfig()%0Awith aiohttp.ClientSession() as session:%0A context.session = session%0A cot = ChainOfTrust(context, 'signing', task_id=%22J_RwqU2wR1iAegzl6bIVcg%22)%0A loop.run_until_complete(build_chain_of_trust(cot))%0A pprint.pprint(cot.dependent_task_ids())%0A print(%22Cot task_id: %7B%7D%22.format(cot.task_id))%0A for link in cot.links:%0A print(%22task_id: %7B%7D%22.format(link.task_id))%0A # print(link.cot_dir)%0A # print(link.decision_task_id)%0A context.session.close()%0Acontext.queue.session.close()%0Aloop.close()%0A
59b01485c70d42e32acb4c80efbe0e393ca8c437
Add aes.py
aes.py
aes.py
Python
0.001639
@@ -0,0 +1,1441 @@ +# -*- coding: utf-8 -*-%0A%0Aimport base64%0Afrom Crypto import Random%0Afrom Crypto.Cipher import AES%0A%0A%0Aclass AESCipher:%0A def __init__(self, key):%0A self.bs = 32%0A if len(key) %3E= 32:%0A self.key = key%5B:32%5D%0A else:%0A self.key = self._pad(key)%0A%0A def encrypt(self, raw):%0A raw = self._pad(raw)%0A iv = Random.new().read(AES.block_size)%0A cipher = AES.new(self.key, AES.MODE_CBC, iv)%0A return base64.b64encode(iv + cipher.encrypt(raw))%0A%0A def decrypt(self, enc):%0A enc = base64.b64decode(enc)%0A iv = enc%5B:AES.block_size%5D%0A cipher = AES.new(self.key, AES.MODE_CBC, iv)%0A return self._unpad(cipher.decrypt(enc%5BAES.block_size:%5D))%0A%0A def _pad(self, s):%0A %22%22%22 Pad the string to a multiple of self.bs in length %22%22%22%0A pad_length = self.bs - len(s) %25 self.bs%0A return s + pad_length * chr(pad_length)%0A%0A def _unpad(self, s):%0A %22%22%22 Strip the padding chars and return the original string %22%22%22%0A return s%5B:-ord(s%5B-1%5D)%5D%0A%0A%0Adef test(src, key='test_key'):%0A coder = AESCipher(key)%0A enc = coder.encrypt(src)%0A dec = coder.decrypt(enc)%0A print '%5Cn', '%5B%5B TEST %5D%5D'.center(70, '*'), '%5Cn'%0A print 'len: %25-3d src: %25s' %25 (len(src), repr(src))%0A print 'len: %25-3d enc: %25s' %25 (len(enc), repr(enc))%0A print 'Decrypt %25s!' %25 ('Right' if dec == src else 'Wrong')%0A%0A%0Aif __name__ == %22__main__%22:%0A for i in range(0, 10):%0A test('l' * i)%0A%0A
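_pad/_unpad above implement PKCS#7-style padding: the N missing characters are filled with N copies of chr(N), so the last character always records how much to strip. Standalone, with a small block size for brevity (illustrative, not from the commit):

bs = 8

def pad(s):
    n = bs - len(s) % bs
    return s + n * chr(n)

def unpad(s):
    return s[:-ord(s[-1])]

padded = pad('hello')            # 'hello\x03\x03\x03'
assert unpad(padded) == 'hello'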
68b8ad567545c7ec07f13089f2b3e4ecd4cc835e
Create api.py
api.py
api.py
Python
0
@@ -0,0 +1,1016 @@ +from flask import Flask%0Afrom flask.ext.restful import reqparse, abort, Api, Resource%0Afrom profile import Profile%0A%0Aapp = Flask(__name__)%0Aapi = Api(app)%0A%0A%0A%0Adef abort_if_user_doesnt_exist(user):%0A if not isValid(user):%0A abort(404, message=%22User %7B%7D doesn't exist%22.format(user))%0A%0A# Argument validation%0Aparser = reqparse.RequestParser()%0Aparser.add_argument('user', type=str)%0A%0Adef userInfo(user):%0A return Profile(user).getProfile()%0A%0Adef isValid(user):%0A return True%0A%0A%0A# User profile%0A# show a profile for a single twitter handle%0Aclass User(Resource):%0A def get(self, user):%0A abort_if_user_doesnt_exist(user)%0A return userInfo(user)%0A%0A%0A%0A# Tweets%0A# shows a up to 100 tweets from start%0Aclass Tweets(Resource):%0A def get(self, user, start, end):%0A # TODO paginate with cursor%0A return None%0A%0A%0A##%0A## Setup the Api resource routing here%0A##%0A# TODO #api.add_resource(Tweets, '/tweets')%0Aapi.add_resource(User, '/handle/%3Cstring:user%3E')%0A%0A%0Aif __name__ == '__main__':%0A app.run(debug=True)%0A
beca6dad76746f5e6cce76749efda391d5c49a6e
Number 5 is done
ex5.py
ex5.py
Python
0.998276
@@ -0,0 +1,733 @@ +#!/usr/bin/env python%0A%0A#feel free to sub your own details.. like I have from the original of Mr Shaw's%0Amy_name = %22Davii%22%0Amy_age = 34 #not a lie!%0Amy_height = 186 #CM%0Amy_weight = 90 #kg .. I need to diet.%0Amy_eyes = %22blue%22%0Amy_teeth = %22white%22 #maybe a little coffee staining%0Amy_hair = %22Dark Brown%22%0A%0Aprint(%22Let's talk about %25s.%22 %25 my_name)%0Aprint(%22He's %25d inches tall.%22 %25 my_height)%0Aprint(%22He's %25d Kilo's heavy.%22 %25my_weight)%0Aprint(%22Yeah I need to diet.%22)%0Aprint(%22He's got %25s eyes and %25s hair.%22 %25 (my_eyes, my_hair))%0Aprint(%22His teeth are usually %25s depending on the coffee.%22 %25 my_teeth)%0A%0A# this line is tricky, try to get it right%0Aprint(%22If I add %25d, %25d, and %25d, I get %25d.%22 %25 (my_age, my_height, my_weight, my_age + my_height + my_weight)) %0A
265bedb193f8615f99daa63c921b572408921605
Add tests for quick sort
test_quick_sort.py
test_quick_sort.py
Python
0
@@ -0,0 +1,1167 @@ +# -*- coding: utf-8 -*-%0Afrom quick_sort import quick_sort%0A%0A%0Adef test_sorted():%0A my_list = list(range(100))%0A quick_sort(my_list)%0A assert my_list == list(range(100))%0A%0A%0Adef test_reverse():%0A my_list = list(range(100))%5B::-1%5D%0A quick_sort(my_list)%0A assert my_list == list(range(100))%0A%0A%0Adef test_empty():%0A my_list = %5B%5D%0A quick_sort(my_list)%0A assert my_list == %5B%5D%0A%0A%0Adef test_abc():%0A my_list = %5B'a', 'b', 'c', 'd', 'e'%5D%0A quick_sort(my_list)%0A assert my_list == %5B'a', 'b', 'c', 'd', 'e'%5D%0A my_list = %5B'e', 'd', 'c', 'b', 'a'%5D%0A quick_sort(my_list)%0A assert my_list == %5B'a', 'b', 'c', 'd', 'e'%5D%0A%0A%0Adef test_unicode():%0A my_list = %5B'%CF%80'%5D%0A quick_sort(my_list)%0A assert my_list == %5B'%5Cxcf%5Cx80'%5D%0A%0A%0Adef test_duplicate():%0A my_list = %5B1, 2, 2, 5, 3%5D%0A quick_sort(my_list)%0A assert my_list == %5B1, 2, 2, 3, 5%5D%0A%0A%0Adef test_combo():%0A my_list = %5B42, 1, 'a', 500%5D%0A quick_sort(my_list)%0A assert my_list == %5B1, 42, 500, 'a'%5D%0A my_list = %5B42, '1', 'a', '500'%5D%0A quick_sort(my_list)%0A assert my_list == %5B42, '1', '500', 'a'%5D%0A%0A%0Adef test_function():%0A my_list = %5B%5D%0A new_list = %5Bquick_sort(my_list)%5D%0A assert new_list == %5BNone%5D%0A
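The quick_sort module these tests import is not part of the record. A minimal in-place implementation consistent with them (mutates the list and returns None; the mixed int/str cases assume Python 2 ordering) might look like:

def quick_sort(items, lo=0, hi=None):
    # Lomuto partition scheme, sorting items[lo..hi] in place.
    if hi is None:
        hi = len(items) - 1
    if lo >= hi:
        return
    pivot = items[hi]
    i = lo
    for j in range(lo, hi):
        if items[j] < pivot:
            items[i], items[j] = items[j], items[i]
            i += 1
    items[i], items[hi] = items[hi], items[i]
    quick_sort(items, lo, i - 1)
    quick_sort(items, i + 1, hi)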
017fa0b360c23696d3176f48e2c53accac8bcfc5
Add version module
redcap/version.py
redcap/version.py
Python
0.000001
@@ -0,0 +1,18 @@ +VERSION = '0.5.2'%0A
426ef95ba1b2f3ac42c16a3594d186c4c9226a6e
add admin
referral/admin.py
referral/admin.py
Python
0
@@ -0,0 +1,411 @@ +from django.contrib import admin%0A%0Afrom models import Campaign, Referrer%0A%0Aclass ReferrerInine(admin.TabularInline):%0A model = Referrer%0A extra = 0%0A%0Aclass CampaignAdmin(admin.ModelAdmin):%0A inlines = (ReferrerInine, )%0A%0Aclass ReferrerAdmin(admin.ModelAdmin):%0A list_display = ('name', 'campaign', 'creation_date')%0A%0Aadmin.site.register(Campaign, CampaignAdmin)%0Aadmin.site.register(Referrer, ReferrerAdmin)%0A%0A
747800528b3709759738081ee580e380bf164c02
add skeletons of new unit tests to be added
pymatgen/analysis/defects/tests/test_compatibility.py
pymatgen/analysis/defects/tests/test_compatibility.py
Python
0
@@ -0,0 +1,824 @@ +# coding: utf-8%0A# Copyright (c) Pymatgen Development Team.%0A# Distributed under the terms of the MIT License.%0A%0Afrom __future__ import unicode_literals%0A%0Aimport unittest%0A%0Afrom pymatgen.util.testing import PymatgenTest%0A%0A%0Aclass DefectCompatibilityTest(PymatgenTest):%0A%0A def test_process_entry(self):%0A pass%0A%0A def test_perform_all_corrections(self):%0A pass%0A%0A def test_perform_freysoldt(self):%0A pass%0A%0A def test_perform_kumagai(self):%0A pass%0A%0A def test_run_bandfilling(self):%0A pass%0A%0A def test_run_band_edge_shifting(self):%0A pass%0A%0A def test_delocalization_analysis(self):%0A pass%0A%0A def test_is_freysoldt_delocalized(self):%0A pass%0A%0A def test_is_kumagai_delocalized(self):%0A pass%0A%0A def test_is_final_relaxed_structure_delocalized(self):%0A pass%0A
b68db14e5ecd2e8ccaaa0412798a8669232fb8e5
Add constraint variables in solver
solver.py
solver.py
Python
0.000002
@@ -0,0 +1,1593 @@ +from collections import namedtuple%0A%0Afrom past import autotranslate%0A# python-constraint is python2, so we'll use python-future's autotranslate function%0Aautotranslate(%5B'constraint'%5D)%0Aimport constraint%0A%0A# periods is an int for how many periods per week are required for this subject%0Asubject = namedtuple(%22subject%22, %5B'name', 'periods'%5D)%0A%0A%0Adef solve(subjects: list, max_students_per_class: int, periods_per_week: int):%0A %22%22%22%0A Create a timetable for the given number of periods, subjects, and students per subject%0A %0A Args:%0A periods_per_week (int): The number of periods in the whole timetable%0A subjects (%5Bstr%5D): The subjects that should appear on the timetable%0A max_students_per_class (int): The number of students per class%0A %22%22%22%0A%0A problem = constraint.Problem()%0A%0A # Add one variable per subject period%0A for subject in subjects:%0A # Start numbering from 1%0A for period_num in range(1, subject.periods+1):%0A problem.addVariable('%7Bsubject_name%7D-period%7Bperiod_num%7D'.format(subject_name=subject.name, period_num=period_num),%0A constraint.Domain(range(1, periods_per_week + 1)))%0A%0A%0A%0A%0A# Test data%0Aperiods_per_week = 20%0AHistorySL = subject(%22HistorySL%22, 2)%0AHistoryHL = subject(%22HistoryHL%22, 3)%0AMathSL = subject(%22MathSL%22, 2)%0AMathHL = subject(%22MathHL%22, 3)%0ABiologySL = subject(%22BiologySL%22, 2)%0ABiologyHL = subject(%22BiologyHL%22, 3)%0A%0Asubjects = %5B%0A HistorySL,%0A HistoryHL,%0A MathSL,%0A MathHL,%0A BiologySL,%0A BiologyHL%0A%5D%0A%0Amax_students_per_class = 14%0A%0Asolve(subjects, max_students_per_class, periods_per_week)%0A
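The commit above only registers one variable per subject period; turning that into a timetable still needs constraints and a solve call. A generic python-constraint sketch, assuming a Python 3 release of the library (so no autotranslate shim); the variable names mirror the format string above and the AllDifferent rule is illustrative:

import constraint

problem = constraint.Problem()
problem.addVariable('HistorySL-period1', range(1, 21))
problem.addVariable('HistorySL-period2', range(1, 21))
# A subject's periods must land in distinct timetable slots.
problem.addConstraint(constraint.AllDifferentConstraint(),
                      ['HistorySL-period1', 'HistorySL-period2'])
solutions = problem.getSolutions()  # list of {variable: slot} dicts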
92f2b8c5b44fd6f306e32275b24daddb35fa3e53
function to calculate_effective_permittivity
gdsfactory/simulation/effective_permittivity.py
gdsfactory/simulation/effective_permittivity.py
Python
0.998539
@@ -0,0 +1,2574 @@ +%22%22%22Calculate the effective refractive index for a 1D mode.%22%22%22%0A%0Afrom typing import Literal%0A%0Aimport numpy as np%0Afrom scipy.optimize import fsolve%0A%0A%0Adef calculate_effective_permittivity(%0A epsilon_film: float,%0A epsilon_substrate: float,%0A epsilon_cladding: float,%0A thickness: float,%0A wavelength: float,%0A polarization: Literal%5B%22te%22, %22tm%22%5D,%0A):%0A %22%22%22%0A Calculate the effective refractive index for a 1D mode.%0A%0A .. code::%0A%0A ----------------- %7C%0A epsilon_cladding inf%0A ----------------- %7C%0A epsilon_film thickness%0A ----------------- %7C%0A epsilon_substrate inf%0A ----------------- %7C%0A%0A Args:%0A epsilon_film: Relative permittivity of the film.%0A epsilon_substrate: Relative permittivity of the substrate.%0A epsilon_cladding: Relative permittivity of the cladding.%0A thickness: Thickness of the film.%0A wavelength: Wavelength.%0A polarization: Either %22te%22 or %22tm%22.%0A%0A Returns:%0A List of effective permittivity.%0A %22%22%22%0A if polarization == %22te%22:%0A tm = False%0A elif polarization == %22tm%22:%0A tm = True%0A else:%0A raise ValueError('Polarization must be %22te%22 or %22tm%22')%0A%0A k_0 = 2 * np.pi / wavelength%0A%0A def k_f(e_eff):%0A return k_0 * np.sqrt(epsilon_film - e_eff) / (epsilon_film if tm else 1)%0A%0A def k_s(e_eff):%0A return (%0A k_0 * np.sqrt(e_eff - epsilon_substrate) / (epsilon_substrate if tm else 1)%0A )%0A%0A def k_c(e_eff):%0A return k_0 * np.sqrt(e_eff - epsilon_cladding) / (epsilon_cladding if tm else 1)%0A%0A def objective(e_eff):%0A return 1 / np.tan(k_f(e_eff) * thickness) - (%0A k_f(e_eff) ** 2 - k_s(e_eff) * k_c(e_eff)%0A ) / (k_f(e_eff) * (k_s(e_eff) + k_c(e_eff)))%0A%0A # scan roughly for indices%0A # use a by 1e-10 smaller search area to avoid division by zero%0A x = np.linspace(%0A min(epsilon_substrate, epsilon_cladding) + 1e-10, epsilon_film - 1e-10, 1000%0A )%0A indices_temp = x%5Bnp.abs(objective(x)) %3C 0.1%5D%0A if not len(indices_temp):%0A return %5B%5D%0A%0A # and then use fsolve to get exact indices%0A indices_temp = fsolve(objective, indices_temp)%0A%0A # then make the indices unique%0A indices = %5B%5D%0A for index in indices_temp:%0A if not any(np.isclose(index, i, atol=1e-7) for i in indices):%0A indices.append(index)%0A%0A return indices%0A%0A%0Aif __name__ == %22__main__%22:%0A print(%0A calculate_effective_permittivity(%0A 3.4777**2, 1.444**2, 1.444**2, 0.22e-6, 1.55e-6, %22te%22%0A )%0A )%0A
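The record's __main__ block prints the raw permittivities; the effective indices follow by square root. A usage sketch with the same silicon-on-insulator parameters, assuming the function above is importable:

import numpy as np

eps_eff = calculate_effective_permittivity(
    epsilon_film=3.4777**2, epsilon_substrate=1.444**2,
    epsilon_cladding=1.444**2, thickness=0.22e-6,
    wavelength=1.55e-6, polarization="te")
n_eff = [float(np.sqrt(e)) for e in eps_eff]  # mode effective indices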
a14d696cad5b3249997257298150977fa53f9cc8
Add lc151_reverse_words_in_a_string.py
lc151_reverse_words_in_a_string.py
lc151_reverse_words_in_a_string.py
Python
0.998758
@@ -0,0 +1,1155 @@ +%22%22%22Leetcode 151. Reverse Words in a String%0AMedium%0A%0AGiven an input string, reverse the string word by word. %0A%0AExample 1:%0AInput: %22the sky is blue%22%0AOutput: %22blue is sky the%22%0A%0AExample 2:%0AInput: %22 hello world! %22%0AOutput: %22world! hello%22%0AExplanation: Your reversed string should not contain leading or trailing spaces.%0A%0AExample 3:%0AInput: %22a good example%22%0AOutput: %22example good a%22%0AExplanation: You need to reduce multiple spaces between two words to a single %0Aspace in the reversed string.%0A %0A%0ANote:%0AA word is defined as a sequence of non-space characters.%0AInput string may contain leading or trailing spaces. However, your reversed %0Astring should not contain leading or trailing spaces.%0AYou need to reduce multiple spaces between two words to a single space in the %0Areversed string.%0A%22%22%22%0A%0Aclass Solution(object):%0A def reverseWords(self, s):%0A %22%22%22%0A :type s: str%0A :rtype: str%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A import time%0A%0A # Ans: %22blue is sky the%22.%0A s = 'the sky is blue'%0A%0A # Ans: %22world! hello%22.%0A s = ' hello world! '%0A%0A # Ans: %22example good a%22.%0A s = 'a good example'%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
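The template above leaves reverseWords as pass. A minimal sketch covering all three examples — str.split() with no argument already discards leading, trailing, and repeated spaces:

class Solution(object):
    def reverseWords(self, s):
        # split() without arguments drops empty fields, so extra
        # whitespace collapses before the words are re-joined in reverse.
        return ' '.join(reversed(s.split()))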
b80d7927225f172653922317ef5c96e90876588d
Create SemiSupervisedTSNE.py
sstsne/SemiSupervisedTSNE.py
sstsne/SemiSupervisedTSNE.py
Python
0
@@ -0,0 +1 @@ +%0A
2fbcd2c5c47b4066e74619196dc333fa88a015d1
isolate pipe operator overload code
tests/pipe_test.py
tests/pipe_test.py
Python
0.000001
@@ -0,0 +1,1194 @@ +# -*- coding: utf-8 -*-%0Aimport asyncio%0Aimport pytest%0Aimport paco%0Afrom paco.pipe import overload%0A%0A%0Adef test_pipe_operator_overload():%0A @asyncio.coroutine%0A def filterer(x):%0A return x %3C 8%0A%0A @asyncio.coroutine%0A def mapper(x):%0A return x * 2%0A%0A @asyncio.coroutine%0A def drop(x):%0A return x %3C 10%0A%0A @asyncio.coroutine%0A def reducer(acc, x):%0A return acc + x%0A%0A @asyncio.coroutine%0A def task(numbers):%0A return (yield from (numbers%0A %7C paco.filter(filterer)%0A %7C paco.map(mapper)%0A %7C paco.dropwhile(drop)%0A %7C paco.reduce(reducer, initializer=0)))%0A%0A result = paco.run(task((1, 2, 3, 4, 5, 6, 7, 8, 9, 10)))%0A assert result == 36%0A%0A%0Adef test_overload_error():%0A with pytest.raises(TypeError, message='fn must be a callable object'):%0A overload(None)%0A%0A with pytest.raises(ValueError,%0A message='invalid function signature or arity'):%0A overload(lambda x: True)%0A%0A with pytest.raises(ValueError,%0A message='invalid function signature or arity'):%0A overload(lambda x, y: True)%0A
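Context for the overload under test: the iterable | paco.map(...) syntax works because Python falls back to the right operand's __ror__ when the left operand does not implement |. A generic illustration of that mechanism — not paco's actual implementation:

class Pipeable(object):
    """Generic '|' wrapper; not paco's real overload helper."""
    def __init__(self, fn):
        self.fn = fn
    def __ror__(self, iterable):   # invoked for: iterable | Pipeable(fn)
        return self.fn(iterable)

double_all = Pipeable(lambda xs: [x * 2 for x in xs])
assert ([1, 2, 3] | double_all) == [2, 4, 6]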
fd6eea38f389a440f2c7d69e0de29677a64dbd2c
Add manual wifi table migration script.
ichnaea/scripts/migrate.py
ichnaea/scripts/migrate.py
Python
0
@@ -0,0 +1,2818 @@ +%22%22%22%0AManual migration script to move networks from old single wifi table%0Ato new sharded wifi table structure.%0A%22%22%22%0Afrom collections import defaultdict%0Aimport sys%0Aimport time%0A%0Afrom ichnaea.config import read_config%0Afrom ichnaea.db import (%0A configure_db,%0A db_worker_session,%0A)%0Afrom ichnaea.models.wifi import (%0A Wifi,%0A WifiShard,%0A)%0A%0A%0Adef migrate(db, batch=1000):%0A added = 0%0A deleted = 0%0A skipped = 0%0A with db_worker_session(db, commit=True) as session:%0A old_wifis = (session.query(Wifi)%0A .order_by(Wifi.id.desc())%0A .limit(batch)).all()%0A sharded = defaultdict(list)%0A for old_wifi in old_wifis:%0A shard = WifiShard.shard_model(old_wifi.key)%0A sharded%5Bshard%5D.append(shard(%0A mac=old_wifi.key,%0A created=old_wifi.created,%0A modified=old_wifi.modified,%0A lat=old_wifi.lat,%0A lon=old_wifi.lon,%0A max_lat=old_wifi.max_lat,%0A min_lat=old_wifi.min_lat,%0A max_lon=old_wifi.max_lon,%0A min_lon=old_wifi.min_lon,%0A radius=old_wifi.range,%0A samples=old_wifi.total_measures,%0A ))%0A%0A moved_wifis = set()%0A for shard, wifis in sharded.items():%0A shard_macs = set(%5Bwifi.mac for wifi in wifis%5D)%0A existing = (session.query(shard.mac)%0A .filter(shard.mac.in_(list(shard_macs)))).all()%0A existing = set(%5Be.mac for e in existing%5D)%0A for wifi in wifis:%0A if wifi.mac not in existing:%0A moved_wifis.add(wifi.mac)%0A session.add(wifi)%0A added += 1%0A else:%0A skipped += 1%0A%0A if moved_wifis:%0A query = (session.query(Wifi)%0A .filter(Wifi.key.in_(list(moved_wifis))))%0A deleted = query.delete(synchronize_session=False)%0A else:%0A deleted = 0%0A return (added, deleted, skipped)%0A%0A%0Adef main(db, repeat=1, batch=1000):%0A for i in range(repeat):%0A start = time.time()%0A print('Start: %25s' %25 time.strftime('%25H:%25M', time.gmtime(start)))%0A added, deleted, skipped = migrate(db, batch=batch)%0A end = int((time.time() - start) * 1000)%0A print('Added: %25s, Deleted: %25s, Skipped: %25s' %25 (%0A added, deleted, skipped))%0A print('Took: %25s ms%5Cn' %25 end)%0A print('End')%0A%0A%0Aif __name__ == '__main__':%0A argv = sys.argv%0A batch = 1000%0A repeat = 1%0A if len(argv) %3E 1:%0A batch = int(argv%5B-1%5D)%0A if len(argv) %3E 2:%0A repeat = int(argv%5B-2%5D)%0A app_config = read_config()%0A db = configure_db(app_config.get('database', 'rw_url'))%0A main(db, repeat=repeat, batch=batch)%0A
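Invocation note, grounded in the argv handling above: the last positional argument is batch and the one before it is repeat, so the shell call python migrate.py 5 2000 is equivalent to:

main(db, repeat=5, batch=2000)   # five passes, 2000 rows per pass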
d4c7869d62635eca3108d743c2bc12c9f394d68a
Add archive.File class, which allows downloading from archive.org
tests/test_item.py
tests/test_item.py
Python
0
@@ -0,0 +1,493 @@ +import os, sys%0Ainc_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))%0Asys.path.insert(0, inc_path)%0A%0Aimport archive%0A%0Adef test_item():%0A item = archive.Item('stairs')%0A assert item.metadata%5B'metadata'%5D%5B'identifier'%5D == 'stairs'%0A%0A%0Adef test_file():%0A item = archive.Item('stairs')%0A filename = 'glogo.png'%0A file = item.file(filename)%0A%0A assert not os.path.exists(filename)%0A file.download()%0A%0A assert os.stat(filename).st_size == file.size%0A os.unlink(filename)%0A
fa4b4de37b38f0ff800bbd2ac007ab6521720258
Add test for box migration script
scripts/tests/test_box_migrate_to_external_account.py
scripts/tests/test_box_migrate_to_external_account.py
Python
0
@@ -0,0 +1,2808 @@ +from nose.tools import *%0A%0Afrom scripts.box.migrate_to_external_account import do_migration, get_targets%0A%0Afrom framework.auth import Auth%0A%0Afrom tests.base import OsfTestCase%0Afrom tests.factories import ProjectFactory, UserFactory%0A%0Afrom website.addons.box.model import BoxUserSettings%0Afrom website.addons.box.tests.factories import BoxOAuthSettingsFactory%0A%0A%0Aclass TestBoxMigration(OsfTestCase):%0A # Note: BoxUserSettings.user_settings has to be changed to foreign_user_settings (model and mongo). See migration instructions%0A%0A def test_migration_no_project(self):%0A%0A user = UserFactory()%0A%0A user.add_addon('box')%0A user_addon = user.get_addon('box')%0A user_addon.oauth_settings = BoxOAuthSettingsFactory()%0A user_addon.save()%0A%0A do_migration(%5Buser_addon%5D)%0A user_addon.reload()%0A%0A assert_is_none(user_addon.oauth_settings)%0A assert_equal(len(user.external_accounts), 1)%0A%0A account = user.external_accounts%5B0%5D%0A assert_equal(account.provider, 'box')%0A assert_equal(account.oauth_key, 'abcdef1')%0A%0A def test_migration_removes_targets(self):%0A BoxUserSettings.remove()%0A%0A user = UserFactory()%0A project = ProjectFactory(creator=user)%0A%0A user.add_addon('box', auth=Auth(user))%0A user_addon = user.get_addon('box')%0A user_addon.oauth_settings = BoxOAuthSettingsFactory()%0A user_addon.save()%0A%0A project.add_addon('box', auth=Auth(user))%0A node_addon = project.get_addon('box')%0A node_addon.foreign_user_settings = user_addon%0A node_addon.save()%0A%0A assert_equal(get_targets().count(), 1)%0A%0A do_migration(%5Buser_addon%5D)%0A user_addon.reload()%0A%0A assert_equal(get_targets().count(), 0)%0A%0A def test_migration_multiple_users(self):%0A user1 = UserFactory()%0A user2 = UserFactory()%0A oauth_settings = BoxOAuthSettingsFactory()%0A%0A user1.add_addon('box')%0A user1_addon = user1.get_addon('box')%0A user1_addon.oauth_settings = oauth_settings%0A user1_addon.save()%0A%0A user2.add_addon('box')%0A user2_addon = user2.get_addon('box')%0A user2_addon.oauth_settings = oauth_settings%0A user2_addon.save()%0A%0A do_migration(%5Buser1_addon, user2_addon%5D)%0A user1_addon.reload()%0A user2_addon.reload()%0A%0A assert_equal(%0A user1.external_accounts%5B0%5D,%0A user2.external_accounts%5B0%5D,%0A )%0A%0A def test_get_targets(self):%0A BoxUserSettings.remove()%0A addons = %5B%0A BoxUserSettings(),%0A BoxUserSettings(oauth_settings=BoxOAuthSettingsFactory()),%0A %5D%0A for addon in addons:%0A addon.save()%0A targets = get_targets()%0A assert_equal(targets.count(), 1)%0A assert_equal(targets%5B0%5D._id, addons%5B-1%5D._id)%0A
47d24759f25a183f26f96fe595fcbd3750ef0149
Fix pjlink issue (#20510)
homeassistant/components/media_player/pjlink.py
homeassistant/components/media_player/pjlink.py
""" Support for controlling projector via the PJLink protocol. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/media_player.pjlink/ """ import logging import voluptuous as vol from homeassistant.components.media_player import ( PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, MediaPlayerDevice) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pypjlink2==1.2.0'] _LOGGER = logging.getLogger(__name__) CONF_ENCODING = 'encoding' DEFAULT_PORT = 4352 DEFAULT_ENCODING = 'utf-8' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD): cv.string, }) SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the PJLink platform.""" host = config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME) encoding = config.get(CONF_ENCODING) password = config.get(CONF_PASSWORD) if 'pjlink' not in hass.data: hass.data['pjlink'] = {} hass_data = hass.data['pjlink'] device_label = "{}:{}".format(host, port) if device_label in hass_data: return device = PjLinkDevice(host, port, name, encoding, password) hass_data[device_label] = device add_entities([device], True) def format_input_source(input_source_name, input_source_number): """Format input source for display in UI.""" return "{} {}".format(input_source_name, input_source_number) class PjLinkDevice(MediaPlayerDevice): """Representation of a PJLink device.""" def __init__(self, host, port, name, encoding, password): """Iinitialize the PJLink device.""" self._host = host self._port = port self._name = name self._password = password self._encoding = encoding self._muted = False self._pwstate = STATE_OFF self._current_source = None with self.projector() as projector: if not self._name: self._name = projector.get_name() inputs = projector.get_inputs() self._source_name_mapping = \ {format_input_source(*x): x for x in inputs} self._source_list = sorted(self._source_name_mapping.keys()) def projector(self): """Create PJLink Projector instance.""" from pypjlink import Projector projector = Projector.from_address( self._host, self._port, self._encoding) projector.authenticate(self._password) return projector def update(self): """Get the latest state from the device.""" with self.projector() as projector: pwstate = projector.get_power() if pwstate == 'off': self._pwstate = STATE_OFF else: self._pwstate = STATE_ON self._muted = projector.get_mute()[1] self._current_source = \ format_input_source(*projector.get_input()) @property def name(self): """Return the name of the device.""" return self._name @property def state(self): """Return the state of the device.""" return self._pwstate @property def is_volume_muted(self): """Return boolean indicating mute status.""" return self._muted @property def source(self): """Return current input source.""" return self._current_source @property def source_list(self): """Return all available input sources.""" return self._source_list @property def supported_features(self): """Return projector supported features.""" return SUPPORT_PJLINK def turn_off(self): 
"""Turn projector off.""" with self.projector() as projector: projector.set_power('off') def turn_on(self): """Turn projector on.""" with self.projector() as projector: projector.set_power('on') def mute_volume(self, mute): """Mute (true) of unmute (false) media player.""" with self.projector() as projector: from pypjlink import MUTE_AUDIO projector.set_mute(MUTE_AUDIO, mute) def select_source(self, source): """Set the input source.""" source = self._source_name_mapping[source] with self.projector() as projector: projector.set_input(*source)
Python
0.000001
@@ -3055,32 +3055,86 @@ the device.%22%22%22%0A + from pypjlink.projector import ProjectorError%0A with sel @@ -3153,32 +3153,53 @@ ) as projector:%0A + try:%0A pwst @@ -3238,16 +3238,20 @@ + if pwsta @@ -3257,18 +3257,34 @@ ate -== 'off':%0A +in ('on', 'warm-up'):%0A @@ -3310,35 +3310,38 @@ wstate = STATE_O -FF%0A +N%0A else @@ -3342,16 +3342,20 @@ else:%0A + @@ -3377,34 +3377,39 @@ wstate = STATE_O -N%0A +FF%0A self @@ -3442,16 +3442,20 @@ te()%5B1%5D%0A + @@ -3499,16 +3499,20 @@ + format_i @@ -3546,16 +3546,548 @@ input()) +%0A except KeyError as err:%0A if str(err) == %22'OK'%22:%0A self._pwstate = STATE_OFF%0A self._muted = False%0A self._current_source = None%0A else:%0A raise%0A except ProjectorError as err:%0A if str(err) == 'unavailable time':%0A self._pwstate = STATE_OFF%0A self._muted = False%0A self._current_source = None%0A else:%0A raise %0A%0A @p
7056f00934c0956bfe1a6aed7558cb3b9fd1de57
add ability to retrieve sorted profiler statistics
tornado_profile.py
tornado_profile.py
"""Profile a Tornado application via REST.""" import tornado.web import yappi __author__ = "Megan Kearl Patten <megkearl@gmail.com>" def start_profiling(): """Start profiler.""" # POST /profiler yappi.start(builtins=False, profile_threads=False) def is_profiler_running(): """Return True if the profiler is running.""" # GET /profiler yappi.is_running() def stop_profiling(): """Stop the profiler.""" # DELETE /profiler yappi.stop() def clear_stats(): """Clear profiler statistics.""" # DELETE /profiler/stats yappi.clear_stats() def get_statistics(): """Get profiler statistics.""" # GET /profiler/stats?sort=cumulative&total=20 y_func_stats = yappi.get_func_stats() pstats = yappi.convert2pstats(y_func_stats) pstats.strip_dirs() pstats.sort_stats("cumulative").print_stats(20) class TornadoProfiler(object): def __init__(self, prefix="", handler_base_class=None): # class UpdatedClass(cls, handler_base_class): pass pass def get_routes(self): return [] def main(port=8888): """Run as sample test server.""" import tornado.ioloop routes = [] + TornadoProfiler().get_routes() app = tornado.web.Application(routes) app.listen(port) tornado.ioloop.IOLoop.current().start() if __name__ == "__main__": main(port=8888)
Python
0
@@ -39,16 +39,49 @@ EST.%22%22%22%0A +from operator import itemgetter%0A%0A import t @@ -891,16 +891,954 @@ s(20)%0A%0A%0A +def get_profiler_statistics(sort=%22cum_time%22, count=20):%0A %22%22%22Return profiler statistics.%0A%0A :param str sort: dictionary key to sort by%0A :param int%7CNone count: the number of results to return, None returns all results.%0A %22%22%22%0A json_stats = %5B%5D%0A pstats = yappi.convert2pstats(yappi.get_func_stats())%0A pstats.strip_dirs()%0A%0A for func, func_stat in pstats.stats.iteritems():%0A path, line, func_name = func%0A cc, num_calls, total_time, cum_time, callers = func_stat%0A json_stats.append(%7B%0A %22path%22: path,%0A %22line%22: line,%0A %22func_name%22: func_name,%0A %22num_calls%22: num_calls,%0A %22total_time%22: total_time,%0A %22total_time_per_call%22: total_time/num_calls if total_time else 0,%0A %22cum_time%22: cum_time,%0A %22cum_time_per_call%22: cum_time/num_calls if cum_time else 0%0A %7D)%0A%0A return sorted(json_stats, key=itemgetter(sort))%5B:count%5D%0A%0A%0A class To
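A hedged usage sketch for the new helper; note the sort is ascending as written, so the first rows returned are the cheapest calls:

for row in get_profiler_statistics(sort="cum_time", count=5):
    print("{func_name} ({path}:{line}): cum {cum_time:.4f}s "
          "over {num_calls} calls".format(**row))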
97d62cd3cb08c8d43a804eb7989b03df3626f0ab
Create music.py
music.py
music.py
Python
0.000003
@@ -0,0 +1,214 @@ +from microbit import *%0Aimport music%0Aimport random%0A%0Awhile True:%0A %0A music.play(music.NYAN, loop=True, wait=False)%0A %0A if getValidDab():%0A music.play(music.POWER_UP)%0A else:%0A music.play(music.POWER_DOWN)%0A %0A
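getValidDab() is called but never defined here, so the script fails as committed; presumably it was meant to live elsewhere. A hypothetical stand-in using the micro:bit gesture API (already available via from microbit import *) so the loop runs:

def getValidDab():
    # Hypothetical: accept a shake gesture as a successful "dab".
    return accelerometer.was_gesture('shake')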
9c26b042c38963bf95cc6456b0f9082c1c0827f3
Add ttype API tests
tests/test_api_ttype.py
tests/test_api_ttype.py
Python
0
@@ -0,0 +1,1632 @@ +from urllib import urlencode%0Aimport json%0Afrom .base import MyTestCase%0Afrom privacyidea.lib.user import (User)%0Afrom privacyidea.lib.tokens.totptoken import HotpTokenClass%0Afrom privacyidea.models import (Token)%0Afrom privacyidea.lib.config import (set_privacyidea_config, get_token_types,%0A get_inc_fail_count_on_false_pin,%0A delete_privacyidea_config)%0Afrom privacyidea.lib.token import (get_tokens, init_token, remove_token,%0A reset_token)%0A%0Afrom privacyidea.lib.error import (ParameterError, UserError)%0A%0APWFILE = %22tests/testdata/passwords%22%0A%0A%0Aclass TtypeAPITestCase(MyTestCase):%0A %22%22%22%0A test the api.ttype endpoints%0A %22%22%22%0A%0A def test_00_create_realms(self):%0A self.setUp_user_realms()%0A%0A def test_01_tiqr(self):%0A init_token(%7B%22serial%22: %22TIQR1%22,%0A %22type%22: %22tiqr%22,%0A %22user%22: %22cornelius%22,%0A %22realm%22: self.realm1%7D)%0A with self.app.test_request_context('/ttype/tiqr',%0A method='POST',%0A data=%7B%22action%22: %22metadata%22,%0A %22serial%22: %22TIQR1%22,%0A %22session%22: %2212345%22%7D):%0A res = self.app.full_dispatch_request()%0A data = json.loads(res.data)%0A identity = data.get(%22identity%22)%0A service = data.get(%22service%22)%0A self.assertEqual(identity.get(%22displayName%22), %22Cornelius %22)%0A self.assertEqual(service.get(%22displayName%22), %22privacyIDEA%22)%0A
86cae13f7dde04f7031ae111e596f2d8c03d5420
Add tests of CSVFile and StdOut recorders
tests/test_recorders.py
tests/test_recorders.py
Python
0
@@ -0,0 +1,1496 @@ +import pytest%0Afrom plumbium.processresult import record, pipeline, call%0Afrom plumbium.recorders import CSVFile, StdOut%0Afrom collections import OrderedDict%0A%0A%0A@pytest.fixture%0Adef simple_pipeline():%0A @record()%0A def recorded_function():%0A call(%5B'echo', '6.35'%5D)%0A%0A def a_pipeline():%0A recorded_function()%0A%0A return a_pipeline%0A%0A%0Adef test_csvfile(simple_pipeline, tmpdir):%0A with tmpdir.as_cwd():%0A recorder = CSVFile(%0A 'test.csv',%0A OrderedDict(%5B%0A ('id', lambda x: x%5B'metadata'%5D%5B'id'%5D),%0A ('data', lambda x: x%5B'processes'%5D%5B0%5D%5B'printed_output'%5D.strip())%0A %5D)%0A )%0A pipeline.run(%0A 'test',%0A simple_pipeline,%0A str(tmpdir),%0A metadata=%7B'id': 1%7D,%0A recorder=recorder%0A )%0A with open('test.csv') as f:%0A assert f.readline().strip() == 'id,data'%0A assert f.readline().strip() == '1,6.35'%0A%0A%0Adef test_stdout(simple_pipeline, tmpdir, capsys):%0A with tmpdir.as_cwd():%0A recorder = StdOut(%0A OrderedDict(%5B%0A ('id', lambda x: x%5B'metadata'%5D%5B'id'%5D),%0A ('data', lambda x: x%5B'processes'%5D%5B0%5D%5B'printed_output'%5D.strip())%0A %5D)%0A )%0A pipeline.run(%0A 'test',%0A simple_pipeline,%0A str(tmpdir),%0A metadata=%7B'id': 1%7D,%0A recorder=recorder%0A )%0A out, err = capsys.readouterr()%0A assert out == 'id: 1%5Cndata: 6.35%5Cn'%0A
2ccc6127c817c8c9b88d11a405ee96e8c33e8e4b
print the pre-cleaned and validated params to console to see the objects we've retrieved, call the .items() method on headers to get more direct access to the WebOb request object's header vals.
server.py
server.py
import webapp2 from paste import httpserver __author__ = "Brian Tomlinson <darthlukan@gmail.com>" class Processor(object): """ Performs some basic validation of data and cleanup as needed before passing off to the "heavy lifter" NOTE: Dummy logic for now as placeholders. """ def __init__(self): pass def validate(self, params): """ Do we even have valid data to work with? """ if params: return "validated: %s" % params return False def clean(self, params): """ Clean it up prior to sending to the processor """ if params: return "cleaned: %s" % params return False def process(self, params): """ Send to processor and return whatever we get back. """ params = "processor_module_call_here(%s)" % params return params def validation_entry(self, params): validated = self.validate(params) cleaned = self.clean(validated) processed = self.process(cleaned) return processed class Handler(webapp2.RequestHandler): def __init__(self, request, response): # Like calling super, but with the built-in for webapp2 self.initialize(request, response) self.proc = Processor() def get(self): params = { 'headers': self.request.headers, 'body': self.request.body, 'content-type': self.request.content_type, 'query-string': self.request.query_string, 'params': self.request.params } processed = self.proc.validation_entry(params) if processed: self.response.write("validated, cleaned, and processed GET data: %s" % processed) else: self.response.write('Error handling and appropriate status code would go here') def post(self): params = { 'headers': self.request.headers, 'body': self.request.body, 'content-type': self.request.content_type, 'query-string': self.request.query_string, 'params': self.request.params } processed = self.proc.validation_entry(params) if processed: self.response.write("validated, cleaned, and processed POST data: %s" % processed) else: self.response.write('Error handling and appropriate status code goes here.') if __name__ == '__main__': app = webapp2.WSGIApplication([ ('/', Handler), ], debug=True) httpserver.serve(app, host='127.0.0.1', port='8080')
Python
0
@@ -1,12 +1,39 @@ +from pprint import pprint%0A%0A import webap @@ -1422,34 +1422,42 @@ .request.headers +.items() ,%0A - 'bod @@ -1633,32 +1633,55 @@ arams%0A %7D%0A + pprint(params)%0A processe @@ -2024,16 +2024,24 @@ .headers +.items() ,%0A @@ -2217,32 +2217,32 @@ .request.params%0A - %7D%0A @@ -2227,32 +2227,55 @@ arams%0A %7D%0A + pprint(params)%0A processe
c850cb4832e6273c8239eeb7d457d8e16bb472d6
Add graph factory
graph_generation/graph_template.py
graph_generation/graph_template.py
Python
0
@@ -0,0 +1,687 @@ +%22%22%22%0AThis module implements factory for creating a graph.%0ACurrent version supports proto and networkx graphs.%0A%22%22%22%0A%0Afrom proto_graph import ProtoGraph%0Afrom nx_graph import NxGraph%0A%0A%0Aclass GraphTemplate:%0A %22%22%22%0A A class to get instance of a selected class for graph generation.%0A%0A ...%0A%0A Methods:%0A get_proto_graph()%0A Return instance of a proto graph.%0A%0A get_nx_graph()%0A Return instance of a networkx graph.%0A %22%22%22%0A @staticmethod%0A def get_proto_graph():%0A %22%22%22Return instance of a proto graph.%22%22%22%0A return ProtoGraph()%0A%0A @staticmethod%0A def get_nx_graph():%0A %22%22%22Return instance of a networkx graph.%22%22%22%0A return NxGraph()%0A
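Usage is a one-line swap between backends, which keeps call sites free of backend-specific imports:

graph = GraphTemplate.get_nx_graph()       # networkx-backed graph
# graph = GraphTemplate.get_proto_graph()  # or the proto-backed variant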
4bd9e4db4af430ae34ed87f695d72ae99ba5bb70
Set up first test level, started to create constraints
solver.py
solver.py
Python
0.000001
@@ -0,0 +1,894 @@ +from constraint import *%0A%0A# Empty space is 0%0A# Brick is a 1%0A# Block is a 2%0A# West facing player - 3%0A# East facing player - 4%0A# Door - 5%0Alevel = %5B%5B1,1,1,1,1,1,1,1,1,1%5D,%0A %5B1,0,0,0,0,0,0,0,0,1%5D,%0A %5B1,0,0,0,0,0,0,0,0,1%5D,%0A %5B1,0,0,0,0,0,0,0,0,1%5D,%0A %5B1,0,0,0,0,0,0,0,0,1%5D,%0A %5B1,0,0,0,0,0,0,0,0,1%5D,%0A %5B1,0,0,0,0,0,0,0,0,1%5D,%0A %5B1,0,0,0,0,0,0,0,0,1%5D,%0A %5B1,5,0,0,0,0,0,3,0,1%5D,%0A %5B1,1,1,1,1,1,1,1,1,1%5D%5D%0A%0Aproblem = Problem()%0A%0A# moves%0A# move east - e%0A# move west - w%0A# move northwest - nw%0A# move northeast - ne%0A# pickup block - p%0A# drop block - d%0A# fall - f%0Aproblem.addVariable(%22e%22, %5B%5B4,0%5D,%5B3,0%5D,%5B3,1%5D,%5B3,2%5D%5D )%0Aproblem.addVariable(%22w%22, %5B%5B0,3%5D,%5B0,4%5D, %5B1,4%5D,%5B2,4%5D%5D)%0Aproblem.addVariable(%22nw%22, %5B0%5D)%0Aproblem.addVariable(%22ne%22, %5B0%5D)%0Aproblem.addVariable(%22p%22, %5B0%5D)%0Aproblem.addVariable(%22d%22, %5B0%5D)%0A%0A%0A%0A%0A%0Asolutions = problem.getSolutions()%0Aprint(solutions)%0A
c92e0350527e7715b6b625c33a79c993aeae66fd
Add gui.py
gui.py
gui.py
Python
0.000002
@@ -0,0 +1,750 @@ +#!/usr/bin/python%0A%0Aimport sys%0Afrom PyQt5.QtWidgets import QMainWindow, QDesktopWidget, QApplication%0A%0Aclass MainWindow(QMainWindow):%0A%0A def __init__(self):%0A super(MainWindow, self).__init__()%0A self.init_UI()%0A%0A def init_UI(self):%0A WINDOW_WIDTH = 800%0A WINDOW_HEIGHT = 800%0A self.resize(WINDOW_WIDTH, WINDOW_HEIGHT)%0A self.center()%0A self.setWindowTitle('Test GUI')%0A self.show()%0A%0A def center(self):%0A screen = QDesktopWidget().screenGeometry()%0A size = self.geometry()%0A self.move((screen.width()-size.width())/2,%0A (screen.height()-size.height())/2)%0A%0A# if __name__ == '__main__':%0Aapp = QApplication(sys.argv)%0Amain_window = MainWindow()%0Asys.exit(app.exec_())%0A
2fb5557aed14d047d1ae120f0ff91c0e355d779f
Add simple perf measuring tool
ref.py
ref.py
Python
0.000001
@@ -0,0 +1,516 @@ +#!/usr/bin/env python2%0A%0Aimport sys%0Aimport subprocess%0A%0A%22%22%22%0AUsage:%0A ./ref.py ./main -B 1000000 -t 3 -T 31%0A%22%22%22%0A%0Asystem = subprocess.check_output%0A%0Agithash = system(%22git rev-parse HEAD%22, shell=True).strip()%0Adate = system(%22date -Ihours%22, shell=True).strip()%0A%0Afilename = %22reference.%25s.%25s%22 %25 (githash, date)%0A%0Abenchargs = sys.argv%5B1:%5D%0A%0Awith open(filename, %22wb%22) as fh:%0A fh.write(%22 %22.join(benchargs) + %22%5Cn%22)%0A%0A system(benchargs) # warm up%0A results = system(benchargs)%0A fh.write(results)%0A%0Aprint %22Wrote%22, filename%0A
3bb3a1f1babab9e6516f635290baa4d4e9762b8d
add pressure box device
mecode/devices/efd_pressure_box.py
mecode/devices/efd_pressure_box.py
Python
0
@@ -0,0 +1,1182 @@ +import serial%0A%0ASTX = '%5Cx02' #Packet Start%0AETX = '%5Cx03' #Packet End%0AACK = '%5Cx06' #Acknowledge%0ANAK = '%5Cx15' #Not Acknowledge%0AENQ = '%5Cx05' #Enquiry%0AEOT = '%5Cx04' #End Of Transmission%0A%0A%0Aclass EFDPressureBox(object):%0A %0A def __init__(self, comport='COM4'):%0A self.comport = comport%0A self.connect()%0A %0A def connect(self):%0A self.s = serial.Serial(self.comport, baudrate=115200,%0A parity='N', stopbits=1, bytesize=8)%0A %0A def disconnect(self):%0A self.s.close()%0A %0A def send(self, command):%0A checksum = self._calculate_checksum(command)%0A msg = ENQ + STX + command + checksum + ETX + EOT%0A self.s.write(msg)%0A self.s.read(self.s.inWaiting())%0A %0A def set_pressure(self, pressure):%0A command = '08PS %7B%7D'.format(str(int(pressure * 10)).zfill(4))%0A self.send(command)%0A %0A def toggle_pressure(self):%0A command = '04DI '%0A self.send(command)%0A %0A def _calculate_checksum(self, string):%0A checksum = 0%0A for char in string:%0A checksum -= ord(char)%0A checksum %25= 256%0A return hex(checksum)%5B2:%5D.upper()
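A hypothetical session with the class above, assuming a box is attached on the default port; per set_pressure, 12.5 is scaled by ten and zero-padded to four digits, so the framed command body is '08PS 0125':

box = EFDPressureBox(comport='COM4')  # default port; adjust to your setup
box.set_pressure(12.5)                # sends command body '08PS 0125'
box.toggle_pressure()                 # dispense on/off
box.disconnect()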
7927fd0c13f14b348faa63c08683c6f80bdc7a0f
Create 5.3_nextsmallbig.py
5.3_nextsmallbig.py
5.3_nextsmallbig.py
Python
0.010827
@@ -0,0 +1,133 @@ +%22%22%22%0Agiven a positive integer, return the next smallest and largest%0Anumber with the same number of 1s in the binary represenation%0A%22%22%22%0A
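The file holds only the problem statement. A brute-force sketch consistent with the docstring — linear scans rather than the classic O(1) bit-manipulation solution:

def count_ones(n):
    return bin(n).count('1')

def next_small_big(n):
    """Return (next smaller, next bigger) ints with n's popcount."""
    ones = count_ones(n)
    smaller = n - 1
    while smaller > 0 and count_ones(smaller) != ones:
        smaller -= 1
    if count_ones(smaller) != ones:
        smaller = None            # no smaller number exists (e.g. n == 1)
    bigger = n + 1
    while count_ones(bigger) != ones:
        bigger += 1
    return smaller, bigger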
2ec45bb864634084d5d951649d6a2f6a77cee76f
Update forward compatibility horizon to 2019-05-21
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 5, 20) @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibiltiy, use the old implementation. return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). 
To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=08, date=01): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 08, 02): # Test that generate_graph_with_new_features() has an effect ``` Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Yields: Nothing. """ global _FORWARD_COMPATIBILITY_HORIZON try: old_compat_date = _FORWARD_COMPATIBILITY_HORIZON _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day) yield finally: _FORWARD_COMPATIBILITY_HORIZON = old_compat_date
Python
0
@@ -1139,17 +1139,17 @@ 19, 5, 2 -0 +1 )%0A%0A%0A@tf_
718503b075d7bf79f430ea9de4fdcc95ad665e7d
Update forward compatibility horizon to 2019-07-06
tensorflow/python/compat/compat.py
tensorflow/python/compat/compat.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for API compatibility between TensorFlow release versions. See [Version Compatibility](https://tensorflow.org/guide/version_compat#backward_forward) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export _FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 7, 5) @tf_export("compat.forward_compatible") def forward_compatible(year, month, day): """Return true if the forward compatibility window has expired. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). Forward-compatibility refers to scenarios where the producer of a TensorFlow model (a GraphDef or SavedModel) is compiled against a version of the TensorFlow library newer than what the consumer was compiled against. The "producer" is typically a Python program that constructs and trains a model while the "consumer" is typically another program that loads and serves the model. TensorFlow has been supporting a 3 week forward-compatibility window for programs compiled from source at HEAD. For example, consider the case where a new operation `MyNewAwesomeAdd` is created with the intent of replacing the implementation of an existing Python wrapper - `tf.add`. The Python wrapper implementation should change from something like: ```python def add(inputs, name=None): return gen_math_ops.add(inputs, name) ``` to: ```python from tensorflow.python.compat import compat def add(inputs, name=None): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) # To maintain forward compatibiltiy, use the old implementation. return gen_math_ops.add(inputs, name) ``` Where `year`, `month`, and `day` specify the date beyond which binaries that consume a model are expected to have been updated to include the new operations. This date is typically at least 3 weeks beyond the date the code that adds the new operation is committed. Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Returns: True if the caller can expect that serialized TensorFlow graphs produced can be consumed by programs that are compiled with the TensorFlow library source code after (year, month, day). """ return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day) @tf_export("compat.forward_compatibility_horizon") @tf_contextlib.contextmanager def forward_compatibility_horizon(year, month, day): """Context manager for testing forward compatibility of generated graphs. See [Version compatibility](https://tensorflow.org/guide/version_compat#backward_forward). 
To ensure forward compatibility of generated graphs (see `forward_compatible`) with older binaries, new features can be gated with: ```python if compat.forward_compatible(year=2018, month=08, date=01): generate_graph_with_new_features() else: generate_graph_so_older_binaries_can_consume_it() ``` However, when adding new features, one may want to unittest it before the forward compatibility window expires. This context manager enables such tests. For example: ```python from tensorflow.python.compat import compat def testMyNewFeature(self): with compat.forward_compatibility_horizon(2018, 08, 02): # Test that generate_graph_with_new_features() has an effect ``` Args: year: A year (e.g., 2018). Must be an `int`. month: A month (1 <= month <= 12) in year. Must be an `int`. day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an `int`. Yields: Nothing. """ global _FORWARD_COMPATIBILITY_HORIZON try: old_compat_date = _FORWARD_COMPATIBILITY_HORIZON _FORWARD_COMPATIBILITY_HORIZON = datetime.date(year, month, day) yield finally: _FORWARD_COMPATIBILITY_HORIZON = old_compat_date
Python
0
@@ -1142,9 +1142,9 @@ 7, -5 +6 )%0A%0A%0A
c9e111804974f21dbe297855ab217e964526baa2
Add search_hints option.
tensorflow/tools/docs/generate2.py
tensorflow/tools/docs/generate2.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""A tool to generate api_docs for TensorFlow2. ``` python generate2.py --output_dir=/tmp/out ``` Requires a local installation of: https://github.com/tensorflow/docs/tree/master/tools tf-nightly-2.0-preview """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from os import path from absl import app from absl import flags import tensorflow as tf from tensorflow_docs.api_generator import generate_lib FLAGS = flags.FLAGS flags.DEFINE_string( "code_url_prefix", "/code/stable/tensorflow/", "A url to prepend to code paths when creating links to defining code") flags.DEFINE_string( "output_dir", "/tmp/out", "A directory, where the docs will be output to.") def build_docs(output_dir, code_url_prefix): """Build api docs for tensorflow v2. Args: output_dir: A string path, where to put the files. code_url_prefix: prefix for "Defined in" links. """ base_dir = path.dirname(tf.__file__) doc_generator = generate_lib.DocGenerator( root_title="TensorFlow 2.0 Preview", py_modules=[("tf", tf)], base_dir=base_dir, search_hints=True, code_url_prefix=code_url_prefix, site_path="api_docs/") doc_generator.build(output_dir) def main(argv): del argv build_docs(output_dir=FLAGS.output_dir, code_url_prefix=FLAGS.code_url_prefix) if __name__ == "__main__": app.run(main)
Python
0.999137
@@ -1438,16 +1438,134 @@ to.%22)%0A%0A +flags.DEFINE_bool(%22search_hints%22, True,%0A %22Include meta-data search hints at the top of each file.%22)%0A%0A %0Adef bui @@ -1599,16 +1599,35 @@ l_prefix +, search_hints=True ):%0A %22%22%22 @@ -1776,16 +1776,96 @@ links.%0A + search_hints: Bool. Include meta-data search hints at the top of each file.%0A %22%22%22%0A @@ -2064,20 +2064,28 @@ h_hints= -True +search_hints ,%0A @@ -2302,16 +2302,62 @@ l_prefix +,%0A search_hints=FLAGS.search_hints )%0A%0A%0Aif _
1e82d6110bee6953b78ee357ed5e0b94710b1357
fix urls
heroku/urls.py
heroku/urls.py
Python
0.005145
@@ -0,0 +1,673 @@ +from django.conf.urls import include, url%0A%0Aurlpatterns = %5B%0A url(r'%5Efafl', include('fafl.urls')),%0A url(r'%5E', include('aristotle_cloud.urls')),%0A url(r'%5Epublish/', include('aristotle_mdr.contrib.self_publish.urls', app_name=%22aristotle_self_publish%22, namespace=%22aristotle_self_publish%22)),%0A url(r'%5E', include('aristotle_mdr.contrib.links.urls', app_name=%22aristotle_mdr_links%22, namespace=%22aristotle_links%22)),%0A url(r'%5E', include('aristotle_mdr.contrib.slots.urls', app_name=%22aristotle_slots%22, namespace=%22aristotle_slots%22)),%0A url(r'%5E', include('aristotle_mdr.contrib.identifiers.urls', app_name=%22aristotle_mdr_identifiers%22, namespace=%22aristotle_identifiers%22)),%0A%5D%0A
6f9ced48a8c423e505e21cfa9a0b0d05b4c86f5c
Add lava context processor.
lava_server/context_processors.py
lava_server/context_processors.py
Python
0.000001
@@ -0,0 +1,875 @@ +# Copyright (C) 2010, 2011 Linaro Limited%0A#%0A# Author: Zygmunt Krynicki %3Czygmunt.krynicki@linaro.org%3E%0A#%0A# This file is part of LAVA Server.%0A#%0A# LAVA Server is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License version 3%0A# as published by the Free Software Foundation%0A#%0A# LAVA Server is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with LAVA Server. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0A%0Afrom lava_server.extension import loader%0A%0Adef lava(request):%0A return %7B%0A 'lava': %7B%0A 'extensions': loader.extensions%0A %7D%0A %7D%0A
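A context processor only takes effect once registered in settings; a hypothetical snippet in the Django 1.x style contemporary with this file (the other entries are illustrative). Templates can then read {{ lava.extensions }}:

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    'lava_server.context_processors.lava',   # exposes {{ lava.extensions }}
)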
54f6b9e5d8769ba608fe0d3f14eda2746319d6d2
Add class DepthSerializerMixin
mixins.py
mixins.py
Python
0
@@ -0,0 +1,418 @@ +class DepthSerializerMixin(object):%0A%09%22%22%22Custom method 'get_serializer_class', set attribute 'depth' based on query parameter in the url%22%22%22%0A%09%0A%09def get_serializer_class(self):%0A%09%09serializer_class = self.serializer_class%0A%09%09query_params = self.request.QUERY_PARAMS%0A%09%09depth = query_params.get('__depth', None)%0A%09%09serializer_class.Meta.depth = int(depth) if(depth != None and depth.isdigit()) else 0%0A%09%09return serializer_class%0A
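QUERY_PARAMS dates this mixin to Django REST Framework 2.x (later renamed query_params). A hypothetical view wiring it up — Widget and WidgetSerializer are placeholders:

from rest_framework import viewsets

class WidgetViewSet(DepthSerializerMixin, viewsets.ModelViewSet):
    serializer_class = WidgetSerializer   # placeholder serializer
    queryset = Widget.objects.all()       # placeholder model

# GET /widgets/?__depth=2 serializes nested relations two levels deep;
# omitting __depth (or passing a non-digit) falls back to depth 0.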